Column schema and per-column statistics for the function records below:

| column | dtype | stats |
|---|---|---|
| repo | string | 679 distinct values |
| path | string | length 6 to 122 |
| func_name | string | length 2 to 76 |
| original_string | string | length 87 to 70.9k |
| language | string | 1 distinct value |
| code | string | length 87 to 70.9k |
| code_tokens | sequence | length 20 to 6.91k |
| docstring | string | length 1 to 21.7k |
| docstring_tokens | sequence | length 1 to 1.6k |
| sha | string | 679 distinct values |
| url | string | length 92 to 213 |
| partition | string | 1 distinct value |

repo: aleju/imgaug | path: imgaug/augmentables/kps.py | func_name: Keypoint.shift
original_string:
def shift(self, x=0, y=0):
"""
Move the keypoint around on an image.
Parameters
----------
x : number, optional
Move by this value on the x axis.
y : number, optional
Move by this value on the y axis.
Returns
-------
imgaug.Keypoint
Keypoint object with new coordinates.
"""
    return self.deepcopy(self.x + x, self.y + y)

language: python
code: (identical to original_string above; duplicate field omitted)
code_tokens: (token-by-token split of the code above; duplicate omitted)
docstring: Move the keypoint around on an image. (full text identical to the docstring inside the code above)
docstring_tokens: (token-by-token split of the docstring; duplicate omitted)
sha: 786be74aa855513840113ea523c5df495dc6a8af
url: https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/kps.py#L133-L151
partition: valid
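A minimal usage sketch for the function in this record (not part of the dataset row; it assumes the `imgaug` package is installed and importable from the path listed above, and the coordinates are made up):

```python
from imgaug.augmentables.kps import Keypoint

kp = Keypoint(x=10.5, y=20.0)
kp_shifted = kp.shift(x=3, y=-2)   # returns a new Keypoint; the original is untouched
print(kp_shifted.x, kp_shifted.y)  # 13.5 18.0
```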

repo: aleju/imgaug | path: imgaug/augmentables/kps.py | func_name: Keypoint.draw_on_image
original_string:
def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=3,
copy=True, raise_if_out_of_image=False):
"""
Draw the keypoint onto a given image.
The keypoint is drawn as a square.
Parameters
----------
image : (H,W,3) ndarray
The image onto which to draw the keypoint.
color : int or list of int or tuple of int or (3,) ndarray, optional
The RGB color of the keypoint. If a single int ``C``, then that is
equivalent to ``(C,C,C)``.
alpha : float, optional
The opacity of the drawn keypoint, where ``1.0`` denotes a fully
visible keypoint and ``0.0`` an invisible one.
size : int, optional
The size of the keypoint. If set to ``S``, each square will have
size ``S x S``.
copy : bool, optional
Whether to copy the image before drawing the keypoint.
raise_if_out_of_image : bool, optional
Whether to raise an exception if the keypoint is outside of the
image.
Returns
-------
image : (H,W,3) ndarray
Image with drawn keypoint.
"""
if copy:
image = np.copy(image)
if image.ndim == 2:
assert ia.is_single_number(color), (
"Got a 2D image. Expected then 'color' to be a single number, "
"but got %s." % (str(color),))
elif image.ndim == 3 and ia.is_single_number(color):
color = [color] * image.shape[-1]
input_dtype = image.dtype
alpha_color = color
if alpha < 0.01:
# keypoint invisible, nothing to do
return image
elif alpha > 0.99:
alpha = 1
else:
image = image.astype(np.float32, copy=False)
alpha_color = alpha * np.array(color)
height, width = image.shape[0:2]
y, x = self.y_int, self.x_int
x1 = max(x - size//2, 0)
x2 = min(x + 1 + size//2, width)
y1 = max(y - size//2, 0)
y2 = min(y + 1 + size//2, height)
x1_clipped, x2_clipped = np.clip([x1, x2], 0, width)
y1_clipped, y2_clipped = np.clip([y1, y2], 0, height)
x1_clipped_ooi = (x1_clipped < 0 or x1_clipped >= width)
x2_clipped_ooi = (x2_clipped < 0 or x2_clipped >= width+1)
y1_clipped_ooi = (y1_clipped < 0 or y1_clipped >= height)
y2_clipped_ooi = (y2_clipped < 0 or y2_clipped >= height+1)
x_ooi = (x1_clipped_ooi and x2_clipped_ooi)
y_ooi = (y1_clipped_ooi and y2_clipped_ooi)
x_zero_size = (x2_clipped - x1_clipped) < 1 # min size is 1px
y_zero_size = (y2_clipped - y1_clipped) < 1
if not x_ooi and not y_ooi and not x_zero_size and not y_zero_size:
if alpha == 1:
image[y1_clipped:y2_clipped, x1_clipped:x2_clipped] = color
else:
image[y1_clipped:y2_clipped, x1_clipped:x2_clipped] = (
(1 - alpha)
* image[y1_clipped:y2_clipped, x1_clipped:x2_clipped]
+ alpha_color
)
else:
if raise_if_out_of_image:
raise Exception(
"Cannot draw keypoint x=%.8f, y=%.8f on image with "
"shape %s." % (y, x, image.shape))
if image.dtype.name != input_dtype.name:
if input_dtype.name == "uint8":
image = np.clip(image, 0, 255, out=image)
image = image.astype(input_dtype, copy=False)
    return image

language: python
code: (identical to original_string above; duplicate field omitted)
code_tokens: (token-by-token split of the code above; duplicate omitted)
docstring: Draw the keypoint onto a given image. (full text identical to the docstring inside the code above)
docstring_tokens: (token-by-token split of the docstring; duplicate omitted)
sha: 786be74aa855513840113ea523c5df495dc6a8af
url: https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/kps.py#L153-L250
partition: valid
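A short, hedged sketch of how the function above is typically called (not part of the record; the image size and color are arbitrary):

```python
import numpy as np
from imgaug.augmentables.kps import Keypoint

image = np.zeros((64, 64, 3), dtype=np.uint8)  # blank RGB canvas
kp = Keypoint(x=32, y=16)
image_drawn = kp.draw_on_image(image, color=(255, 0, 0), size=5, alpha=1.0)
# image_drawn now contains a 5x5 red square centred on (x=32, y=16);
# the input image is left unchanged because copy=True by default.
```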

repo: aleju/imgaug | path: imgaug/augmentables/kps.py | func_name: Keypoint.generate_similar_points_manhattan
original_string:
def generate_similar_points_manhattan(self, nb_steps, step_size, return_array=False):
"""
Generate nearby points to this keypoint based on manhattan distance.
To generate the first neighbouring points, a distance of S (step size) is moved from the
center point (this keypoint) to the top, right, bottom and left, resulting in four new
points. From these new points, the pattern is repeated. Overlapping points are ignored.
The resulting points have a shape similar to a square rotated by 45 degrees.
Parameters
----------
nb_steps : int
The number of steps to move from the center point. nb_steps=1 results in a total of
5 output points (1 center point + 4 neighbours).
step_size : number
The step size to move from every point to its neighbours.
return_array : bool, optional
Whether to return the generated points as a list of keypoints or an array
of shape ``(N,2)``, where ``N`` is the number of generated points and the second axis contains
the x- (first value) and y- (second value) coordinates.
Returns
-------
points : list of imgaug.Keypoint or (N,2) ndarray
If return_array was False, then a list of Keypoint.
Otherwise a numpy array of shape ``(N,2)``, where ``N`` is the number of generated points and
the second axis contains the x- (first value) and y- (second value) coordinates.
The center keypoint (the one on which this function was called) is always included.
"""
# TODO add test
    # Points generated in manhattan style with S steps have a shape similar to a 45deg rotated
# square. The center line with the origin point has S+1+S = 1+2*S points (S to the left,
# S to the right). The lines above contain (S+1+S)-2 + (S+1+S)-2-2 + ... + 1 points. E.g.
# for S=2 it would be 3+1=4 and for S=3 it would be 5+3+1=9. Same for the lines below the
# center. Hence the total number of points is S+1+S + 2*(S^2).
points = np.zeros((nb_steps + 1 + nb_steps + 2*(nb_steps**2), 2), dtype=np.float32)
# we start at the bottom-most line and move towards the top-most line
yy = np.linspace(self.y - nb_steps * step_size, self.y + nb_steps * step_size, nb_steps + 1 + nb_steps)
# bottom-most line contains only one point
width = 1
nth_point = 0
for i_y, y in enumerate(yy):
if width == 1:
xx = [self.x]
else:
xx = np.linspace(self.x - (width-1)//2 * step_size, self.x + (width-1)//2 * step_size, width)
for x in xx:
points[nth_point] = [x, y]
nth_point += 1
if i_y < nb_steps:
width += 2
else:
width -= 2
if return_array:
return points
    return [self.deepcopy(x=points[i, 0], y=points[i, 1]) for i in sm.xrange(points.shape[0])]

language: python
code: (identical to original_string above; duplicate field omitted)
code_tokens: (token-by-token split of the code above; duplicate omitted)
docstring: Generate nearby points to this keypoint based on manhattan distance. (full text identical to the docstring inside the code above)
docstring_tokens: (token-by-token split of the docstring; duplicate omitted)
sha: 786be74aa855513840113ea523c5df495dc6a8af
url: https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/kps.py#L252-L315
partition: valid
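A small sketch illustrating the point count described in the docstring above (assumes imgaug is installed; the values are illustrative):

```python
from imgaug.augmentables.kps import Keypoint

kp = Keypoint(x=10, y=10)
points = kp.generate_similar_points_manhattan(nb_steps=1, step_size=2.0,
                                              return_array=True)
# points.shape == (5, 2): the centre plus its four manhattan neighbours,
# matching the formula 2*S + 1 + 2*S**2 with S=1.
```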

repo: aleju/imgaug | path: imgaug/augmentables/kps.py | func_name: Keypoint.copy
original_string:
def copy(self, x=None, y=None):
"""
Create a shallow copy of the Keypoint object.
Parameters
----------
x : None or number, optional
Coordinate of the keypoint on the x axis.
If ``None``, the instance's value will be copied.
y : None or number, optional
Coordinate of the keypoint on the y axis.
If ``None``, the instance's value will be copied.
Returns
-------
imgaug.Keypoint
Shallow copy.
"""
    return self.deepcopy(x=x, y=y)

language: python
code: (identical to original_string above; duplicate field omitted)
code_tokens: (token-by-token split of the code above; duplicate omitted)
docstring: Create a shallow copy of the Keypoint object. (full text identical to the docstring inside the code above)
docstring_tokens: (token-by-token split of the docstring; duplicate omitted)
sha: 786be74aa855513840113ea523c5df495dc6a8af
url: https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/kps.py#L317-L337
partition: valid

repo: aleju/imgaug | path: imgaug/augmentables/kps.py | func_name: Keypoint.deepcopy
original_string:
def deepcopy(self, x=None, y=None):
"""
Create a deep copy of the Keypoint object.
Parameters
----------
x : None or number, optional
Coordinate of the keypoint on the x axis.
If ``None``, the instance's value will be copied.
y : None or number, optional
Coordinate of the keypoint on the y axis.
If ``None``, the instance's value will be copied.
Returns
-------
imgaug.Keypoint
Deep copy.
"""
x = self.x if x is None else x
y = self.y if y is None else y
    return Keypoint(x=x, y=y)

language: python
code: (identical to original_string above; duplicate field omitted)
code_tokens: (token-by-token split of the code above; duplicate omitted)
docstring: Create a deep copy of the Keypoint object. (full text identical to the docstring inside the code above)
docstring_tokens: (token-by-token split of the docstring; duplicate omitted)
sha: 786be74aa855513840113ea523c5df495dc6a8af
url: https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/kps.py#L339-L361
partition: valid
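A brief sketch covering both `copy` and `deepcopy` from the two records above (not part of the dataset; coordinates are arbitrary):

```python
from imgaug.augmentables.kps import Keypoint

kp = Keypoint(x=1.0, y=2.0)
kp_moved = kp.deepcopy(y=5.0)  # new Keypoint(x=1.0, y=5.0); x is carried over
kp_copy = kp.copy()            # also a new object; copy() delegates to deepcopy() here
```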

repo: aleju/imgaug | path: imgaug/augmentables/kps.py | func_name: KeypointsOnImage.on
original_string:
def on(self, image):
"""
Project keypoints from one image to a new one.
Parameters
----------
image : ndarray or tuple of int
New image onto which the keypoints are to be projected.
May also simply be that new image's shape tuple.
Returns
-------
keypoints : imgaug.KeypointsOnImage
Object containing all projected keypoints.
"""
shape = normalize_shape(image)
if shape[0:2] == self.shape[0:2]:
return self.deepcopy()
else:
keypoints = [kp.project(self.shape, shape) for kp in self.keypoints]
        return self.deepcopy(keypoints, shape)

language: python
code: (identical to original_string above; duplicate field omitted)
code_tokens: (token-by-token split of the code above; duplicate omitted)
docstring: Project keypoints from one image to a new one. (full text identical to the docstring inside the code above)
docstring_tokens: (token-by-token split of the docstring; duplicate omitted)
sha: 786be74aa855513840113ea523c5df495dc6a8af
url: https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/kps.py#L414-L435
partition: valid
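A minimal sketch of projecting keypoints onto a differently sized image (assumes imgaug is installed; the shapes are arbitrary):

```python
from imgaug.augmentables.kps import Keypoint, KeypointsOnImage

kps = KeypointsOnImage([Keypoint(x=10, y=20)], shape=(100, 200, 3))
kps_big = kps.on((200, 400, 3))  # the target may be given as a shape tuple
# The keypoint is scaled with the image and ends up at x=20, y=40.
```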

repo: aleju/imgaug | path: imgaug/augmentables/kps.py | func_name: KeypointsOnImage.draw_on_image
original_string:
def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=3,
copy=True, raise_if_out_of_image=False):
"""
Draw all keypoints onto a given image.
Each keypoint is marked by a square of a chosen color and size.
Parameters
----------
image : (H,W,3) ndarray
The image onto which to draw the keypoints.
This image should usually have the same shape as
set in KeypointsOnImage.shape.
color : int or list of int or tuple of int or (3,) ndarray, optional
The RGB color of all keypoints. If a single int ``C``, then that is
equivalent to ``(C,C,C)``.
alpha : float, optional
The opacity of the drawn keypoint, where ``1.0`` denotes a fully
visible keypoint and ``0.0`` an invisible one.
size : int, optional
The size of each point. If set to ``C``, each square will have
size ``C x C``.
copy : bool, optional
Whether to copy the image before drawing the points.
raise_if_out_of_image : bool, optional
Whether to raise an exception if any keypoint is outside of the image.
Returns
-------
image : (H,W,3) ndarray
Image with drawn keypoints.
"""
image = np.copy(image) if copy else image
for keypoint in self.keypoints:
image = keypoint.draw_on_image(
image, color=color, alpha=alpha, size=size, copy=False,
raise_if_out_of_image=raise_if_out_of_image)
    return image

language: python
code: (identical to original_string above; duplicate field omitted)
code_tokens: (token-by-token split of the code above; duplicate omitted)
docstring: Draw all keypoints onto a given image. (full text identical to the docstring inside the code above)
docstring_tokens: (token-by-token split of the docstring; duplicate omitted)
sha: 786be74aa855513840113ea523c5df495dc6a8af
url: https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/kps.py#L437-L480
partition: valid

repo: aleju/imgaug | path: imgaug/augmentables/kps.py | func_name: KeypointsOnImage.shift
original_string:
def shift(self, x=0, y=0):
"""
Move the keypoints around on an image.
Parameters
----------
x : number, optional
Move each keypoint by this value on the x axis.
y : number, optional
Move each keypoint by this value on the y axis.
Returns
-------
out : KeypointsOnImage
Keypoints after moving them.
"""
keypoints = [keypoint.shift(x=x, y=y) for keypoint in self.keypoints]
    return self.deepcopy(keypoints)

language: python
code: (identical to original_string above; duplicate field omitted)
code_tokens: (token-by-token split of the code above; duplicate omitted)
docstring: Move the keypoints around on an image. (full text identical to the docstring inside the code above)
docstring_tokens: (token-by-token split of the docstring; duplicate omitted)
sha: 786be74aa855513840113ea523c5df495dc6a8af
url: https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/kps.py#L482-L501
partition: valid

repo: aleju/imgaug | path: imgaug/augmentables/kps.py | func_name: KeypointsOnImage.to_xy_array
original_string:
def to_xy_array(self):
"""
Convert keypoint coordinates to ``(N,2)`` array.
Returns
-------
(N, 2) ndarray
Array containing the coordinates of all keypoints.
Shape is ``(N,2)`` with coordinates in xy-form.
"""
result = np.zeros((len(self.keypoints), 2), dtype=np.float32)
for i, keypoint in enumerate(self.keypoints):
result[i, 0] = keypoint.x
result[i, 1] = keypoint.y
    return result

language: python
code: (identical to original_string above; duplicate field omitted)
code_tokens: (token-by-token split of the code above; duplicate omitted)
docstring: Convert keypoint coordinates to ``(N,2)`` array. (full text identical to the docstring inside the code above)
docstring_tokens: (token-by-token split of the docstring; duplicate omitted)
sha: 786be74aa855513840113ea523c5df495dc6a8af
url: https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/kps.py#L517-L532
partition: valid

repo: aleju/imgaug | path: imgaug/augmentables/kps.py | func_name: KeypointsOnImage.from_xy_array
original_string:
def from_xy_array(cls, xy, shape):
"""
Convert an array (N,2) with a given image shape to a KeypointsOnImage object.
Parameters
----------
xy : (N, 2) ndarray
Coordinates of ``N`` keypoints on the original image, given
as ``(N,2)`` array of xy-coordinates.
shape : tuple of int or ndarray
Shape tuple of the image on which the keypoints are placed.
Returns
-------
KeypointsOnImage
KeypointsOnImage object that contains all keypoints from the array.
"""
keypoints = [Keypoint(x=coord[0], y=coord[1]) for coord in xy]
    return KeypointsOnImage(keypoints, shape)

language: python
code: (identical to original_string above; duplicate field omitted)
code_tokens: (token-by-token split of the code above; duplicate omitted)
docstring: Convert an array (N,2) with a given image shape to a KeypointsOnImage object. (full text identical to the docstring inside the code above)
docstring_tokens: (token-by-token split of the docstring; duplicate omitted)
sha: 786be74aa855513840113ea523c5df495dc6a8af
url: https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/kps.py#L559-L579
partition: valid
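A round-trip sketch for the two array-conversion records above, `to_xy_array` and `from_xy_array` (not part of the dataset; coordinates are arbitrary):

```python
import numpy as np
from imgaug.augmentables.kps import Keypoint, KeypointsOnImage

kps = KeypointsOnImage([Keypoint(x=1, y=2), Keypoint(x=3, y=4)], shape=(10, 10, 3))
xy = kps.to_xy_array()  # float32 array of shape (2, 2) in xy order
kps_restored = KeypointsOnImage.from_xy_array(xy, shape=(10, 10, 3))
assert np.allclose(xy, kps_restored.to_xy_array())
```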

repo: aleju/imgaug | path: imgaug/augmentables/kps.py | func_name: KeypointsOnImage.to_keypoint_image
original_string:
def to_keypoint_image(self, size=1):
"""
Draws a new black image of shape ``(H,W,N)`` in which all keypoint coordinates are set to 255.
(H=shape height, W=shape width, N=number of keypoints)
This function can be used as a helper when augmenting keypoints with a method that only supports the
augmentation of images.
Parameters
-------
size : int
Size of each (squared) point.
Returns
-------
image : (H,W,N) ndarray
Image in which the keypoints are marked. H is the height,
defined in KeypointsOnImage.shape[0] (analogous W). N is the
number of keypoints.
"""
ia.do_assert(len(self.keypoints) > 0)
height, width = self.shape[0:2]
image = np.zeros((height, width, len(self.keypoints)), dtype=np.uint8)
ia.do_assert(size % 2 != 0)
sizeh = max(0, (size-1)//2)
for i, keypoint in enumerate(self.keypoints):
# TODO for float values spread activation over several cells
# here and do voting at the end
y = keypoint.y_int
x = keypoint.x_int
x1 = np.clip(x - sizeh, 0, width-1)
x2 = np.clip(x + sizeh + 1, 0, width)
y1 = np.clip(y - sizeh, 0, height-1)
y2 = np.clip(y + sizeh + 1, 0, height)
if x1 < x2 and y1 < y2:
image[y1:y2, x1:x2, i] = 128
if 0 <= y < height and 0 <= x < width:
image[y, x, i] = 255
    return image

language: python
code: (identical to original_string above; duplicate field omitted)
code_tokens: (token-by-token split of the code above; duplicate omitted)
docstring: Draws a new black image of shape ``(H,W,N)`` in which all keypoint coordinates are set to 255. (full text identical to the docstring inside the code above)
docstring_tokens: (token-by-token split of the docstring; duplicate omitted)
sha: 786be74aa855513840113ea523c5df495dc6a8af
url: https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/kps.py#L582-L623
partition: valid

repo: aleju/imgaug | path: imgaug/augmentables/kps.py | func_name: KeypointsOnImage.from_keypoint_image
original_string:
def from_keypoint_image(image, if_not_found_coords={"x": -1, "y": -1}, threshold=1, nb_channels=None): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
"""
Converts an image generated by ``to_keypoint_image()`` back to a KeypointsOnImage object.
Parameters
----------
image : (H,W,N) ndarray
The keypoints image. N is the number of keypoints.
if_not_found_coords : tuple or list or dict or None, optional
Coordinates to use for keypoints that cannot be found in `image`.
If this is a list/tuple, it must have two integer values.
If it is a dictionary, it must have the keys ``x`` and ``y`` with
each containing one integer value.
If this is None, then the keypoint will not be added to the final
KeypointsOnImage object.
threshold : int, optional
The search for keypoints works by searching for the argmax in
        each channel. This parameter contains the minimum value that
the max must have in order to be viewed as a keypoint.
nb_channels : None or int, optional
Number of channels of the image on which the keypoints are placed.
Some keypoint augmenters require that information.
If set to None, the keypoint's shape will be set
to ``(height, width)``, otherwise ``(height, width, nb_channels)``.
Returns
-------
out : KeypointsOnImage
The extracted keypoints.
"""
ia.do_assert(len(image.shape) == 3)
height, width, nb_keypoints = image.shape
drop_if_not_found = False
if if_not_found_coords is None:
drop_if_not_found = True
if_not_found_x = -1
if_not_found_y = -1
elif isinstance(if_not_found_coords, (tuple, list)):
ia.do_assert(len(if_not_found_coords) == 2)
if_not_found_x = if_not_found_coords[0]
if_not_found_y = if_not_found_coords[1]
elif isinstance(if_not_found_coords, dict):
if_not_found_x = if_not_found_coords["x"]
if_not_found_y = if_not_found_coords["y"]
else:
raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (
type(if_not_found_coords),))
keypoints = []
for i in sm.xrange(nb_keypoints):
maxidx_flat = np.argmax(image[..., i])
maxidx_ndim = np.unravel_index(maxidx_flat, (height, width))
found = (image[maxidx_ndim[0], maxidx_ndim[1], i] >= threshold)
if found:
keypoints.append(Keypoint(x=maxidx_ndim[1], y=maxidx_ndim[0]))
else:
if drop_if_not_found:
                pass # don't add the keypoint to the result list, i.e. drop it
else:
keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))
out_shape = (height, width)
if nb_channels is not None:
out_shape += (nb_channels,)
    return KeypointsOnImage(keypoints, shape=out_shape)

language: python
code: (identical to original_string above; duplicate field omitted)
code_tokens: (token-by-token split of the code above; duplicate omitted)
docstring: Converts an image generated by ``to_keypoint_image()`` back to a KeypointsOnImage object. (full text identical to the docstring inside the code above)
docstring_tokens: (token-by-token split of the docstring; duplicate omitted)
sha: 786be74aa855513840113ea523c5df495dc6a8af
url: https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/kps.py#L626-L695
partition: valid
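A hedged round-trip sketch for the keypoint-image helpers in the two records above (not part of the dataset; the sizes and the threshold value are arbitrary choices):

```python
from imgaug.augmentables.kps import Keypoint, KeypointsOnImage

kps = KeypointsOnImage([Keypoint(x=4, y=7)], shape=(20, 30, 3))
heat = kps.to_keypoint_image(size=3)  # uint8 array of shape (20, 30, 1), 255 at (y=7, x=4)
kps_back = KeypointsOnImage.from_keypoint_image(
    heat, if_not_found_coords={"x": -1, "y": -1}, threshold=128, nb_channels=3)
# kps_back.keypoints[0] is again located at x=4, y=7.
```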

repo: aleju/imgaug | path: imgaug/augmentables/kps.py | func_name: KeypointsOnImage.to_distance_maps
original_string:
def to_distance_maps(self, inverted=False):
"""
Generates a ``(H,W,K)`` output containing ``K`` distance maps for ``K`` keypoints.
The k-th distance map contains at every location ``(y, x)`` the euclidean distance to the k-th keypoint.
This function can be used as a helper when augmenting keypoints with a method that only supports
the augmentation of images.
Parameters
-------
inverted : bool, optional
If True, inverted distance maps are returned where each distance value d is replaced
by ``d/(d+1)``, i.e. the distance maps have values in the range ``(0.0, 1.0]`` with 1.0
denoting exactly the position of the respective keypoint.
Returns
-------
distance_maps : (H,W,K) ndarray
A ``float32`` array containing ``K`` distance maps for ``K`` keypoints. Each location
``(y, x, k)`` in the array denotes the euclidean distance at ``(y, x)`` to the ``k``-th keypoint.
In inverted mode the distance ``d`` is replaced by ``d/(d+1)``. The height and width
of the array match the height and width in ``KeypointsOnImage.shape``.
"""
ia.do_assert(len(self.keypoints) > 0)
height, width = self.shape[0:2]
distance_maps = np.zeros((height, width, len(self.keypoints)), dtype=np.float32)
yy = np.arange(0, height)
xx = np.arange(0, width)
grid_xx, grid_yy = np.meshgrid(xx, yy)
for i, keypoint in enumerate(self.keypoints):
y, x = keypoint.y, keypoint.x
distance_maps[:, :, i] = (grid_xx - x) ** 2 + (grid_yy - y) ** 2
distance_maps = np.sqrt(distance_maps)
if inverted:
return 1/(distance_maps+1)
    return distance_maps

language: python
code: (identical to original_string above; duplicate field omitted)
code_tokens: (token-by-token split of the code above; duplicate omitted)
docstring: Generates a ``(H,W,K)`` output containing ``K`` distance maps for ``K`` keypoints. (full text identical to the docstring inside the code above)
docstring_tokens: (token-by-token split of the docstring; duplicate omitted)
sha: 786be74aa855513840113ea523c5df495dc6a8af
url: https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/kps.py#L697-L736
partition: valid
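A small sketch of the distance-map helper from the record above (not part of the dataset; shapes and coordinates are arbitrary):

```python
from imgaug.augmentables.kps import Keypoint, KeypointsOnImage

kps = KeypointsOnImage([Keypoint(x=2, y=3), Keypoint(x=8, y=1)], shape=(10, 10, 3))
dist = kps.to_distance_maps()                   # float32 array of shape (10, 10, 2)
dist_inv = kps.to_distance_maps(inverted=True)  # values in (0.0, 1.0]
# At the first keypoint's own location: dist[3, 2, 0] == 0.0 and dist_inv[3, 2, 0] == 1.0.
```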

repo: aleju/imgaug | path: imgaug/augmentables/kps.py | func_name: KeypointsOnImage.from_distance_maps
original_string:
def from_distance_maps(distance_maps, inverted=False, if_not_found_coords={"x": -1, "y": -1}, threshold=None, # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
nb_channels=None):
"""
Converts maps generated by ``to_distance_maps()`` back to a KeypointsOnImage object.
Parameters
----------
distance_maps : (H,W,N) ndarray
The distance maps. N is the number of keypoints.
inverted : bool, optional
Whether the given distance maps were generated in inverted or normal mode.
if_not_found_coords : tuple or list or dict or None, optional
Coordinates to use for keypoints that cannot be found in ``distance_maps``.
If this is a list/tuple, it must have two integer values.
If it is a dictionary, it must have the keys ``x`` and ``y``, with each
containing one integer value.
If this is None, then the keypoint will not be added to the final
KeypointsOnImage object.
threshold : float, optional
The search for keypoints works by searching for the argmin (non-inverted) or
argmax (inverted) in each channel. This parameter contains the maximum (non-inverted)
or minimum (inverted) value to accept in order to view a hit as a keypoint.
Use None to use no min/max.
nb_channels : None or int, optional
Number of channels of the image on which the keypoints are placed.
Some keypoint augmenters require that information.
If set to None, the keypoint's shape will be set
to ``(height, width)``, otherwise ``(height, width, nb_channels)``.
Returns
-------
imgaug.KeypointsOnImage
The extracted keypoints.
"""
ia.do_assert(len(distance_maps.shape) == 3)
height, width, nb_keypoints = distance_maps.shape
drop_if_not_found = False
if if_not_found_coords is None:
drop_if_not_found = True
if_not_found_x = -1
if_not_found_y = -1
elif isinstance(if_not_found_coords, (tuple, list)):
ia.do_assert(len(if_not_found_coords) == 2)
if_not_found_x = if_not_found_coords[0]
if_not_found_y = if_not_found_coords[1]
elif isinstance(if_not_found_coords, dict):
if_not_found_x = if_not_found_coords["x"]
if_not_found_y = if_not_found_coords["y"]
else:
raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (
type(if_not_found_coords),))
keypoints = []
for i in sm.xrange(nb_keypoints):
# TODO introduce voting here among all distance values that have min/max values
if inverted:
hitidx_flat = np.argmax(distance_maps[..., i])
else:
hitidx_flat = np.argmin(distance_maps[..., i])
hitidx_ndim = np.unravel_index(hitidx_flat, (height, width))
if not inverted and threshold is not None:
found = (distance_maps[hitidx_ndim[0], hitidx_ndim[1], i] < threshold)
elif inverted and threshold is not None:
found = (distance_maps[hitidx_ndim[0], hitidx_ndim[1], i] >= threshold)
else:
found = True
if found:
keypoints.append(Keypoint(x=hitidx_ndim[1], y=hitidx_ndim[0]))
else:
if drop_if_not_found:
pass # dont add the keypoint to the result list, i.e. drop it
else:
keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))
out_shape = (height, width)
if nb_channels is not None:
out_shape += (nb_channels,)
return KeypointsOnImage(keypoints, shape=out_shape) | python | def from_distance_maps(distance_maps, inverted=False, if_not_found_coords={"x": -1, "y": -1}, threshold=None, # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
nb_channels=None):
"""
Converts maps generated by ``to_distance_maps()`` back to a KeypointsOnImage object.
Parameters
----------
distance_maps : (H,W,N) ndarray
The distance maps. N is the number of keypoints.
inverted : bool, optional
Whether the given distance maps were generated in inverted or normal mode.
if_not_found_coords : tuple or list or dict or None, optional
Coordinates to use for keypoints that cannot be found in ``distance_maps``.
If this is a list/tuple, it must have two integer values.
If it is a dictionary, it must have the keys ``x`` and ``y``, with each
containing one integer value.
If this is None, then the keypoint will not be added to the final
KeypointsOnImage object.
threshold : float, optional
The search for keypoints works by searching for the argmin (non-inverted) or
argmax (inverted) in each channel. This parameter contains the maximum (non-inverted)
or minimum (inverted) value to accept in order to view a hit as a keypoint.
Use None to use no min/max.
nb_channels : None or int, optional
Number of channels of the image on which the keypoints are placed.
Some keypoint augmenters require that information.
If set to None, the keypoint's shape will be set
to ``(height, width)``, otherwise ``(height, width, nb_channels)``.
Returns
-------
imgaug.KeypointsOnImage
The extracted keypoints.
"""
ia.do_assert(len(distance_maps.shape) == 3)
height, width, nb_keypoints = distance_maps.shape
drop_if_not_found = False
if if_not_found_coords is None:
drop_if_not_found = True
if_not_found_x = -1
if_not_found_y = -1
elif isinstance(if_not_found_coords, (tuple, list)):
ia.do_assert(len(if_not_found_coords) == 2)
if_not_found_x = if_not_found_coords[0]
if_not_found_y = if_not_found_coords[1]
elif isinstance(if_not_found_coords, dict):
if_not_found_x = if_not_found_coords["x"]
if_not_found_y = if_not_found_coords["y"]
else:
raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (
type(if_not_found_coords),))
keypoints = []
for i in sm.xrange(nb_keypoints):
# TODO introduce voting here among all distance values that have min/max values
if inverted:
hitidx_flat = np.argmax(distance_maps[..., i])
else:
hitidx_flat = np.argmin(distance_maps[..., i])
hitidx_ndim = np.unravel_index(hitidx_flat, (height, width))
if not inverted and threshold is not None:
found = (distance_maps[hitidx_ndim[0], hitidx_ndim[1], i] < threshold)
elif inverted and threshold is not None:
found = (distance_maps[hitidx_ndim[0], hitidx_ndim[1], i] >= threshold)
else:
found = True
if found:
keypoints.append(Keypoint(x=hitidx_ndim[1], y=hitidx_ndim[0]))
else:
if drop_if_not_found:
pass # dont add the keypoint to the result list, i.e. drop it
else:
keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))
out_shape = (height, width)
if nb_channels is not None:
out_shape += (nb_channels,)
return KeypointsOnImage(keypoints, shape=out_shape) | [
"def",
"from_distance_maps",
"(",
"distance_maps",
",",
"inverted",
"=",
"False",
",",
"if_not_found_coords",
"=",
"{",
"\"x\"",
":",
"-",
"1",
",",
"\"y\"",
":",
"-",
"1",
"}",
",",
"threshold",
"=",
"None",
",",
"# pylint: disable=locally-disabled, dangerous-default-value, line-too-long",
"nb_channels",
"=",
"None",
")",
":",
"ia",
".",
"do_assert",
"(",
"len",
"(",
"distance_maps",
".",
"shape",
")",
"==",
"3",
")",
"height",
",",
"width",
",",
"nb_keypoints",
"=",
"distance_maps",
".",
"shape",
"drop_if_not_found",
"=",
"False",
"if",
"if_not_found_coords",
"is",
"None",
":",
"drop_if_not_found",
"=",
"True",
"if_not_found_x",
"=",
"-",
"1",
"if_not_found_y",
"=",
"-",
"1",
"elif",
"isinstance",
"(",
"if_not_found_coords",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"ia",
".",
"do_assert",
"(",
"len",
"(",
"if_not_found_coords",
")",
"==",
"2",
")",
"if_not_found_x",
"=",
"if_not_found_coords",
"[",
"0",
"]",
"if_not_found_y",
"=",
"if_not_found_coords",
"[",
"1",
"]",
"elif",
"isinstance",
"(",
"if_not_found_coords",
",",
"dict",
")",
":",
"if_not_found_x",
"=",
"if_not_found_coords",
"[",
"\"x\"",
"]",
"if_not_found_y",
"=",
"if_not_found_coords",
"[",
"\"y\"",
"]",
"else",
":",
"raise",
"Exception",
"(",
"\"Expected if_not_found_coords to be None or tuple or list or dict, got %s.\"",
"%",
"(",
"type",
"(",
"if_not_found_coords",
")",
",",
")",
")",
"keypoints",
"=",
"[",
"]",
"for",
"i",
"in",
"sm",
".",
"xrange",
"(",
"nb_keypoints",
")",
":",
"# TODO introduce voting here among all distance values that have min/max values",
"if",
"inverted",
":",
"hitidx_flat",
"=",
"np",
".",
"argmax",
"(",
"distance_maps",
"[",
"...",
",",
"i",
"]",
")",
"else",
":",
"hitidx_flat",
"=",
"np",
".",
"argmin",
"(",
"distance_maps",
"[",
"...",
",",
"i",
"]",
")",
"hitidx_ndim",
"=",
"np",
".",
"unravel_index",
"(",
"hitidx_flat",
",",
"(",
"height",
",",
"width",
")",
")",
"if",
"not",
"inverted",
"and",
"threshold",
"is",
"not",
"None",
":",
"found",
"=",
"(",
"distance_maps",
"[",
"hitidx_ndim",
"[",
"0",
"]",
",",
"hitidx_ndim",
"[",
"1",
"]",
",",
"i",
"]",
"<",
"threshold",
")",
"elif",
"inverted",
"and",
"threshold",
"is",
"not",
"None",
":",
"found",
"=",
"(",
"distance_maps",
"[",
"hitidx_ndim",
"[",
"0",
"]",
",",
"hitidx_ndim",
"[",
"1",
"]",
",",
"i",
"]",
">=",
"threshold",
")",
"else",
":",
"found",
"=",
"True",
"if",
"found",
":",
"keypoints",
".",
"append",
"(",
"Keypoint",
"(",
"x",
"=",
"hitidx_ndim",
"[",
"1",
"]",
",",
"y",
"=",
"hitidx_ndim",
"[",
"0",
"]",
")",
")",
"else",
":",
"if",
"drop_if_not_found",
":",
"pass",
"# dont add the keypoint to the result list, i.e. drop it",
"else",
":",
"keypoints",
".",
"append",
"(",
"Keypoint",
"(",
"x",
"=",
"if_not_found_x",
",",
"y",
"=",
"if_not_found_y",
")",
")",
"out_shape",
"=",
"(",
"height",
",",
"width",
")",
"if",
"nb_channels",
"is",
"not",
"None",
":",
"out_shape",
"+=",
"(",
"nb_channels",
",",
")",
"return",
"KeypointsOnImage",
"(",
"keypoints",
",",
"shape",
"=",
"out_shape",
")"
] | Converts maps generated by ``to_distance_maps()`` back to a KeypointsOnImage object.
Parameters
----------
distance_maps : (H,W,N) ndarray
The distance maps. N is the number of keypoints.
inverted : bool, optional
Whether the given distance maps were generated in inverted or normal mode.
if_not_found_coords : tuple or list or dict or None, optional
Coordinates to use for keypoints that cannot be found in ``distance_maps``.
If this is a list/tuple, it must have two integer values.
If it is a dictionary, it must have the keys ``x`` and ``y``, with each
containing one integer value.
If this is None, then the keypoint will not be added to the final
KeypointsOnImage object.
threshold : float, optional
The search for keypoints works by searching for the argmin (non-inverted) or
argmax (inverted) in each channel. This parameter contains the maximum (non-inverted)
or minimum (inverted) value to accept in order to view a hit as a keypoint.
Use None to use no min/max.
nb_channels : None or int, optional
Number of channels of the image on which the keypoints are placed.
Some keypoint augmenters require that information.
If set to None, the keypoint's shape will be set
to ``(height, width)``, otherwise ``(height, width, nb_channels)``.
Returns
-------
imgaug.KeypointsOnImage
The extracted keypoints. | [
"Converts",
"maps",
"generated",
"by",
"to_distance_maps",
"()",
"back",
"to",
"a",
"KeypointsOnImage",
"object",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/kps.py#L740-L823 | valid |
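A round-trip sketch pairing from_distance_maps with to_distance_maps from the previous record. It assumes the function is exposed as a static method (its signature above takes no self) and that the import path matches the record's file path; the keypoint position is illustrative.
from imgaug.augmentables.kps import Keypoint, KeypointsOnImage
kps = KeypointsOnImage([Keypoint(x=30, y=40)], shape=(100, 100, 3))
maps = kps.to_distance_maps(inverted=True)
# Recover the keypoint from the maps; nb_channels=3 restores the 3-channel shape information.
kps_restored = KeypointsOnImage.from_distance_maps(maps, inverted=True, nb_channels=3)
print(kps_restored.keypoints[0].x, kps_restored.keypoints[0].y)  # 30 40
print(kps_restored.shape)                                        # (100, 100, 3)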
aleju/imgaug | imgaug/augmentables/kps.py | KeypointsOnImage.copy | def copy(self, keypoints=None, shape=None):
"""
Create a shallow copy of the KeypointsOnImage object.
Parameters
----------
keypoints : None or list of imgaug.Keypoint, optional
List of keypoints on the image. If ``None``, the instance's
keypoints will be copied.
shape : tuple of int, optional
The shape of the image on which the keypoints are placed.
If ``None``, the instance's shape will be copied.
Returns
-------
imgaug.KeypointsOnImage
Shallow copy.
"""
result = copy.copy(self)
if keypoints is not None:
result.keypoints = keypoints
if shape is not None:
result.shape = shape
return result | python | def copy(self, keypoints=None, shape=None):
"""
Create a shallow copy of the KeypointsOnImage object.
Parameters
----------
keypoints : None or list of imgaug.Keypoint, optional
List of keypoints on the image. If ``None``, the instance's
keypoints will be copied.
shape : tuple of int, optional
The shape of the image on which the keypoints are placed.
If ``None``, the instance's shape will be copied.
Returns
-------
imgaug.KeypointsOnImage
Shallow copy.
"""
result = copy.copy(self)
if keypoints is not None:
result.keypoints = keypoints
if shape is not None:
result.shape = shape
return result | [
"def",
"copy",
"(",
"self",
",",
"keypoints",
"=",
"None",
",",
"shape",
"=",
"None",
")",
":",
"result",
"=",
"copy",
".",
"copy",
"(",
"self",
")",
"if",
"keypoints",
"is",
"not",
"None",
":",
"result",
".",
"keypoints",
"=",
"keypoints",
"if",
"shape",
"is",
"not",
"None",
":",
"result",
".",
"shape",
"=",
"shape",
"return",
"result"
] | Create a shallow copy of the KeypointsOnImage object.
Parameters
----------
keypoints : None or list of imgaug.Keypoint, optional
List of keypoints on the image. If ``None``, the instance's
keypoints will be copied.
shape : tuple of int, optional
The shape of the image on which the keypoints are placed.
If ``None``, the instance's shape will be copied.
Returns
-------
imgaug.KeypointsOnImage
Shallow copy. | [
"Create",
"a",
"shallow",
"copy",
"of",
"the",
"KeypointsOnImage",
"object",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/kps.py#L825-L850 | valid |
aleju/imgaug | imgaug/augmentables/kps.py | KeypointsOnImage.deepcopy | def deepcopy(self, keypoints=None, shape=None):
"""
Create a deep copy of the KeypointsOnImage object.
Parameters
----------
keypoints : None or list of imgaug.Keypoint, optional
List of keypoints on the image. If ``None``, the instance's
keypoints will be copied.
shape : tuple of int, optional
The shape of the image on which the keypoints are placed.
If ``None``, the instance's shape will be copied.
Returns
-------
imgaug.KeypointsOnImage
Deep copy.
"""
# for some reason deepcopy is way slower here than manual copy
if keypoints is None:
keypoints = [kp.deepcopy() for kp in self.keypoints]
if shape is None:
shape = tuple(self.shape)
return KeypointsOnImage(keypoints, shape) | python | def deepcopy(self, keypoints=None, shape=None):
"""
Create a deep copy of the KeypointsOnImage object.
Parameters
----------
keypoints : None or list of imgaug.Keypoint, optional
List of keypoints on the image. If ``None``, the instance's
keypoints will be copied.
shape : tuple of int, optional
The shape of the image on which the keypoints are placed.
If ``None``, the instance's shape will be copied.
Returns
-------
imgaug.KeypointsOnImage
Deep copy.
"""
# for some reason deepcopy is way slower here than manual copy
if keypoints is None:
keypoints = [kp.deepcopy() for kp in self.keypoints]
if shape is None:
shape = tuple(self.shape)
return KeypointsOnImage(keypoints, shape) | [
"def",
"deepcopy",
"(",
"self",
",",
"keypoints",
"=",
"None",
",",
"shape",
"=",
"None",
")",
":",
"# for some reason deepcopy is way slower here than manual copy",
"if",
"keypoints",
"is",
"None",
":",
"keypoints",
"=",
"[",
"kp",
".",
"deepcopy",
"(",
")",
"for",
"kp",
"in",
"self",
".",
"keypoints",
"]",
"if",
"shape",
"is",
"None",
":",
"shape",
"=",
"tuple",
"(",
"self",
".",
"shape",
")",
"return",
"KeypointsOnImage",
"(",
"keypoints",
",",
"shape",
")"
] | Create a deep copy of the KeypointsOnImage object.
Parameters
----------
keypoints : None or list of imgaug.Keypoint, optional
List of keypoints on the image. If ``None``, the instance's
keypoints will be copied.
shape : tuple of int, optional
The shape of the image on which the keypoints are placed.
If ``None``, the instance's shape will be copied.
Returns
-------
imgaug.KeypointsOnImage
Deep copy. | [
"Create",
"a",
"deep",
"copy",
"of",
"the",
"KeypointsOnImage",
"object",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/kps.py#L852-L877 | valid |
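A short sketch contrasting copy() and deepcopy() from the two records above; per the implementations shown, the shallow copy shares the Keypoint objects while the deep copy duplicates them. The import path follows the record's file path and the coordinates are illustrative.
from imgaug.augmentables.kps import Keypoint, KeypointsOnImage
kps = KeypointsOnImage([Keypoint(x=10, y=20)], shape=(50, 50, 3))
shallow = kps.copy()
deep = kps.deepcopy()
print(shallow.keypoints[0] is kps.keypoints[0])  # True  - the same Keypoint objects are shared
print(deep.keypoints[0] is kps.keypoints[0])     # False - the Keypoint objects were duplicated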
aleju/imgaug | imgaug/augmentables/bbs.py | BoundingBox.contains | def contains(self, other):
"""
Estimate whether the bounding box contains a point.
Parameters
----------
other : tuple of number or imgaug.Keypoint
Point to check for.
Returns
-------
bool
True if the point is contained in the bounding box, False otherwise.
"""
if isinstance(other, tuple):
x, y = other
else:
x, y = other.x, other.y
return self.x1 <= x <= self.x2 and self.y1 <= y <= self.y2 | python | def contains(self, other):
"""
Estimate whether the bounding box contains a point.
Parameters
----------
other : tuple of number or imgaug.Keypoint
Point to check for.
Returns
-------
bool
True if the point is contained in the bounding box, False otherwise.
"""
if isinstance(other, tuple):
x, y = other
else:
x, y = other.x, other.y
return self.x1 <= x <= self.x2 and self.y1 <= y <= self.y2 | [
"def",
"contains",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"tuple",
")",
":",
"x",
",",
"y",
"=",
"other",
"else",
":",
"x",
",",
"y",
"=",
"other",
".",
"x",
",",
"other",
".",
"y",
"return",
"self",
".",
"x1",
"<=",
"x",
"<=",
"self",
".",
"x2",
"and",
"self",
".",
"y1",
"<=",
"y",
"<=",
"self",
".",
"y2"
] | Estimate whether the bounding box contains a point.
Parameters
----------
other : tuple of number or imgaug.Keypoint
Point to check for.
Returns
-------
bool
True if the point is contained in the bounding box, False otherwise. | [
"Estimate",
"whether",
"the",
"bounding",
"box",
"contains",
"a",
"point",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L176-L195 | valid |
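A small sketch for BoundingBox.contains, using both accepted argument types (a plain (x, y) tuple and a Keypoint). Coordinates are illustrative; the import paths follow the file paths listed in the records (imgaug/augmentables/bbs.py and kps.py).
from imgaug.augmentables.kps import Keypoint
from imgaug.augmentables.bbs import BoundingBox
bb = BoundingBox(x1=10, y1=20, x2=40, y2=60)
print(bb.contains((15, 25)))             # True  - inside on both axes
print(bb.contains(Keypoint(x=5, y=25)))  # False - x=5 lies left of x1=10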
aleju/imgaug | imgaug/augmentables/bbs.py | BoundingBox.project | def project(self, from_shape, to_shape):
"""
Project the bounding box onto a differently shaped image.
E.g. if the bounding box is on its original image at
x1=(10 of 100 pixels) and y1=(20 of 100 pixels) and is projected onto
a new image with size (width=200, height=200), its new position will
be (x1=20, y1=40). (Analogous for x2/y2.)
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int or ndarray
Shape of the original image. (Before resize.)
to_shape : tuple of int or ndarray
Shape of the new image. (After resize.)
Returns
-------
out : imgaug.BoundingBox
BoundingBox object with new coordinates.
"""
coords_proj = project_coords([(self.x1, self.y1), (self.x2, self.y2)],
from_shape, to_shape)
return self.copy(
x1=coords_proj[0][0],
y1=coords_proj[0][1],
x2=coords_proj[1][0],
y2=coords_proj[1][1],
label=self.label) | python | def project(self, from_shape, to_shape):
"""
Project the bounding box onto a differently shaped image.
E.g. if the bounding box is on its original image at
x1=(10 of 100 pixels) and y1=(20 of 100 pixels) and is projected onto
a new image with size (width=200, height=200), its new position will
be (x1=20, y1=40). (Analogous for x2/y2.)
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int or ndarray
Shape of the original image. (Before resize.)
to_shape : tuple of int or ndarray
Shape of the new image. (After resize.)
Returns
-------
out : imgaug.BoundingBox
BoundingBox object with new coordinates.
"""
coords_proj = project_coords([(self.x1, self.y1), (self.x2, self.y2)],
from_shape, to_shape)
return self.copy(
x1=coords_proj[0][0],
y1=coords_proj[0][1],
x2=coords_proj[1][0],
y2=coords_proj[1][1],
label=self.label) | [
"def",
"project",
"(",
"self",
",",
"from_shape",
",",
"to_shape",
")",
":",
"coords_proj",
"=",
"project_coords",
"(",
"[",
"(",
"self",
".",
"x1",
",",
"self",
".",
"y1",
")",
",",
"(",
"self",
".",
"x2",
",",
"self",
".",
"y2",
")",
"]",
",",
"from_shape",
",",
"to_shape",
")",
"return",
"self",
".",
"copy",
"(",
"x1",
"=",
"coords_proj",
"[",
"0",
"]",
"[",
"0",
"]",
",",
"y1",
"=",
"coords_proj",
"[",
"0",
"]",
"[",
"1",
"]",
",",
"x2",
"=",
"coords_proj",
"[",
"1",
"]",
"[",
"0",
"]",
",",
"y2",
"=",
"coords_proj",
"[",
"1",
"]",
"[",
"1",
"]",
",",
"label",
"=",
"self",
".",
"label",
")"
] | Project the bounding box onto a differently shaped image.
E.g. if the bounding box is on its original image at
x1=(10 of 100 pixels) and y1=(20 of 100 pixels) and is projected onto
a new image with size (width=200, height=200), its new position will
be (x1=20, y1=40). (Analogous for x2/y2.)
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int or ndarray
Shape of the original image. (Before resize.)
to_shape : tuple of int or ndarray
Shape of the new image. (After resize.)
Returns
-------
out : imgaug.BoundingBox
BoundingBox object with new coordinates. | [
"Project",
"the",
"bounding",
"box",
"onto",
"a",
"differently",
"shaped",
"image",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L198-L231 | valid |
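A sketch mirroring the resize scenario described in the project() docstring above (100x100 source projected onto a 200x200 target); the coordinates are illustrative.
from imgaug.augmentables.bbs import BoundingBox
bb = BoundingBox(x1=10, y1=20, x2=30, y2=40)
bb_proj = bb.project(from_shape=(100, 100), to_shape=(200, 200))
print(bb_proj.x1, bb_proj.y1, bb_proj.x2, bb_proj.y2)  # coordinates doubled: 20, 40, 60, 80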
aleju/imgaug | imgaug/augmentables/bbs.py | BoundingBox.extend | def extend(self, all_sides=0, top=0, right=0, bottom=0, left=0):
"""
Extend the size of the bounding box along its sides.
Parameters
----------
all_sides : number, optional
Value by which to extend the bounding box size along all sides.
top : number, optional
Value by which to extend the bounding box size along its top side.
right : number, optional
Value by which to extend the bounding box size along its right side.
bottom : number, optional
Value by which to extend the bounding box size along its bottom side.
left : number, optional
Value by which to extend the bounding box size along its left side.
Returns
-------
imgaug.BoundingBox
Extended bounding box.
"""
return BoundingBox(
x1=self.x1 - all_sides - left,
x2=self.x2 + all_sides + right,
y1=self.y1 - all_sides - top,
y2=self.y2 + all_sides + bottom
) | python | def extend(self, all_sides=0, top=0, right=0, bottom=0, left=0):
"""
Extend the size of the bounding box along its sides.
Parameters
----------
all_sides : number, optional
Value by which to extend the bounding box size along all sides.
top : number, optional
Value by which to extend the bounding box size along its top side.
right : number, optional
Value by which to extend the bounding box size along its right side.
bottom : number, optional
Value by which to extend the bounding box size along its bottom side.
left : number, optional
Value by which to extend the bounding box size along its left side.
Returns
-------
imgaug.BoundingBox
Extended bounding box.
"""
return BoundingBox(
x1=self.x1 - all_sides - left,
x2=self.x2 + all_sides + right,
y1=self.y1 - all_sides - top,
y2=self.y2 + all_sides + bottom
) | [
"def",
"extend",
"(",
"self",
",",
"all_sides",
"=",
"0",
",",
"top",
"=",
"0",
",",
"right",
"=",
"0",
",",
"bottom",
"=",
"0",
",",
"left",
"=",
"0",
")",
":",
"return",
"BoundingBox",
"(",
"x1",
"=",
"self",
".",
"x1",
"-",
"all_sides",
"-",
"left",
",",
"x2",
"=",
"self",
".",
"x2",
"+",
"all_sides",
"+",
"right",
",",
"y1",
"=",
"self",
".",
"y1",
"-",
"all_sides",
"-",
"top",
",",
"y2",
"=",
"self",
".",
"y2",
"+",
"all_sides",
"+",
"bottom",
")"
] | Extend the size of the bounding box along its sides.
Parameters
----------
all_sides : number, optional
Value by which to extend the bounding box size along all sides.
top : number, optional
Value by which to extend the bounding box size along its top side.
right : number, optional
Value by which to extend the bounding box size along its right side.
bottom : number, optional
Value by which to extend the bounding box size along its bottom side.
left : number, optional
Value by which to extend the bounding box size along its left side.
Returns
-------
imgaug.BoundingBox
Extended bounding box. | [
"Extend",
"the",
"size",
"of",
"the",
"bounding",
"box",
"along",
"its",
"sides",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L233-L265 | valid |
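A sketch for extend(), combining the all_sides argument with a per-side value as the signature above allows; the numbers are illustrative.
from imgaug.augmentables.bbs import BoundingBox
bb = BoundingBox(x1=10, y1=10, x2=20, y2=20)
bb_ext = bb.extend(all_sides=2, left=3)  # grow by 2 on every side plus 3 more on the left
print(bb_ext.x1, bb_ext.y1, bb_ext.x2, bb_ext.y2)  # 5 8 22 22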
aleju/imgaug | imgaug/augmentables/bbs.py | BoundingBox.intersection | def intersection(self, other, default=None):
"""
Compute the intersection bounding box of this bounding box and another one.
Note that in extreme cases, the intersection can be a single point, meaning that the intersection bounding box
will exist, but then also has a height and width of zero.
Parameters
----------
other : imgaug.BoundingBox
Other bounding box with which to generate the intersection.
default : any, optional
Default value to return if there is no intersection.
Returns
-------
imgaug.BoundingBox or any
Intersection bounding box of the two bounding boxes if there is an intersection.
If there is no intersection, the default value will be returned, which can be anything.
"""
x1_i = max(self.x1, other.x1)
y1_i = max(self.y1, other.y1)
x2_i = min(self.x2, other.x2)
y2_i = min(self.y2, other.y2)
if x1_i > x2_i or y1_i > y2_i:
return default
else:
return BoundingBox(x1=x1_i, y1=y1_i, x2=x2_i, y2=y2_i) | python | def intersection(self, other, default=None):
"""
Compute the intersection bounding box of this bounding box and another one.
Note that in extreme cases, the intersection can be a single point, meaning that the intersection bounding box
will exist, but then also has a height and width of zero.
Parameters
----------
other : imgaug.BoundingBox
Other bounding box with which to generate the intersection.
default : any, optional
Default value to return if there is no intersection.
Returns
-------
imgaug.BoundingBox or any
Intersection bounding box of the two bounding boxes if there is an intersection.
If there is no intersection, the default value will be returned, which can be anything.
"""
x1_i = max(self.x1, other.x1)
y1_i = max(self.y1, other.y1)
x2_i = min(self.x2, other.x2)
y2_i = min(self.y2, other.y2)
if x1_i > x2_i or y1_i > y2_i:
return default
else:
return BoundingBox(x1=x1_i, y1=y1_i, x2=x2_i, y2=y2_i) | [
"def",
"intersection",
"(",
"self",
",",
"other",
",",
"default",
"=",
"None",
")",
":",
"x1_i",
"=",
"max",
"(",
"self",
".",
"x1",
",",
"other",
".",
"x1",
")",
"y1_i",
"=",
"max",
"(",
"self",
".",
"y1",
",",
"other",
".",
"y1",
")",
"x2_i",
"=",
"min",
"(",
"self",
".",
"x2",
",",
"other",
".",
"x2",
")",
"y2_i",
"=",
"min",
"(",
"self",
".",
"y2",
",",
"other",
".",
"y2",
")",
"if",
"x1_i",
">",
"x2_i",
"or",
"y1_i",
">",
"y2_i",
":",
"return",
"default",
"else",
":",
"return",
"BoundingBox",
"(",
"x1",
"=",
"x1_i",
",",
"y1",
"=",
"y1_i",
",",
"x2",
"=",
"x2_i",
",",
"y2",
"=",
"y2_i",
")"
] | Compute the intersection bounding box of this bounding box and another one.
Note that in extreme cases, the intersection can be a single point, meaning that the intersection bounding box
will exist, but then also has a height and width of zero.
Parameters
----------
other : imgaug.BoundingBox
Other bounding box with which to generate the intersection.
default : any, optional
Default value to return if there is no intersection.
Returns
-------
imgaug.BoundingBox or any
Intersection bounding box of the two bounding boxes if there is an intersection.
If there is no intersection, the default value will be returned, which can be anything. | [
"Compute",
"the",
"intersection",
"bounding",
"box",
"of",
"this",
"bounding",
"box",
"and",
"another",
"one",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L267-L296 | valid |
aleju/imgaug | imgaug/augmentables/bbs.py | BoundingBox.union | def union(self, other):
"""
Compute the union bounding box of this bounding box and another one.
This is equivalent to drawing a bounding box around all corner points of both
bounding boxes.
Parameters
----------
other : imgaug.BoundingBox
Other bounding box with which to generate the union.
Returns
-------
imgaug.BoundingBox
Union bounding box of the two bounding boxes.
"""
return BoundingBox(
x1=min(self.x1, other.x1),
y1=min(self.y1, other.y1),
x2=max(self.x2, other.x2),
y2=max(self.y2, other.y2),
) | python | def union(self, other):
"""
Compute the union bounding box of this bounding box and another one.
This is equivalent to drawing a bounding box around all corner points of both
bounding boxes.
Parameters
----------
other : imgaug.BoundingBox
Other bounding box with which to generate the union.
Returns
-------
imgaug.BoundingBox
Union bounding box of the two bounding boxes.
"""
return BoundingBox(
x1=min(self.x1, other.x1),
y1=min(self.y1, other.y1),
x2=max(self.x2, other.x2),
y2=max(self.y2, other.y2),
) | [
"def",
"union",
"(",
"self",
",",
"other",
")",
":",
"return",
"BoundingBox",
"(",
"x1",
"=",
"min",
"(",
"self",
".",
"x1",
",",
"other",
".",
"x1",
")",
",",
"y1",
"=",
"min",
"(",
"self",
".",
"y1",
",",
"other",
".",
"y1",
")",
",",
"x2",
"=",
"max",
"(",
"self",
".",
"x2",
",",
"other",
".",
"x2",
")",
",",
"y2",
"=",
"max",
"(",
"self",
".",
"y2",
",",
"other",
".",
"y2",
")",
",",
")"
] | Compute the union bounding box of this bounding box and another one.
This is equivalent to drawing a bounding box around all corner points of both
bounding boxes.
Parameters
----------
other : imgaug.BoundingBox
Other bounding box with which to generate the union.
Returns
-------
imgaug.BoundingBox
Union bounding box of the two bounding boxes. | [
"Compute",
"the",
"union",
"bounding",
"box",
"of",
"this",
"bounding",
"box",
"and",
"another",
"one",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L298-L321 | valid |
aleju/imgaug | imgaug/augmentables/bbs.py | BoundingBox.iou | def iou(self, other):
"""
Compute the IoU of this bounding box with another one.
IoU is the intersection over union, defined as::
``area(intersection(A, B)) / area(union(A, B))``
``= area(intersection(A, B)) / (area(A) + area(B) - area(intersection(A, B)))``
Parameters
----------
other : imgaug.BoundingBox
Other bounding box with which to compare.
Returns
-------
float
IoU between the two bounding boxes.
"""
inters = self.intersection(other)
if inters is None:
return 0.0
else:
area_union = self.area + other.area - inters.area
return inters.area / area_union if area_union > 0 else 0.0 | python | def iou(self, other):
"""
Compute the IoU of this bounding box with another one.
IoU is the intersection over union, defined as::
``area(intersection(A, B)) / area(union(A, B))``
``= area(intersection(A, B)) / (area(A) + area(B) - area(intersection(A, B)))``
Parameters
----------
other : imgaug.BoundingBox
Other bounding box with which to compare.
Returns
-------
float
IoU between the two bounding boxes.
"""
inters = self.intersection(other)
if inters is None:
return 0.0
else:
area_union = self.area + other.area - inters.area
return inters.area / area_union if area_union > 0 else 0.0 | [
"def",
"iou",
"(",
"self",
",",
"other",
")",
":",
"inters",
"=",
"self",
".",
"intersection",
"(",
"other",
")",
"if",
"inters",
"is",
"None",
":",
"return",
"0.0",
"else",
":",
"area_union",
"=",
"self",
".",
"area",
"+",
"other",
".",
"area",
"-",
"inters",
".",
"area",
"return",
"inters",
".",
"area",
"/",
"area_union",
"if",
"area_union",
">",
"0",
"else",
"0.0"
] | Compute the IoU of this bounding box with another one.
IoU is the intersection over union, defined as::
``area(intersection(A, B)) / area(union(A, B))``
``= area(intersection(A, B)) / (area(A) + area(B) - area(intersection(A, B)))``
Parameters
----------
other : imgaug.BoundingBox
Other bounding box with which to compare.
Returns
-------
float
IoU between the two bounding boxes. | [
"Compute",
"the",
"IoU",
"of",
"this",
"bounding",
"box",
"with",
"another",
"one",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L323-L348 | valid |
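A combined sketch for the three preceding records (intersection, union, iou) on two overlapping boxes; the coordinates are illustrative and the IoU value in the comment follows from the formula given in the docstring above.
from imgaug.augmentables.bbs import BoundingBox
bb_a = BoundingBox(x1=0, y1=0, x2=10, y2=10)
bb_b = BoundingBox(x1=5, y1=5, x2=15, y2=15)
inter = bb_a.intersection(bb_b)   # BoundingBox(x1=5, y1=5, x2=10, y2=10)
union = bb_a.union(bb_b)          # BoundingBox(x1=0, y1=0, x2=15, y2=15)
print(inter.area, union.area)     # 25 225
print(bb_a.iou(bb_b))             # 25 / (100 + 100 - 25) = ~0.143
print(bb_a.intersection(BoundingBox(x1=20, y1=20, x2=30, y2=30)))  # None - boxes do not overlap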
aleju/imgaug | imgaug/augmentables/bbs.py | BoundingBox.is_fully_within_image | def is_fully_within_image(self, image):
"""
Estimate whether the bounding box is fully inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
True if the bounding box is fully inside the image area. False otherwise.
"""
shape = normalize_shape(image)
height, width = shape[0:2]
return self.x1 >= 0 and self.x2 < width and self.y1 >= 0 and self.y2 < height | python | def is_fully_within_image(self, image):
"""
Estimate whether the bounding box is fully inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
True if the bounding box is fully inside the image area. False otherwise.
"""
shape = normalize_shape(image)
height, width = shape[0:2]
return self.x1 >= 0 and self.x2 < width and self.y1 >= 0 and self.y2 < height | [
"def",
"is_fully_within_image",
"(",
"self",
",",
"image",
")",
":",
"shape",
"=",
"normalize_shape",
"(",
"image",
")",
"height",
",",
"width",
"=",
"shape",
"[",
"0",
":",
"2",
"]",
"return",
"self",
".",
"x1",
">=",
"0",
"and",
"self",
".",
"x2",
"<",
"width",
"and",
"self",
".",
"y1",
">=",
"0",
"and",
"self",
".",
"y2",
"<",
"height"
] | Estimate whether the bounding box is fully inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
True if the bounding box is fully inside the image area. False otherwise. | [
"Estimate",
"whether",
"the",
"bounding",
"box",
"is",
"fully",
"inside",
"the",
"image",
"area",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L350-L370 | valid |
aleju/imgaug | imgaug/augmentables/bbs.py | BoundingBox.is_partly_within_image | def is_partly_within_image(self, image):
"""
Estimate whether the bounding box is at least partially inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
True if the bounding box is at least partially inside the image area. False otherwise.
"""
shape = normalize_shape(image)
height, width = shape[0:2]
eps = np.finfo(np.float32).eps
img_bb = BoundingBox(x1=0, x2=width-eps, y1=0, y2=height-eps)
return self.intersection(img_bb) is not None | python | def is_partly_within_image(self, image):
"""
Estimate whether the bounding box is at least partially inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
True if the bounding box is at least partially inside the image area. False otherwise.
"""
shape = normalize_shape(image)
height, width = shape[0:2]
eps = np.finfo(np.float32).eps
img_bb = BoundingBox(x1=0, x2=width-eps, y1=0, y2=height-eps)
return self.intersection(img_bb) is not None | [
"def",
"is_partly_within_image",
"(",
"self",
",",
"image",
")",
":",
"shape",
"=",
"normalize_shape",
"(",
"image",
")",
"height",
",",
"width",
"=",
"shape",
"[",
"0",
":",
"2",
"]",
"eps",
"=",
"np",
".",
"finfo",
"(",
"np",
".",
"float32",
")",
".",
"eps",
"img_bb",
"=",
"BoundingBox",
"(",
"x1",
"=",
"0",
",",
"x2",
"=",
"width",
"-",
"eps",
",",
"y1",
"=",
"0",
",",
"y2",
"=",
"height",
"-",
"eps",
")",
"return",
"self",
".",
"intersection",
"(",
"img_bb",
")",
"is",
"not",
"None"
] | Estimate whether the bounding box is at least partially inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
True if the bounding box is at least partially inside the image area. False otherwise. | [
"Estimate",
"whether",
"the",
"bounding",
"box",
"is",
"at",
"least",
"partially",
"inside",
"the",
"image",
"area",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L372-L394 | valid |
aleju/imgaug | imgaug/augmentables/bbs.py | BoundingBox.is_out_of_image | def is_out_of_image(self, image, fully=True, partly=False):
"""
Estimate whether the bounding box is partially or fully outside of the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is
assumed to represent the image shape and must contain at least two integers.
fully : bool, optional
Whether to return True if the bounding box is fully outside of the image area.
partly : bool, optional
Whether to return True if the bounding box is at least partially outside of the
image area.
Returns
-------
bool
True if the bounding box is partially/fully outside of the image area, depending
on defined parameters. False otherwise.
"""
if self.is_fully_within_image(image):
return False
elif self.is_partly_within_image(image):
return partly
else:
return fully | python | def is_out_of_image(self, image, fully=True, partly=False):
"""
Estimate whether the bounding box is partially or fully outside of the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is
assumed to represent the image shape and must contain at least two integers.
fully : bool, optional
Whether to return True if the bounding box is fully outside of the image area.
partly : bool, optional
Whether to return True if the bounding box is at least partially outside of the
image area.
Returns
-------
bool
True if the bounding box is partially/fully outside of the image area, depending
on defined parameters. False otherwise.
"""
if self.is_fully_within_image(image):
return False
elif self.is_partly_within_image(image):
return partly
else:
return fully | [
"def",
"is_out_of_image",
"(",
"self",
",",
"image",
",",
"fully",
"=",
"True",
",",
"partly",
"=",
"False",
")",
":",
"if",
"self",
".",
"is_fully_within_image",
"(",
"image",
")",
":",
"return",
"False",
"elif",
"self",
".",
"is_partly_within_image",
"(",
"image",
")",
":",
"return",
"partly",
"else",
":",
"return",
"fully"
] | Estimate whether the bounding box is partially or fully outside of the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is
assumed to represent the image shape and must contain at least two integers.
fully : bool, optional
Whether to return True if the bounding box is fully outside of the image area.
partly : bool, optional
Whether to return True if the bounding box is at least partially outside of the
image area.
Returns
-------
bool
True if the bounding box is partially/fully outside of the image area, depending
on defined parameters. False otherwise. | [
"Estimate",
"whether",
"the",
"bounding",
"box",
"is",
"partially",
"or",
"fully",
"outside",
"of",
"the",
"image",
"area",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L396-L425 | valid |
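A combined sketch for is_fully_within_image, is_partly_within_image and is_out_of_image; per the docstrings above, a plain shape tuple can be passed in place of an image array. The box and shape are illustrative.
from imgaug.augmentables.bbs import BoundingBox
image_shape = (100, 100, 3)
bb = BoundingBox(x1=80, y1=80, x2=120, y2=120)  # hangs over the right and bottom borders
print(bb.is_fully_within_image(image_shape))                      # False
print(bb.is_partly_within_image(image_shape))                     # True
print(bb.is_out_of_image(image_shape, fully=True, partly=False))  # False - only partially outside and partly=False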
aleju/imgaug | imgaug/augmentables/bbs.py | BoundingBox.clip_out_of_image | def clip_out_of_image(self, image):
"""
Clip off all parts of the bounding box that are outside of the image.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use for the clipping of the bounding box.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape and must contain at least two integers.
Returns
-------
result : imgaug.BoundingBox
Bounding box, clipped to fall within the image dimensions.
"""
shape = normalize_shape(image)
height, width = shape[0:2]
ia.do_assert(height > 0)
ia.do_assert(width > 0)
eps = np.finfo(np.float32).eps
x1 = np.clip(self.x1, 0, width - eps)
x2 = np.clip(self.x2, 0, width - eps)
y1 = np.clip(self.y1, 0, height - eps)
y2 = np.clip(self.y2, 0, height - eps)
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2,
label=self.label
) | python | def clip_out_of_image(self, image):
"""
Clip off all parts of the bounding box that are outside of the image.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use for the clipping of the bounding box.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape and must contain at least two integers.
Returns
-------
result : imgaug.BoundingBox
Bounding box, clipped to fall within the image dimensions.
"""
shape = normalize_shape(image)
height, width = shape[0:2]
ia.do_assert(height > 0)
ia.do_assert(width > 0)
eps = np.finfo(np.float32).eps
x1 = np.clip(self.x1, 0, width - eps)
x2 = np.clip(self.x2, 0, width - eps)
y1 = np.clip(self.y1, 0, height - eps)
y2 = np.clip(self.y2, 0, height - eps)
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2,
label=self.label
) | [
"def",
"clip_out_of_image",
"(",
"self",
",",
"image",
")",
":",
"shape",
"=",
"normalize_shape",
"(",
"image",
")",
"height",
",",
"width",
"=",
"shape",
"[",
"0",
":",
"2",
"]",
"ia",
".",
"do_assert",
"(",
"height",
">",
"0",
")",
"ia",
".",
"do_assert",
"(",
"width",
">",
"0",
")",
"eps",
"=",
"np",
".",
"finfo",
"(",
"np",
".",
"float32",
")",
".",
"eps",
"x1",
"=",
"np",
".",
"clip",
"(",
"self",
".",
"x1",
",",
"0",
",",
"width",
"-",
"eps",
")",
"x2",
"=",
"np",
".",
"clip",
"(",
"self",
".",
"x2",
",",
"0",
",",
"width",
"-",
"eps",
")",
"y1",
"=",
"np",
".",
"clip",
"(",
"self",
".",
"y1",
",",
"0",
",",
"height",
"-",
"eps",
")",
"y2",
"=",
"np",
".",
"clip",
"(",
"self",
".",
"y2",
",",
"0",
",",
"height",
"-",
"eps",
")",
"return",
"self",
".",
"copy",
"(",
"x1",
"=",
"x1",
",",
"y1",
"=",
"y1",
",",
"x2",
"=",
"x2",
",",
"y2",
"=",
"y2",
",",
"label",
"=",
"self",
".",
"label",
")"
] | Clip off all parts of the bounding box that are outside of the image.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use for the clipping of the bounding box.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape and must contain at least two integers.
Returns
-------
result : imgaug.BoundingBox
Bounding box, clipped to fall within the image dimensions. | [
"Clip",
"off",
"all",
"parts",
"of",
"the",
"bounding",
"box",
"that",
"are",
"outside",
"of",
"the",
"image",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L433-L468 | valid |
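A sketch for clip_out_of_image on a box that sticks out on two sides; per the implementation above, coordinates are clipped to just inside the image. The values are illustrative.
from imgaug.augmentables.bbs import BoundingBox
bb = BoundingBox(x1=-10, y1=20, x2=150, y2=90)
bb_clipped = bb.clip_out_of_image((100, 100, 3))
print(bb_clipped.x1, bb_clipped.y1, bb_clipped.y2)  # 0.0 20.0 90.0
print(bb_clipped.x2 < 100)                          # True - x2 clipped to just below the width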
aleju/imgaug | imgaug/augmentables/bbs.py | BoundingBox.shift | def shift(self, top=None, right=None, bottom=None, left=None):
"""
Shift the bounding box from one or more image sides, i.e. move it on the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift the bounding box from the top.
right : None or int, optional
Amount of pixels by which to shift the bounding box from the right.
bottom : None or int, optional
Amount of pixels by which to shift the bounding box from the bottom.
left : None or int, optional
Amount of pixels by which to shift the bounding box from the left.
Returns
-------
result : imgaug.BoundingBox
Shifted bounding box.
"""
top = top if top is not None else 0
right = right if right is not None else 0
bottom = bottom if bottom is not None else 0
left = left if left is not None else 0
return self.copy(
x1=self.x1+left-right,
x2=self.x2+left-right,
y1=self.y1+top-bottom,
y2=self.y2+top-bottom
) | python | def shift(self, top=None, right=None, bottom=None, left=None):
"""
Shift the bounding box from one or more image sides, i.e. move it on the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift the bounding box from the top.
right : None or int, optional
Amount of pixels by which to shift the bounding box from the right.
bottom : None or int, optional
Amount of pixels by which to shift the bounding box from the bottom.
left : None or int, optional
Amount of pixels by which to shift the bounding box from the left.
Returns
-------
result : imgaug.BoundingBox
Shifted bounding box.
"""
top = top if top is not None else 0
right = right if right is not None else 0
bottom = bottom if bottom is not None else 0
left = left if left is not None else 0
return self.copy(
x1=self.x1+left-right,
x2=self.x2+left-right,
y1=self.y1+top-bottom,
y2=self.y2+top-bottom
) | [
"def",
"shift",
"(",
"self",
",",
"top",
"=",
"None",
",",
"right",
"=",
"None",
",",
"bottom",
"=",
"None",
",",
"left",
"=",
"None",
")",
":",
"top",
"=",
"top",
"if",
"top",
"is",
"not",
"None",
"else",
"0",
"right",
"=",
"right",
"if",
"right",
"is",
"not",
"None",
"else",
"0",
"bottom",
"=",
"bottom",
"if",
"bottom",
"is",
"not",
"None",
"else",
"0",
"left",
"=",
"left",
"if",
"left",
"is",
"not",
"None",
"else",
"0",
"return",
"self",
".",
"copy",
"(",
"x1",
"=",
"self",
".",
"x1",
"+",
"left",
"-",
"right",
",",
"x2",
"=",
"self",
".",
"x2",
"+",
"left",
"-",
"right",
",",
"y1",
"=",
"self",
".",
"y1",
"+",
"top",
"-",
"bottom",
",",
"y2",
"=",
"self",
".",
"y2",
"+",
"top",
"-",
"bottom",
")"
] | Shift the bounding box from one or more image sides, i.e. move it on the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift the bounding box from the top.
right : None or int, optional
Amount of pixels by which to shift the bounding box from the right.
bottom : None or int, optional
Amount of pixels by which to shift the bounding box from the bottom.
left : None or int, optional
Amount of pixels by which to shift the bounding box from the left.
Returns
-------
result : imgaug.BoundingBox
Shifted bounding box. | [
"Shift",
"the",
"bounding",
"box",
"from",
"one",
"or",
"more",
"image",
"sides",
"i",
".",
"e",
".",
"move",
"it",
"on",
"the",
"x",
"/",
"y",
"-",
"axis",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L471-L504 | valid |
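A sketch for shift(); per the implementation above, left/right move the box along the x-axis and top/bottom along the y-axis. The values are illustrative.
from imgaug.augmentables.bbs import BoundingBox
bb = BoundingBox(x1=10, y1=10, x2=30, y2=30)
bb_shifted = bb.shift(left=5, top=2)  # push 5px away from the left edge, 2px away from the top
print(bb_shifted.x1, bb_shifted.y1, bb_shifted.x2, bb_shifted.y2)  # 15 12 35 32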
aleju/imgaug | imgaug/augmentables/bbs.py | BoundingBox.draw_on_image | def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=1,
copy=True, raise_if_out_of_image=False, thickness=None):
"""
Draw the bounding box on an image.
Parameters
----------
image : (H,W,C) ndarray(uint8)
The image onto which to draw the bounding box.
color : iterable of int, optional
The color to use, corresponding to the channel layout of the image. Usually RGB.
alpha : float, optional
The transparency of the drawn bounding box, where 1.0 denotes no transparency and
0.0 is invisible.
size : int, optional
The thickness of the bounding box in pixels. If the value is larger than 1, then
additional pixels will be added around the bounding box (i.e. extension towards the
outside).
copy : bool, optional
Whether to copy the input image or change it in-place.
raise_if_out_of_image : bool, optional
Whether to raise an error if the bounding box is fully outside of the
image. If set to False, no error will be raised and only the parts inside the image
will be drawn.
thickness : None or int, optional
Deprecated.
Returns
-------
result : (H,W,C) ndarray(uint8)
Image with bounding box drawn on it.
"""
if thickness is not None:
ia.warn_deprecated(
"Usage of argument 'thickness' in BoundingBox.draw_on_image() "
"is deprecated. The argument was renamed to 'size'."
)
size = thickness
if raise_if_out_of_image and self.is_out_of_image(image):
raise Exception("Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f on image with shape %s." % (
self.x1, self.y1, self.x2, self.y2, image.shape))
result = np.copy(image) if copy else image
if isinstance(color, (tuple, list)):
color = np.uint8(color)
for i in range(size):
y1, y2, x1, x2 = self.y1_int, self.y2_int, self.x1_int, self.x2_int
# When y values get into the range (H-0.5, H), the *_int functions round them to H.
# That is technically sensible, but in the case of drawing means that the border lies
# just barely outside of the image, making the border disappear, even though the BB
# is fully inside the image. Here we correct for that because of beauty reasons.
# Same is the case for x coordinates.
if self.is_fully_within_image(image):
y1 = np.clip(y1, 0, image.shape[0]-1)
y2 = np.clip(y2, 0, image.shape[0]-1)
x1 = np.clip(x1, 0, image.shape[1]-1)
x2 = np.clip(x2, 0, image.shape[1]-1)
y = [y1-i, y1-i, y2+i, y2+i]
x = [x1-i, x2+i, x2+i, x1-i]
rr, cc = skimage.draw.polygon_perimeter(y, x, shape=result.shape)
if alpha >= 0.99:
result[rr, cc, :] = color
else:
if ia.is_float_array(result):
result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
result = np.clip(result, 0, 255)
else:
input_dtype = result.dtype
result = result.astype(np.float32)
result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
result = np.clip(result, 0, 255).astype(input_dtype)
return result | python | def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=1,
copy=True, raise_if_out_of_image=False, thickness=None):
"""
Draw the bounding box on an image.
Parameters
----------
image : (H,W,C) ndarray(uint8)
The image onto which to draw the bounding box.
color : iterable of int, optional
The color to use, corresponding to the channel layout of the image. Usually RGB.
alpha : float, optional
The transparency of the drawn bounding box, where 1.0 denotes no transparency and
0.0 is invisible.
size : int, optional
The thickness of the bounding box in pixels. If the value is larger than 1, then
additional pixels will be added around the bounding box (i.e. extension towards the
outside).
copy : bool, optional
Whether to copy the input image or change it in-place.
raise_if_out_of_image : bool, optional
Whether to raise an error if the bounding box is fully outside of the
image. If set to False, no error will be raised and only the parts inside the image
will be drawn.
thickness : None or int, optional
Deprecated.
Returns
-------
result : (H,W,C) ndarray(uint8)
Image with bounding box drawn on it.
"""
if thickness is not None:
ia.warn_deprecated(
"Usage of argument 'thickness' in BoundingBox.draw_on_image() "
"is deprecated. The argument was renamed to 'size'."
)
size = thickness
if raise_if_out_of_image and self.is_out_of_image(image):
raise Exception("Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f on image with shape %s." % (
self.x1, self.y1, self.x2, self.y2, image.shape))
result = np.copy(image) if copy else image
if isinstance(color, (tuple, list)):
color = np.uint8(color)
for i in range(size):
y1, y2, x1, x2 = self.y1_int, self.y2_int, self.x1_int, self.x2_int
# When y values get into the range (H-0.5, H), the *_int functions round them to H.
# That is technically sensible, but in the case of drawing means that the border lies
# just barely outside of the image, making the border disappear, even though the BB
# is fully inside the image. Here we correct for that because of beauty reasons.
# Same is the case for x coordinates.
if self.is_fully_within_image(image):
y1 = np.clip(y1, 0, image.shape[0]-1)
y2 = np.clip(y2, 0, image.shape[0]-1)
x1 = np.clip(x1, 0, image.shape[1]-1)
x2 = np.clip(x2, 0, image.shape[1]-1)
y = [y1-i, y1-i, y2+i, y2+i]
x = [x1-i, x2+i, x2+i, x1-i]
rr, cc = skimage.draw.polygon_perimeter(y, x, shape=result.shape)
if alpha >= 0.99:
result[rr, cc, :] = color
else:
if ia.is_float_array(result):
result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
result = np.clip(result, 0, 255)
else:
input_dtype = result.dtype
result = result.astype(np.float32)
result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
result = np.clip(result, 0, 255).astype(input_dtype)
return result | [
"def",
"draw_on_image",
"(",
"self",
",",
"image",
",",
"color",
"=",
"(",
"0",
",",
"255",
",",
"0",
")",
",",
"alpha",
"=",
"1.0",
",",
"size",
"=",
"1",
",",
"copy",
"=",
"True",
",",
"raise_if_out_of_image",
"=",
"False",
",",
"thickness",
"=",
"None",
")",
":",
"if",
"thickness",
"is",
"not",
"None",
":",
"ia",
".",
"warn_deprecated",
"(",
"\"Usage of argument 'thickness' in BoundingBox.draw_on_image() \"",
"\"is deprecated. The argument was renamed to 'size'.\"",
")",
"size",
"=",
"thickness",
"if",
"raise_if_out_of_image",
"and",
"self",
".",
"is_out_of_image",
"(",
"image",
")",
":",
"raise",
"Exception",
"(",
"\"Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f on image with shape %s.\"",
"%",
"(",
"self",
".",
"x1",
",",
"self",
".",
"y1",
",",
"self",
".",
"x2",
",",
"self",
".",
"y2",
",",
"image",
".",
"shape",
")",
")",
"result",
"=",
"np",
".",
"copy",
"(",
"image",
")",
"if",
"copy",
"else",
"image",
"if",
"isinstance",
"(",
"color",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"color",
"=",
"np",
".",
"uint8",
"(",
"color",
")",
"for",
"i",
"in",
"range",
"(",
"size",
")",
":",
"y1",
",",
"y2",
",",
"x1",
",",
"x2",
"=",
"self",
".",
"y1_int",
",",
"self",
".",
"y2_int",
",",
"self",
".",
"x1_int",
",",
"self",
".",
"x2_int",
"# When y values get into the range (H-0.5, H), the *_int functions round them to H.",
"# That is technically sensible, but in the case of drawing means that the border lies",
"# just barely outside of the image, making the border disappear, even though the BB",
"# is fully inside the image. Here we correct for that because of beauty reasons.",
"# Same is the case for x coordinates.",
"if",
"self",
".",
"is_fully_within_image",
"(",
"image",
")",
":",
"y1",
"=",
"np",
".",
"clip",
"(",
"y1",
",",
"0",
",",
"image",
".",
"shape",
"[",
"0",
"]",
"-",
"1",
")",
"y2",
"=",
"np",
".",
"clip",
"(",
"y2",
",",
"0",
",",
"image",
".",
"shape",
"[",
"0",
"]",
"-",
"1",
")",
"x1",
"=",
"np",
".",
"clip",
"(",
"x1",
",",
"0",
",",
"image",
".",
"shape",
"[",
"1",
"]",
"-",
"1",
")",
"x2",
"=",
"np",
".",
"clip",
"(",
"x2",
",",
"0",
",",
"image",
".",
"shape",
"[",
"1",
"]",
"-",
"1",
")",
"y",
"=",
"[",
"y1",
"-",
"i",
",",
"y1",
"-",
"i",
",",
"y2",
"+",
"i",
",",
"y2",
"+",
"i",
"]",
"x",
"=",
"[",
"x1",
"-",
"i",
",",
"x2",
"+",
"i",
",",
"x2",
"+",
"i",
",",
"x1",
"-",
"i",
"]",
"rr",
",",
"cc",
"=",
"skimage",
".",
"draw",
".",
"polygon_perimeter",
"(",
"y",
",",
"x",
",",
"shape",
"=",
"result",
".",
"shape",
")",
"if",
"alpha",
">=",
"0.99",
":",
"result",
"[",
"rr",
",",
"cc",
",",
":",
"]",
"=",
"color",
"else",
":",
"if",
"ia",
".",
"is_float_array",
"(",
"result",
")",
":",
"result",
"[",
"rr",
",",
"cc",
",",
":",
"]",
"=",
"(",
"1",
"-",
"alpha",
")",
"*",
"result",
"[",
"rr",
",",
"cc",
",",
":",
"]",
"+",
"alpha",
"*",
"color",
"result",
"=",
"np",
".",
"clip",
"(",
"result",
",",
"0",
",",
"255",
")",
"else",
":",
"input_dtype",
"=",
"result",
".",
"dtype",
"result",
"=",
"result",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"result",
"[",
"rr",
",",
"cc",
",",
":",
"]",
"=",
"(",
"1",
"-",
"alpha",
")",
"*",
"result",
"[",
"rr",
",",
"cc",
",",
":",
"]",
"+",
"alpha",
"*",
"color",
"result",
"=",
"np",
".",
"clip",
"(",
"result",
",",
"0",
",",
"255",
")",
".",
"astype",
"(",
"input_dtype",
")",
"return",
"result"
] | Draw the bounding box on an image.
Parameters
----------
image : (H,W,C) ndarray(uint8)
The image onto which to draw the bounding box.
color : iterable of int, optional
The color to use, corresponding to the channel layout of the image. Usually RGB.
alpha : float, optional
The transparency of the drawn bounding box, where 1.0 denotes no transparency and
0.0 is invisible.
size : int, optional
The thickness of the bounding box in pixels. If the value is larger than 1, then
additional pixels will be added around the bounding box (i.e. extension towards the
outside).
copy : bool, optional
Whether to copy the input image or change it in-place.
raise_if_out_of_image : bool, optional
Whether to raise an error if the bounding box is fully outside of the
image. If set to False, no error will be raised and only the parts inside the image
will be drawn.
thickness : None or int, optional
Deprecated.
Returns
-------
result : (H,W,C) ndarray(uint8)
Image with bounding box drawn on it. | [
"Draw",
"the",
"bounding",
"box",
"on",
"an",
"image",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L507-L591 | valid |
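A sketch for draw_on_image on a blank uint8 image; the color, alpha and size values are illustrative, and the output stays a uint8 array of the same shape as described above.
import numpy as np
from imgaug.augmentables.bbs import BoundingBox
image = np.zeros((100, 100, 3), dtype=np.uint8)
bb = BoundingBox(x1=20, y1=20, x2=80, y2=80)
image_drawn = bb.draw_on_image(image, color=(0, 255, 0), size=2, alpha=0.75)
print(image_drawn.dtype, image_drawn.shape)  # uint8 (100, 100, 3)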
aleju/imgaug | imgaug/augmentables/bbs.py | BoundingBox.extract_from_image | def extract_from_image(self, image, pad=True, pad_max=None,
prevent_zero_size=True):
"""
Extract the image pixels within the bounding box.
This function will zero-pad the image if the bounding box is partially/fully outside of
the image.
Parameters
----------
image : (H,W) ndarray or (H,W,C) ndarray
The image from which to extract the pixels within the bounding box.
pad : bool, optional
Whether to zero-pad the image if the object is partially/fully
outside of it.
pad_max : None or int, optional
The maximum number of pixels that may be zero-padded on any side,
i.e. if this has value ``N`` the total maximum of added pixels
is ``4*N``.
This option exists to prevent extremely large images as a result of
single points being moved very far away during augmentation.
prevent_zero_size : bool, optional
Whether to prevent height or width of the extracted image from becoming zero.
If this is set to True and height or width of the bounding box is below 1, the height/width will
be increased to 1. This can be useful to prevent problems, e.g. with image saving or plotting.
If it is set to False, images will be returned as ``(H', W')`` or ``(H', W', 3)`` with ``H`` or
``W`` potentially being 0.
Returns
-------
image : (H',W') ndarray or (H',W',C) ndarray
Pixels within the bounding box. Zero-padded if the bounding box is partially/fully
outside of the image. If prevent_zero_size is activated, it is guaranteed that ``H'>0``
and ``W'>0``, otherwise only ``H'>=0`` and ``W'>=0``.
"""
pad_top = 0
pad_right = 0
pad_bottom = 0
pad_left = 0
height, width = image.shape[0], image.shape[1]
x1, x2, y1, y2 = self.x1_int, self.x2_int, self.y1_int, self.y2_int
# When y values get into the range (H-0.5, H), the *_int functions round them to H.
# That is technically sensible, but in the case of extraction leads to a black border,
# which is both ugly and unexpected after calling cut_out_of_image(). Here we correct for
# that because of beauty reasons.
# Same is the case for x coordinates.
fully_within = self.is_fully_within_image(image)
if fully_within:
y1, y2 = np.clip([y1, y2], 0, height-1)
x1, x2 = np.clip([x1, x2], 0, width-1)
# TODO add test
if prevent_zero_size:
if abs(x2 - x1) < 1:
x2 = x1 + 1
if abs(y2 - y1) < 1:
y2 = y1 + 1
if pad:
# if the bb is outside of the image area, the following pads the image
# first with black pixels until the bb is inside the image
# and only then extracts the image area
# TODO probably more efficient to initialize an array of zeros
# and copy only the portions of the bb into that array that are
# natively inside the image area
if x1 < 0:
pad_left = abs(x1)
x2 = x2 + pad_left
width = width + pad_left
x1 = 0
if y1 < 0:
pad_top = abs(y1)
y2 = y2 + pad_top
height = height + pad_top
y1 = 0
if x2 >= width:
pad_right = x2 - width
if y2 >= height:
pad_bottom = y2 - height
paddings = [pad_top, pad_right, pad_bottom, pad_left]
any_padded = any([val > 0 for val in paddings])
if any_padded:
if pad_max is None:
pad_max = max(paddings)
image = ia.pad(
image,
top=min(pad_top, pad_max),
right=min(pad_right, pad_max),
bottom=min(pad_bottom, pad_max),
left=min(pad_left, pad_max)
)
return image[y1:y2, x1:x2]
else:
within_image = (
(0, 0, 0, 0)
<= (x1, y1, x2, y2)
< (width, height, width, height)
)
out_height, out_width = (y2 - y1), (x2 - x1)
nonzero_height = (out_height > 0)
nonzero_width = (out_width > 0)
if within_image and nonzero_height and nonzero_width:
return image[y1:y2, x1:x2]
if prevent_zero_size:
out_height = 1
out_width = 1
else:
out_height = 0
out_width = 0
if image.ndim == 2:
return np.zeros((out_height, out_width), dtype=image.dtype)
return np.zeros((out_height, out_width, image.shape[-1]),
dtype=image.dtype) | python | def extract_from_image(self, image, pad=True, pad_max=None,
prevent_zero_size=True):
"""
Extract the image pixels within the bounding box.
This function will zero-pad the image if the bounding box is partially/fully outside of
the image.
Parameters
----------
image : (H,W) ndarray or (H,W,C) ndarray
The image from which to extract the pixels within the bounding box.
pad : bool, optional
Whether to zero-pad the image if the object is partially/fully
outside of it.
pad_max : None or int, optional
The maximum number of pixels that may be zero-padded on any side,
i.e. if this has value ``N`` the total maximum of added pixels
is ``4*N``.
This option exists to prevent extremely large images as a result of
single points being moved very far away during augmentation.
prevent_zero_size : bool, optional
Whether to prevent height or width of the extracted image from becoming zero.
If this is set to True and height or width of the bounding box is below 1, the height/width will
be increased to 1. This can be useful to prevent problems, e.g. with image saving or plotting.
If it is set to False, images will be returned as ``(H', W')`` or ``(H', W', 3)`` with ``H`` or
``W`` potentially being 0.
Returns
-------
image : (H',W') ndarray or (H',W',C) ndarray
Pixels within the bounding box. Zero-padded if the bounding box is partially/fully
outside of the image. If prevent_zero_size is activated, it is guaranteed that ``H'>0``
and ``W'>0``, otherwise only ``H'>=0`` and ``W'>=0``.
"""
pad_top = 0
pad_right = 0
pad_bottom = 0
pad_left = 0
height, width = image.shape[0], image.shape[1]
x1, x2, y1, y2 = self.x1_int, self.x2_int, self.y1_int, self.y2_int
# When y values get into the range (H-0.5, H), the *_int functions round them to H.
# That is technically sensible, but in the case of extraction leads to a black border,
# which is both ugly and unexpected after calling cut_out_of_image(). Here we correct for
# that because of beauty reasons.
# Same is the case for x coordinates.
fully_within = self.is_fully_within_image(image)
if fully_within:
y1, y2 = np.clip([y1, y2], 0, height-1)
x1, x2 = np.clip([x1, x2], 0, width-1)
# TODO add test
if prevent_zero_size:
if abs(x2 - x1) < 1:
x2 = x1 + 1
if abs(y2 - y1) < 1:
y2 = y1 + 1
if pad:
# if the bb is outside of the image area, the following pads the image
# first with black pixels until the bb is inside the image
# and only then extracts the image area
# TODO probably more efficient to initialize an array of zeros
# and copy only the portions of the bb into that array that are
# natively inside the image area
if x1 < 0:
pad_left = abs(x1)
x2 = x2 + pad_left
width = width + pad_left
x1 = 0
if y1 < 0:
pad_top = abs(y1)
y2 = y2 + pad_top
height = height + pad_top
y1 = 0
if x2 >= width:
pad_right = x2 - width
if y2 >= height:
pad_bottom = y2 - height
paddings = [pad_top, pad_right, pad_bottom, pad_left]
any_padded = any([val > 0 for val in paddings])
if any_padded:
if pad_max is None:
pad_max = max(paddings)
image = ia.pad(
image,
top=min(pad_top, pad_max),
right=min(pad_right, pad_max),
bottom=min(pad_bottom, pad_max),
left=min(pad_left, pad_max)
)
return image[y1:y2, x1:x2]
else:
within_image = (
(0, 0, 0, 0)
<= (x1, y1, x2, y2)
< (width, height, width, height)
)
out_height, out_width = (y2 - y1), (x2 - x1)
nonzero_height = (out_height > 0)
nonzero_width = (out_width > 0)
if within_image and nonzero_height and nonzero_width:
return image[y1:y2, x1:x2]
if prevent_zero_size:
out_height = 1
out_width = 1
else:
out_height = 0
out_width = 0
if image.ndim == 2:
return np.zeros((out_height, out_width), dtype=image.dtype)
return np.zeros((out_height, out_width, image.shape[-1]),
dtype=image.dtype) | [
"def",
"extract_from_image",
"(",
"self",
",",
"image",
",",
"pad",
"=",
"True",
",",
"pad_max",
"=",
"None",
",",
"prevent_zero_size",
"=",
"True",
")",
":",
"pad_top",
"=",
"0",
"pad_right",
"=",
"0",
"pad_bottom",
"=",
"0",
"pad_left",
"=",
"0",
"height",
",",
"width",
"=",
"image",
".",
"shape",
"[",
"0",
"]",
",",
"image",
".",
"shape",
"[",
"1",
"]",
"x1",
",",
"x2",
",",
"y1",
",",
"y2",
"=",
"self",
".",
"x1_int",
",",
"self",
".",
"x2_int",
",",
"self",
".",
"y1_int",
",",
"self",
".",
"y2_int",
"# When y values get into the range (H-0.5, H), the *_int functions round them to H.",
"# That is technically sensible, but in the case of extraction leads to a black border,",
"# which is both ugly and unexpected after calling cut_out_of_image(). Here we correct for",
"# that because of beauty reasons.",
"# Same is the case for x coordinates.",
"fully_within",
"=",
"self",
".",
"is_fully_within_image",
"(",
"image",
")",
"if",
"fully_within",
":",
"y1",
",",
"y2",
"=",
"np",
".",
"clip",
"(",
"[",
"y1",
",",
"y2",
"]",
",",
"0",
",",
"height",
"-",
"1",
")",
"x1",
",",
"x2",
"=",
"np",
".",
"clip",
"(",
"[",
"x1",
",",
"x2",
"]",
",",
"0",
",",
"width",
"-",
"1",
")",
"# TODO add test",
"if",
"prevent_zero_size",
":",
"if",
"abs",
"(",
"x2",
"-",
"x1",
")",
"<",
"1",
":",
"x2",
"=",
"x1",
"+",
"1",
"if",
"abs",
"(",
"y2",
"-",
"y1",
")",
"<",
"1",
":",
"y2",
"=",
"y1",
"+",
"1",
"if",
"pad",
":",
"# if the bb is outside of the image area, the following pads the image",
"# first with black pixels until the bb is inside the image",
"# and only then extracts the image area",
"# TODO probably more efficient to initialize an array of zeros",
"# and copy only the portions of the bb into that array that are",
"# natively inside the image area",
"if",
"x1",
"<",
"0",
":",
"pad_left",
"=",
"abs",
"(",
"x1",
")",
"x2",
"=",
"x2",
"+",
"pad_left",
"width",
"=",
"width",
"+",
"pad_left",
"x1",
"=",
"0",
"if",
"y1",
"<",
"0",
":",
"pad_top",
"=",
"abs",
"(",
"y1",
")",
"y2",
"=",
"y2",
"+",
"pad_top",
"height",
"=",
"height",
"+",
"pad_top",
"y1",
"=",
"0",
"if",
"x2",
">=",
"width",
":",
"pad_right",
"=",
"x2",
"-",
"width",
"if",
"y2",
">=",
"height",
":",
"pad_bottom",
"=",
"y2",
"-",
"height",
"paddings",
"=",
"[",
"pad_top",
",",
"pad_right",
",",
"pad_bottom",
",",
"pad_left",
"]",
"any_padded",
"=",
"any",
"(",
"[",
"val",
">",
"0",
"for",
"val",
"in",
"paddings",
"]",
")",
"if",
"any_padded",
":",
"if",
"pad_max",
"is",
"None",
":",
"pad_max",
"=",
"max",
"(",
"paddings",
")",
"image",
"=",
"ia",
".",
"pad",
"(",
"image",
",",
"top",
"=",
"min",
"(",
"pad_top",
",",
"pad_max",
")",
",",
"right",
"=",
"min",
"(",
"pad_right",
",",
"pad_max",
")",
",",
"bottom",
"=",
"min",
"(",
"pad_bottom",
",",
"pad_max",
")",
",",
"left",
"=",
"min",
"(",
"pad_left",
",",
"pad_max",
")",
")",
"return",
"image",
"[",
"y1",
":",
"y2",
",",
"x1",
":",
"x2",
"]",
"else",
":",
"within_image",
"=",
"(",
"(",
"0",
",",
"0",
",",
"0",
",",
"0",
")",
"<=",
"(",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
")",
"<",
"(",
"width",
",",
"height",
",",
"width",
",",
"height",
")",
")",
"out_height",
",",
"out_width",
"=",
"(",
"y2",
"-",
"y1",
")",
",",
"(",
"x2",
"-",
"x1",
")",
"nonzero_height",
"=",
"(",
"out_height",
">",
"0",
")",
"nonzero_width",
"=",
"(",
"out_width",
">",
"0",
")",
"if",
"within_image",
"and",
"nonzero_height",
"and",
"nonzero_width",
":",
"return",
"image",
"[",
"y1",
":",
"y2",
",",
"x1",
":",
"x2",
"]",
"if",
"prevent_zero_size",
":",
"out_height",
"=",
"1",
"out_width",
"=",
"1",
"else",
":",
"out_height",
"=",
"0",
"out_width",
"=",
"0",
"if",
"image",
".",
"ndim",
"==",
"2",
":",
"return",
"np",
".",
"zeros",
"(",
"(",
"out_height",
",",
"out_width",
")",
",",
"dtype",
"=",
"image",
".",
"dtype",
")",
"return",
"np",
".",
"zeros",
"(",
"(",
"out_height",
",",
"out_width",
",",
"image",
".",
"shape",
"[",
"-",
"1",
"]",
")",
",",
"dtype",
"=",
"image",
".",
"dtype",
")"
] | Extract the image pixels within the bounding box.
This function will zero-pad the image if the bounding box is partially/fully outside of
the image.
Parameters
----------
image : (H,W) ndarray or (H,W,C) ndarray
The image from which to extract the pixels within the bounding box.
pad : bool, optional
Whether to zero-pad the image if the object is partially/fully
outside of it.
pad_max : None or int, optional
The maximum number of pixels that may be zero-padded on any side,
i.e. if this has value ``N`` the total maximum of added pixels
is ``4*N``.
This option exists to prevent extremely large images as a result of
single points being moved very far away during augmentation.
prevent_zero_size : bool, optional
Whether to prevent height or width of the extracted image from becoming zero.
If this is set to True and height or width of the bounding box is below 1, the height/width will
be increased to 1. This can be useful to prevent problems, e.g. with image saving or plotting.
If it is set to False, images will be returned as ``(H', W')`` or ``(H', W', 3)`` with ``H`` or
``W`` potentially being 0.
Returns
-------
image : (H',W') ndarray or (H',W',C) ndarray
Pixels within the bounding box. Zero-padded if the bounding box is partially/fully
outside of the image. If prevent_zero_size is activated, it is guaranteed that ``H'>0``
and ``W'>0``, otherwise only ``H'>=0`` and ``W'>=0``. | [
"Extract",
"the",
"image",
"pixels",
"within",
"the",
"bounding",
"box",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L594-L714 | valid |
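A minimal sketch of the extract_from_image entry above; the box intentionally reaches past the right and bottom border so that the zero-padding path is exercised (illustrative values only):
>>> import numpy as np
>>> import imgaug as ia
>>> image = np.zeros((100, 100, 3), dtype=np.uint8)
>>> bb = ia.BoundingBox(x1=70, y1=60, x2=120, y2=110)  # sticks out 20px/10px beyond the border
>>> patch = bb.extract_from_image(image)               # zero-padded patch of shape (50, 50, 3)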
aleju/imgaug | imgaug/augmentables/bbs.py | BoundingBox.to_keypoints | def to_keypoints(self):
"""
Convert the corners of the bounding box to keypoints (clockwise, starting at top left).
Returns
-------
list of imgaug.Keypoint
Corners of the bounding box as keypoints.
"""
# TODO get rid of this deferred import
from imgaug.augmentables.kps import Keypoint
return [
Keypoint(x=self.x1, y=self.y1),
Keypoint(x=self.x2, y=self.y1),
Keypoint(x=self.x2, y=self.y2),
Keypoint(x=self.x1, y=self.y2)
] | python | def to_keypoints(self):
"""
Convert the corners of the bounding box to keypoints (clockwise, starting at top left).
Returns
-------
list of imgaug.Keypoint
Corners of the bounding box as keypoints.
"""
# TODO get rid of this deferred import
from imgaug.augmentables.kps import Keypoint
return [
Keypoint(x=self.x1, y=self.y1),
Keypoint(x=self.x2, y=self.y1),
Keypoint(x=self.x2, y=self.y2),
Keypoint(x=self.x1, y=self.y2)
] | [
"def",
"to_keypoints",
"(",
"self",
")",
":",
"# TODO get rid of this deferred import",
"from",
"imgaug",
".",
"augmentables",
".",
"kps",
"import",
"Keypoint",
"return",
"[",
"Keypoint",
"(",
"x",
"=",
"self",
".",
"x1",
",",
"y",
"=",
"self",
".",
"y1",
")",
",",
"Keypoint",
"(",
"x",
"=",
"self",
".",
"x2",
",",
"y",
"=",
"self",
".",
"y1",
")",
",",
"Keypoint",
"(",
"x",
"=",
"self",
".",
"x2",
",",
"y",
"=",
"self",
".",
"y2",
")",
",",
"Keypoint",
"(",
"x",
"=",
"self",
".",
"x1",
",",
"y",
"=",
"self",
".",
"y2",
")",
"]"
] | Convert the corners of the bounding box to keypoints (clockwise, starting at top left).
Returns
-------
list of imgaug.Keypoint
Corners of the bounding box as keypoints. | [
"Convert",
"the",
"corners",
"of",
"the",
"bounding",
"box",
"to",
"keypoints",
"(",
"clockwise",
"starting",
"at",
"top",
"left",
")",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L718-L736 | valid |
aleju/imgaug | imgaug/augmentables/bbs.py | BoundingBox.copy | def copy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""
Create a shallow copy of the BoundingBox object.
Parameters
----------
x1 : None or number
If not None, then the x1 coordinate of the copied object will be set to this value.
y1 : None or number
If not None, then the y1 coordinate of the copied object will be set to this value.
x2 : None or number
If not None, then the x2 coordinate of the copied object will be set to this value.
y2 : None or number
If not None, then the y2 coordinate of the copied object will be set to this value.
label : None or string
If not None, then the label of the copied object will be set to this value.
Returns
-------
imgaug.BoundingBox
Shallow copy.
"""
return BoundingBox(
x1=self.x1 if x1 is None else x1,
x2=self.x2 if x2 is None else x2,
y1=self.y1 if y1 is None else y1,
y2=self.y2 if y2 is None else y2,
label=self.label if label is None else label
) | python | def copy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""
Create a shallow copy of the BoundingBox object.
Parameters
----------
x1 : None or number
If not None, then the x1 coordinate of the copied object will be set to this value.
y1 : None or number
If not None, then the y1 coordinate of the copied object will be set to this value.
x2 : None or number
If not None, then the x2 coordinate of the copied object will be set to this value.
y2 : None or number
If not None, then the y2 coordinate of the copied object will be set to this value.
label : None or string
If not None, then the label of the copied object will be set to this value.
Returns
-------
imgaug.BoundingBox
Shallow copy.
"""
return BoundingBox(
x1=self.x1 if x1 is None else x1,
x2=self.x2 if x2 is None else x2,
y1=self.y1 if y1 is None else y1,
y2=self.y2 if y2 is None else y2,
label=self.label if label is None else label
) | [
"def",
"copy",
"(",
"self",
",",
"x1",
"=",
"None",
",",
"y1",
"=",
"None",
",",
"x2",
"=",
"None",
",",
"y2",
"=",
"None",
",",
"label",
"=",
"None",
")",
":",
"return",
"BoundingBox",
"(",
"x1",
"=",
"self",
".",
"x1",
"if",
"x1",
"is",
"None",
"else",
"x1",
",",
"x2",
"=",
"self",
".",
"x2",
"if",
"x2",
"is",
"None",
"else",
"x2",
",",
"y1",
"=",
"self",
".",
"y1",
"if",
"y1",
"is",
"None",
"else",
"y1",
",",
"y2",
"=",
"self",
".",
"y2",
"if",
"y2",
"is",
"None",
"else",
"y2",
",",
"label",
"=",
"self",
".",
"label",
"if",
"label",
"is",
"None",
"else",
"label",
")"
] | Create a shallow copy of the BoundingBox object.
Parameters
----------
x1 : None or number
If not None, then the x1 coordinate of the copied object will be set to this value.
y1 : None or number
If not None, then the y1 coordinate of the copied object will be set to this value.
x2 : None or number
If not None, then the x2 coordinate of the copied object will be set to this value.
y2 : None or number
If not None, then the y2 coordinate of the copied object will be set to this value.
label : None or string
If not None, then the label of the copied object will be set to this value.
Returns
-------
imgaug.BoundingBox
Shallow copy. | [
"Create",
"a",
"shallow",
"copy",
"of",
"the",
"BoundingBox",
"object",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L738-L771 | valid |
aleju/imgaug | imgaug/augmentables/bbs.py | BoundingBox.deepcopy | def deepcopy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""
Create a deep copy of the BoundingBox object.
Parameters
----------
x1 : None or number
If not None, then the x1 coordinate of the copied object will be set to this value.
y1 : None or number
If not None, then the y1 coordinate of the copied object will be set to this value.
x2 : None or number
If not None, then the x2 coordinate of the copied object will be set to this value.
y2 : None or number
If not None, then the y2 coordinate of the copied object will be set to this value.
label : None or string
If not None, then the label of the copied object will be set to this value.
Returns
-------
imgaug.BoundingBox
Deep copy.
"""
return self.copy(x1=x1, y1=y1, x2=x2, y2=y2, label=label) | python | def deepcopy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""
Create a deep copy of the BoundingBox object.
Parameters
----------
x1 : None or number
If not None, then the x1 coordinate of the copied object will be set to this value.
y1 : None or number
If not None, then the y1 coordinate of the copied object will be set to this value.
x2 : None or number
If not None, then the x2 coordinate of the copied object will be set to this value.
y2 : None or number
If not None, then the y2 coordinate of the copied object will be set to this value.
label : None or string
If not None, then the label of the copied object will be set to this value.
Returns
-------
imgaug.BoundingBox
Deep copy.
"""
return self.copy(x1=x1, y1=y1, x2=x2, y2=y2, label=label) | [
"def",
"deepcopy",
"(",
"self",
",",
"x1",
"=",
"None",
",",
"y1",
"=",
"None",
",",
"x2",
"=",
"None",
",",
"y2",
"=",
"None",
",",
"label",
"=",
"None",
")",
":",
"return",
"self",
".",
"copy",
"(",
"x1",
"=",
"x1",
",",
"y1",
"=",
"y1",
",",
"x2",
"=",
"x2",
",",
"y2",
"=",
"y2",
",",
"label",
"=",
"label",
")"
] | Create a deep copy of the BoundingBox object.
Parameters
----------
x1 : None or number
If not None, then the x1 coordinate of the copied object will be set to this value.
y1 : None or number
If not None, then the y1 coordinate of the copied object will be set to this value.
x2 : None or number
If not None, then the x2 coordinate of the copied object will be set to this value.
y2 : None or number
If not None, then the y2 coordinate of the copied object will be set to this value.
label : None or string
If not None, then the label of the copied object will be set to this value.
Returns
-------
imgaug.BoundingBox
Deep copy. | [
"Create",
"a",
"deep",
"copy",
"of",
"the",
"BoundingBox",
"object",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L773-L800 | valid |
aleju/imgaug | imgaug/augmentables/bbs.py | BoundingBoxesOnImage.on | def on(self, image):
"""
Project bounding boxes from one image to a new one.
Parameters
----------
image : ndarray or tuple of int
New image onto which the bounding boxes are to be projected.
May also simply be that new image's shape tuple.
Returns
-------
bounding_boxes : imgaug.BoundingBoxesOnImage
Object containing all projected bounding boxes.
"""
shape = normalize_shape(image)
if shape[0:2] == self.shape[0:2]:
return self.deepcopy()
bounding_boxes = [bb.project(self.shape, shape)
for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bounding_boxes, shape) | python | def on(self, image):
"""
Project bounding boxes from one image to a new one.
Parameters
----------
image : ndarray or tuple of int
New image onto which the bounding boxes are to be projected.
May also simply be that new image's shape tuple.
Returns
-------
bounding_boxes : imgaug.BoundingBoxesOnImage
Object containing all projected bounding boxes.
"""
shape = normalize_shape(image)
if shape[0:2] == self.shape[0:2]:
return self.deepcopy()
bounding_boxes = [bb.project(self.shape, shape)
for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bounding_boxes, shape) | [
"def",
"on",
"(",
"self",
",",
"image",
")",
":",
"shape",
"=",
"normalize_shape",
"(",
"image",
")",
"if",
"shape",
"[",
"0",
":",
"2",
"]",
"==",
"self",
".",
"shape",
"[",
"0",
":",
"2",
"]",
":",
"return",
"self",
".",
"deepcopy",
"(",
")",
"bounding_boxes",
"=",
"[",
"bb",
".",
"project",
"(",
"self",
".",
"shape",
",",
"shape",
")",
"for",
"bb",
"in",
"self",
".",
"bounding_boxes",
"]",
"return",
"BoundingBoxesOnImage",
"(",
"bounding_boxes",
",",
"shape",
")"
] | Project bounding boxes from one image to a new one.
Parameters
----------
image : ndarray or tuple of int
New image onto which the bounding boxes are to be projected.
May also simply be that new image's shape tuple.
Returns
-------
bounding_boxes : imgaug.BoundingBoxesOnImage
Object containing all projected bounding boxes. | [
"Project",
"bounding",
"boxes",
"from",
"one",
"image",
"to",
"a",
"new",
"one",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L877-L898 | valid |
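A sketch of the projection described in the on() entry above; ia.imresize_single_image is only used as a convenient resize helper here and any other resizing routine would serve equally well (illustrative values only):
>>> import numpy as np
>>> import imgaug as ia
>>> image = np.zeros((100, 200, 3), dtype=np.uint8)
>>> bbs = ia.BoundingBoxesOnImage([ia.BoundingBox(x1=10, y1=10, x2=50, y2=40)], shape=image.shape)
>>> image_small = ia.imresize_single_image(image, (50, 100))  # halve height and width
>>> bbs_small = bbs.on(image_small)                           # coordinates are rescaled to the new size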
aleju/imgaug | imgaug/augmentables/bbs.py | BoundingBoxesOnImage.from_xyxy_array | def from_xyxy_array(cls, xyxy, shape):
"""
Convert an (N,4) ndarray to a BoundingBoxesOnImage object.
This is the inverse of :func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`.
Parameters
----------
xyxy : (N,4) ndarray
Array containing the corner coordinates (top-left, bottom-right) of ``N`` bounding boxes
in the form ``(x1, y1, x2, y2)``. Should usually be of dtype ``float32``.
shape : tuple of int
Shape of the image on which the bounding boxes are placed.
Should usually be ``(H, W, C)`` or ``(H, W)``.
Returns
-------
imgaug.BoundingBoxesOnImage
Object containing a list of BoundingBox objects following the provided corner coordinates.
"""
ia.do_assert(xyxy.shape[1] == 4, "Expected input array of shape (N, 4), got shape %s." % (xyxy.shape,))
boxes = [BoundingBox(*row) for row in xyxy]
return cls(boxes, shape) | python | def from_xyxy_array(cls, xyxy, shape):
"""
Convert an (N,4) ndarray to a BoundingBoxesOnImage object.
This is the inverse of :func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`.
Parameters
----------
xyxy : (N,4) ndarray
Array containing the corner coordinates (top-left, bottom-right) of ``N`` bounding boxes
in the form ``(x1, y1, x2, y2)``. Should usually be of dtype ``float32``.
shape : tuple of int
Shape of the image on which the bounding boxes are placed.
Should usually be ``(H, W, C)`` or ``(H, W)``.
Returns
-------
imgaug.BoundingBoxesOnImage
Object containing a list of BoundingBox objects following the provided corner coordinates.
"""
ia.do_assert(xyxy.shape[1] == 4, "Expected input array of shape (N, 4), got shape %s." % (xyxy.shape,))
boxes = [BoundingBox(*row) for row in xyxy]
return cls(boxes, shape) | [
"def",
"from_xyxy_array",
"(",
"cls",
",",
"xyxy",
",",
"shape",
")",
":",
"ia",
".",
"do_assert",
"(",
"xyxy",
".",
"shape",
"[",
"1",
"]",
"==",
"4",
",",
"\"Expected input array of shape (N, 4), got shape %s.\"",
"%",
"(",
"xyxy",
".",
"shape",
",",
")",
")",
"boxes",
"=",
"[",
"BoundingBox",
"(",
"*",
"row",
")",
"for",
"row",
"in",
"xyxy",
"]",
"return",
"cls",
"(",
"boxes",
",",
"shape",
")"
] | Convert an (N,4) ndarray to a BoundingBoxesOnImage object.
This is the inverse of :func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`.
Parameters
----------
xyxy : (N,4) ndarray
Array containing the corner coordinates (top-left, bottom-right) of ``N`` bounding boxes
in the form ``(x1, y1, x2, y2)``. Should usually be of dtype ``float32``.
shape : tuple of int
Shape of the image on which the bounding boxes are placed.
Should usually be ``(H, W, C)`` or ``(H, W)``.
Returns
-------
imgaug.BoundingBoxesOnImage
Object containing a list of BoundingBox objects following the provided corner coordinates. | [
"Convert",
"an",
"(",
"N",
"4",
")",
"ndarray",
"to",
"a",
"BoundingBoxesOnImage",
"object",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L901-L927 | valid |
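A sketch of the from_xyxy_array entry above (made-up coordinates):
>>> import numpy as np
>>> import imgaug as ia
>>> xyxy = np.float32([[10, 20, 30, 40], [15, 25, 90, 95]])  # two boxes as (x1, y1, x2, y2)
>>> bbs = ia.BoundingBoxesOnImage.from_xyxy_array(xyxy, shape=(100, 100, 3))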
aleju/imgaug | imgaug/augmentables/bbs.py | BoundingBoxesOnImage.to_xyxy_array | def to_xyxy_array(self, dtype=np.float32):
"""
Convert the BoundingBoxesOnImage object to an (N,4) ndarray.
This is the inverse of :func:`imgaug.BoundingBoxesOnImage.from_xyxy_array`.
Parameters
----------
dtype : numpy.dtype, optional
Desired output datatype of the ndarray.
Returns
-------
ndarray
(N,4) ndarray array, where ``N`` denotes the number of bounding boxes and ``4`` denotes the
top-left and bottom-right bounding box corner coordinates in form ``(x1, y1, x2, y2)``.
"""
xyxy_array = np.zeros((len(self.bounding_boxes), 4), dtype=np.float32)
for i, box in enumerate(self.bounding_boxes):
xyxy_array[i] = [box.x1, box.y1, box.x2, box.y2]
return xyxy_array.astype(dtype) | python | def to_xyxy_array(self, dtype=np.float32):
"""
Convert the BoundingBoxesOnImage object to an (N,4) ndarray.
This is the inverse of :func:`imgaug.BoundingBoxesOnImage.from_xyxy_array`.
Parameters
----------
dtype : numpy.dtype, optional
Desired output datatype of the ndarray.
Returns
-------
ndarray
(N,4) ndarray array, where ``N`` denotes the number of bounding boxes and ``4`` denotes the
top-left and bottom-right bounding box corner coordinates in form ``(x1, y1, x2, y2)``.
"""
xyxy_array = np.zeros((len(self.bounding_boxes), 4), dtype=np.float32)
for i, box in enumerate(self.bounding_boxes):
xyxy_array[i] = [box.x1, box.y1, box.x2, box.y2]
return xyxy_array.astype(dtype) | [
"def",
"to_xyxy_array",
"(",
"self",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
":",
"xyxy_array",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"self",
".",
"bounding_boxes",
")",
",",
"4",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"for",
"i",
",",
"box",
"in",
"enumerate",
"(",
"self",
".",
"bounding_boxes",
")",
":",
"xyxy_array",
"[",
"i",
"]",
"=",
"[",
"box",
".",
"x1",
",",
"box",
".",
"y1",
",",
"box",
".",
"x2",
",",
"box",
".",
"y2",
"]",
"return",
"xyxy_array",
".",
"astype",
"(",
"dtype",
")"
] | Convert the BoundingBoxesOnImage object to an (N,4) ndarray.
This is the inverse of :func:`imgaug.BoundingBoxesOnImage.from_xyxy_array`.
Parameters
----------
dtype : numpy.dtype, optional
Desired output datatype of the ndarray.
Returns
-------
ndarray
(N,4) ndarray array, where ``N`` denotes the number of bounding boxes and ``4`` denotes the
top-left and bottom-right bounding box corner coordinates in form ``(x1, y1, x2, y2)``. | [
"Convert",
"the",
"BoundingBoxesOnImage",
"object",
"to",
"an",
"(",
"N",
"4",
")",
"ndarray",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L929-L952 | valid |
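And the inverse direction, matching the to_xyxy_array entry above (made-up coordinates):
>>> import imgaug as ia
>>> bbs = ia.BoundingBoxesOnImage([ia.BoundingBox(x1=10, y1=20, x2=30, y2=40)], shape=(100, 100, 3))
>>> arr = bbs.to_xyxy_array()  # (1, 4) float32 array: [[10., 20., 30., 40.]]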
aleju/imgaug | imgaug/augmentables/bbs.py | BoundingBoxesOnImage.draw_on_image | def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=1,
copy=True, raise_if_out_of_image=False, thickness=None):
"""
Draw all bounding boxes onto a given image.
Parameters
----------
image : (H,W,3) ndarray
The image onto which to draw the bounding boxes.
This image should usually have the same shape as
set in BoundingBoxesOnImage.shape.
color : int or list of int or tuple of int or (3,) ndarray, optional
The RGB color of all bounding boxes. If a single int ``C``, then
that is equivalent to ``(C,C,C)``.
alpha : float, optional
Alpha/transparency of the bounding box.
size : int, optional
Thickness in pixels.
copy : bool, optional
Whether to copy the image before drawing the bounding boxes.
raise_if_out_of_image : bool, optional
Whether to raise an exception if any bounding box is outside of the
image.
thickness : None or int, optional
Deprecated.
Returns
-------
image : (H,W,3) ndarray
Image with drawn bounding boxes.
"""
image = np.copy(image) if copy else image
for bb in self.bounding_boxes:
image = bb.draw_on_image(
image,
color=color,
alpha=alpha,
size=size,
copy=False,
raise_if_out_of_image=raise_if_out_of_image,
thickness=thickness
)
return image | python | def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=1,
copy=True, raise_if_out_of_image=False, thickness=None):
"""
Draw all bounding boxes onto a given image.
Parameters
----------
image : (H,W,3) ndarray
The image onto which to draw the bounding boxes.
This image should usually have the same shape as
set in BoundingBoxesOnImage.shape.
color : int or list of int or tuple of int or (3,) ndarray, optional
The RGB color of all bounding boxes. If a single int ``C``, then
that is equivalent to ``(C,C,C)``.
alpha : float, optional
Alpha/transparency of the bounding box.
size : int, optional
Thickness in pixels.
copy : bool, optional
Whether to copy the image before drawing the bounding boxes.
raise_if_out_of_image : bool, optional
Whether to raise an exception if any bounding box is outside of the
image.
thickness : None or int, optional
Deprecated.
Returns
-------
image : (H,W,3) ndarray
Image with drawn bounding boxes.
"""
image = np.copy(image) if copy else image
for bb in self.bounding_boxes:
image = bb.draw_on_image(
image,
color=color,
alpha=alpha,
size=size,
copy=False,
raise_if_out_of_image=raise_if_out_of_image,
thickness=thickness
)
return image | [
"def",
"draw_on_image",
"(",
"self",
",",
"image",
",",
"color",
"=",
"(",
"0",
",",
"255",
",",
"0",
")",
",",
"alpha",
"=",
"1.0",
",",
"size",
"=",
"1",
",",
"copy",
"=",
"True",
",",
"raise_if_out_of_image",
"=",
"False",
",",
"thickness",
"=",
"None",
")",
":",
"image",
"=",
"np",
".",
"copy",
"(",
"image",
")",
"if",
"copy",
"else",
"image",
"for",
"bb",
"in",
"self",
".",
"bounding_boxes",
":",
"image",
"=",
"bb",
".",
"draw_on_image",
"(",
"image",
",",
"color",
"=",
"color",
",",
"alpha",
"=",
"alpha",
",",
"size",
"=",
"size",
",",
"copy",
"=",
"False",
",",
"raise_if_out_of_image",
"=",
"raise_if_out_of_image",
",",
"thickness",
"=",
"thickness",
")",
"return",
"image"
] | Draw all bounding boxes onto a given image.
Parameters
----------
image : (H,W,3) ndarray
The image onto which to draw the bounding boxes.
This image should usually have the same shape as
set in BoundingBoxesOnImage.shape.
color : int or list of int or tuple of int or (3,) ndarray, optional
The RGB color of all bounding boxes. If a single int ``C``, then
that is equivalent to ``(C,C,C)``.
alpha : float, optional
Alpha/transparency of the bounding box.
size : int, optional
Thickness in pixels.
copy : bool, optional
Whether to copy the image before drawing the bounding boxes.
raise_if_out_of_image : bool, optional
Whether to raise an exception if any bounding box is outside of the
image.
thickness : None or int, optional
Deprecated.
Returns
-------
image : (H,W,3) ndarray
Image with drawn bounding boxes. | [
"Draw",
"all",
"bounding",
"boxes",
"onto",
"a",
"given",
"image",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L954-L1005 | valid |
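A sketch of the BoundingBoxesOnImage.draw_on_image entry above (synthetic image, made-up boxes):
>>> import numpy as np
>>> import imgaug as ia
>>> image = np.zeros((100, 100, 3), dtype=np.uint8)
>>> boxes = [ia.BoundingBox(x1=10, y1=10, x2=40, y2=40), ia.BoundingBox(x1=50, y1=50, x2=90, y2=80)]
>>> bbs = ia.BoundingBoxesOnImage(boxes, shape=image.shape)
>>> image_drawn = bbs.draw_on_image(image, color=(0, 255, 0), size=2)  # both boxes drawn in green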
aleju/imgaug | imgaug/augmentables/bbs.py | BoundingBoxesOnImage.remove_out_of_image | def remove_out_of_image(self, fully=True, partly=False):
"""
Remove all bounding boxes that are fully or partially outside of the image.
Parameters
----------
fully : bool, optional
Whether to remove bounding boxes that are fully outside of the image.
partly : bool, optional
Whether to remove bounding boxes that are partially outside of the image.
Returns
-------
imgaug.BoundingBoxesOnImage
Reduced set of bounding boxes, with those that were fully/partially outside of
the image removed.
"""
bbs_clean = [bb for bb in self.bounding_boxes
if not bb.is_out_of_image(self.shape, fully=fully, partly=partly)]
return BoundingBoxesOnImage(bbs_clean, shape=self.shape) | python | def remove_out_of_image(self, fully=True, partly=False):
"""
Remove all bounding boxes that are fully or partially outside of the image.
Parameters
----------
fully : bool, optional
Whether to remove bounding boxes that are fully outside of the image.
partly : bool, optional
Whether to remove bounding boxes that are partially outside of the image.
Returns
-------
imgaug.BoundingBoxesOnImage
Reduced set of bounding boxes, with those that were fully/partially outside of
the image removed.
"""
bbs_clean = [bb for bb in self.bounding_boxes
if not bb.is_out_of_image(self.shape, fully=fully, partly=partly)]
return BoundingBoxesOnImage(bbs_clean, shape=self.shape) | [
"def",
"remove_out_of_image",
"(",
"self",
",",
"fully",
"=",
"True",
",",
"partly",
"=",
"False",
")",
":",
"bbs_clean",
"=",
"[",
"bb",
"for",
"bb",
"in",
"self",
".",
"bounding_boxes",
"if",
"not",
"bb",
".",
"is_out_of_image",
"(",
"self",
".",
"shape",
",",
"fully",
"=",
"fully",
",",
"partly",
"=",
"partly",
")",
"]",
"return",
"BoundingBoxesOnImage",
"(",
"bbs_clean",
",",
"shape",
"=",
"self",
".",
"shape",
")"
] | Remove all bounding boxes that are fully or partially outside of the image.
Parameters
----------
fully : bool, optional
Whether to remove bounding boxes that are fully outside of the image.
partly : bool, optional
Whether to remove bounding boxes that are partially outside of the image.
Returns
-------
imgaug.BoundingBoxesOnImage
Reduced set of bounding boxes, with those that were fully/partially outside of
the image removed. | [
"Remove",
"all",
"bounding",
"boxes",
"that",
"are",
"fully",
"or",
"partially",
"outside",
"of",
"the",
"image",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L1007-L1028 | valid |
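A sketch of the remove_out_of_image entry above, with one box placed fully outside the image so that it gets dropped (made-up coordinates):
>>> import imgaug as ia
>>> boxes = [ia.BoundingBox(x1=10, y1=10, x2=40, y2=40), ia.BoundingBox(x1=-50, y1=-50, x2=-10, y2=-10)]
>>> bbs = ia.BoundingBoxesOnImage(boxes, shape=(100, 100, 3))        # the second box lies fully outside
>>> bbs_reduced = bbs.remove_out_of_image(fully=True, partly=False)  # only the first box remains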
aleju/imgaug | imgaug/augmentables/bbs.py | BoundingBoxesOnImage.clip_out_of_image | def clip_out_of_image(self):
"""
Clip off all parts from all bounding boxes that are outside of the image.
Returns
-------
imgaug.BoundingBoxesOnImage
Bounding boxes, clipped to fall within the image dimensions.
"""
bbs_cut = [bb.clip_out_of_image(self.shape)
for bb in self.bounding_boxes if bb.is_partly_within_image(self.shape)]
return BoundingBoxesOnImage(bbs_cut, shape=self.shape) | python | def clip_out_of_image(self):
"""
Clip off all parts from all bounding boxes that are outside of the image.
Returns
-------
imgaug.BoundingBoxesOnImage
Bounding boxes, clipped to fall within the image dimensions.
"""
bbs_cut = [bb.clip_out_of_image(self.shape)
for bb in self.bounding_boxes if bb.is_partly_within_image(self.shape)]
return BoundingBoxesOnImage(bbs_cut, shape=self.shape) | [
"def",
"clip_out_of_image",
"(",
"self",
")",
":",
"bbs_cut",
"=",
"[",
"bb",
".",
"clip_out_of_image",
"(",
"self",
".",
"shape",
")",
"for",
"bb",
"in",
"self",
".",
"bounding_boxes",
"if",
"bb",
".",
"is_partly_within_image",
"(",
"self",
".",
"shape",
")",
"]",
"return",
"BoundingBoxesOnImage",
"(",
"bbs_cut",
",",
"shape",
"=",
"self",
".",
"shape",
")"
] | Clip off all parts from all bounding boxes that are outside of the image.
Returns
-------
imgaug.BoundingBoxesOnImage
Bounding boxes, clipped to fall within the image dimensions. | [
"Clip",
"off",
"all",
"parts",
"from",
"all",
"bounding",
"boxes",
"that",
"are",
"outside",
"of",
"the",
"image",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L1036-L1048 | valid |
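A sketch of the clip_out_of_image entry above (made-up coordinates):
>>> import imgaug as ia
>>> bbs = ia.BoundingBoxesOnImage([ia.BoundingBox(x1=80, y1=80, x2=130, y2=120)], shape=(100, 100, 3))
>>> bbs_clipped = bbs.clip_out_of_image()  # the box is cut back to (80, 80, 100, 100)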
aleju/imgaug | imgaug/augmentables/bbs.py | BoundingBoxesOnImage.shift | def shift(self, top=None, right=None, bottom=None, left=None):
"""
Shift all bounding boxes from one or more image sides, i.e. move them on the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift all bounding boxes from the top.
right : None or int, optional
Amount of pixels by which to shift all bounding boxes from the right.
bottom : None or int, optional
Amount of pixels by which to shift all bounding boxes from the bottom.
left : None or int, optional
Amount of pixels by which to shift all bounding boxes from the left.
Returns
-------
imgaug.BoundingBoxesOnImage
Shifted bounding boxes.
"""
bbs_new = [bb.shift(top=top, right=right, bottom=bottom, left=left) for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bbs_new, shape=self.shape) | python | def shift(self, top=None, right=None, bottom=None, left=None):
"""
Shift all bounding boxes from one or more image sides, i.e. move them on the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift all bounding boxes from the top.
right : None or int, optional
Amount of pixels by which to shift all bounding boxes from the right.
bottom : None or int, optional
Amount of pixels by which to shift all bounding boxes from the bottom.
left : None or int, optional
Amount of pixels by which to shift all bounding boxes from the left.
Returns
-------
imgaug.BoundingBoxesOnImage
Shifted bounding boxes.
"""
bbs_new = [bb.shift(top=top, right=right, bottom=bottom, left=left) for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bbs_new, shape=self.shape) | [
"def",
"shift",
"(",
"self",
",",
"top",
"=",
"None",
",",
"right",
"=",
"None",
",",
"bottom",
"=",
"None",
",",
"left",
"=",
"None",
")",
":",
"bbs_new",
"=",
"[",
"bb",
".",
"shift",
"(",
"top",
"=",
"top",
",",
"right",
"=",
"right",
",",
"bottom",
"=",
"bottom",
",",
"left",
"=",
"left",
")",
"for",
"bb",
"in",
"self",
".",
"bounding_boxes",
"]",
"return",
"BoundingBoxesOnImage",
"(",
"bbs_new",
",",
"shape",
"=",
"self",
".",
"shape",
")"
] | Shift all bounding boxes from one or more image sides, i.e. move them on the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift all bounding boxes from the top.
right : None or int, optional
Amount of pixels by which to shift all bounding boxes from the right.
bottom : None or int, optional
Amount of pixels by which to shift all bounding boxes from the bottom.
left : None or int, optional
Amount of pixels by which to shift all bounding boxes from the left.
Returns
-------
imgaug.BoundingBoxesOnImage
Shifted bounding boxes. | [
"Shift",
"all",
"bounding",
"boxes",
"from",
"one",
"or",
"more",
"image",
"sides",
"i",
".",
"e",
".",
"move",
"them",
"on",
"the",
"x",
"/",
"y",
"-",
"axis",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L1050-L1075 | valid |
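A sketch of the shift entry above; shifting from the left/top moves the boxes towards the right/bottom (made-up coordinates):
>>> import imgaug as ia
>>> bbs = ia.BoundingBoxesOnImage([ia.BoundingBox(x1=10, y1=10, x2=40, y2=40)], shape=(100, 100, 3))
>>> bbs_shifted = bbs.shift(left=5, top=2)  # all boxes move 5px to the right and 2px down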
aleju/imgaug | imgaug/augmentables/bbs.py | BoundingBoxesOnImage.deepcopy | def deepcopy(self):
"""
Create a deep copy of the BoundingBoxesOnImage object.
Returns
-------
imgaug.BoundingBoxesOnImage
Deep copy.
"""
# Manual copy is far faster than deepcopy for BoundingBoxesOnImage,
# so use manual copy here too
bbs = [bb.deepcopy() for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bbs, tuple(self.shape)) | python | def deepcopy(self):
"""
Create a deep copy of the BoundingBoxesOnImage object.
Returns
-------
imgaug.BoundingBoxesOnImage
Deep copy.
"""
# Manual copy is far faster than deepcopy for BoundingBoxesOnImage,
# so use manual copy here too
bbs = [bb.deepcopy() for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bbs, tuple(self.shape)) | [
"def",
"deepcopy",
"(",
"self",
")",
":",
"# Manual copy is far faster than deepcopy for BoundingBoxesOnImage,",
"# so use manual copy here too",
"bbs",
"=",
"[",
"bb",
".",
"deepcopy",
"(",
")",
"for",
"bb",
"in",
"self",
".",
"bounding_boxes",
"]",
"return",
"BoundingBoxesOnImage",
"(",
"bbs",
",",
"tuple",
"(",
"self",
".",
"shape",
")",
")"
] | Create a deep copy of the BoundingBoxesOnImage object.
Returns
-------
imgaug.BoundingBoxesOnImage
Deep copy. | [
"Create",
"a",
"deep",
"copy",
"of",
"the",
"BoundingBoxesOnImage",
"object",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L1089-L1102 | valid |
aleju/imgaug | imgaug/augmenters/convolutional.py | Emboss | def Emboss(alpha=0, strength=1, name=None, deterministic=False, random_state=None):
"""
Augmenter that embosses images and overlays the result with the original
image.
The embossed version pronounces highlights and shadows,
letting the image look as if it was recreated on a metal plate ("embossed").
dtype support::
See ``imgaug.augmenters.convolutional.Convolve``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Visibility of the embossed image. At 0, only the original image is
visible, at 1.0 only its embossed version is visible.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
strength : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Parameter that controls the strength of the embossing.
Sane values are somewhere in the range ``(0, 2)`` with 1 being the standard
embossing effect. Default value is 1.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = Emboss(alpha=(0.0, 1.0), strength=(0.5, 1.5))
embosses an image with a variable strength in the range ``0.5 <= x <= 1.5``
and overlays the result with a variable alpha in the range ``0.0 <= a <= 1.0``
over the old image.
"""
alpha_param = iap.handle_continuous_param(alpha, "alpha", value_range=(0, 1.0), tuple_to_uniform=True,
list_to_choice=True)
strength_param = iap.handle_continuous_param(strength, "strength", value_range=(0, None), tuple_to_uniform=True,
list_to_choice=True)
def create_matrices(image, nb_channels, random_state_func):
alpha_sample = alpha_param.draw_sample(random_state=random_state_func)
ia.do_assert(0 <= alpha_sample <= 1.0)
strength_sample = strength_param.draw_sample(random_state=random_state_func)
matrix_nochange = np.array([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
], dtype=np.float32)
matrix_effect = np.array([
[-1-strength_sample, 0-strength_sample, 0],
[0-strength_sample, 1, 0+strength_sample],
[0, 0+strength_sample, 1+strength_sample]
], dtype=np.float32)
matrix = (1-alpha_sample) * matrix_nochange + alpha_sample * matrix_effect
return [matrix] * nb_channels
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return Convolve(create_matrices, name=name, deterministic=deterministic, random_state=random_state) | python | def Emboss(alpha=0, strength=1, name=None, deterministic=False, random_state=None):
"""
Augmenter that embosses images and overlays the result with the original
image.
The embossed version pronounces highlights and shadows,
letting the image look as if it was recreated on a metal plate ("embossed").
dtype support::
See ``imgaug.augmenters.convolutional.Convolve``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Visibility of the embossed image. At 0, only the original image is
visible, at 1.0 only its embossed version is visible.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
strength : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Parameter that controls the strength of the embossing.
Sane values are somewhere in the range ``(0, 2)`` with 1 being the standard
embossing effect. Default value is 1.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = Emboss(alpha=(0.0, 1.0), strength=(0.5, 1.5))
embosses an image with a variable strength in the range ``0.5 <= x <= 1.5``
and overlays the result with a variable alpha in the range ``0.0 <= a <= 1.0``
over the old image.
"""
alpha_param = iap.handle_continuous_param(alpha, "alpha", value_range=(0, 1.0), tuple_to_uniform=True,
list_to_choice=True)
strength_param = iap.handle_continuous_param(strength, "strength", value_range=(0, None), tuple_to_uniform=True,
list_to_choice=True)
def create_matrices(image, nb_channels, random_state_func):
alpha_sample = alpha_param.draw_sample(random_state=random_state_func)
ia.do_assert(0 <= alpha_sample <= 1.0)
strength_sample = strength_param.draw_sample(random_state=random_state_func)
matrix_nochange = np.array([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
], dtype=np.float32)
matrix_effect = np.array([
[-1-strength_sample, 0-strength_sample, 0],
[0-strength_sample, 1, 0+strength_sample],
[0, 0+strength_sample, 1+strength_sample]
], dtype=np.float32)
matrix = (1-alpha_sample) * matrix_nochange + alpha_sample * matrix_effect
return [matrix] * nb_channels
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return Convolve(create_matrices, name=name, deterministic=deterministic, random_state=random_state) | [
"def",
"Emboss",
"(",
"alpha",
"=",
"0",
",",
"strength",
"=",
"1",
",",
"name",
"=",
"None",
",",
"deterministic",
"=",
"False",
",",
"random_state",
"=",
"None",
")",
":",
"alpha_param",
"=",
"iap",
".",
"handle_continuous_param",
"(",
"alpha",
",",
"\"alpha\"",
",",
"value_range",
"=",
"(",
"0",
",",
"1.0",
")",
",",
"tuple_to_uniform",
"=",
"True",
",",
"list_to_choice",
"=",
"True",
")",
"strength_param",
"=",
"iap",
".",
"handle_continuous_param",
"(",
"strength",
",",
"\"strength\"",
",",
"value_range",
"=",
"(",
"0",
",",
"None",
")",
",",
"tuple_to_uniform",
"=",
"True",
",",
"list_to_choice",
"=",
"True",
")",
"def",
"create_matrices",
"(",
"image",
",",
"nb_channels",
",",
"random_state_func",
")",
":",
"alpha_sample",
"=",
"alpha_param",
".",
"draw_sample",
"(",
"random_state",
"=",
"random_state_func",
")",
"ia",
".",
"do_assert",
"(",
"0",
"<=",
"alpha_sample",
"<=",
"1.0",
")",
"strength_sample",
"=",
"strength_param",
".",
"draw_sample",
"(",
"random_state",
"=",
"random_state_func",
")",
"matrix_nochange",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"0",
",",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"1",
",",
"0",
"]",
",",
"[",
"0",
",",
"0",
",",
"0",
"]",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"matrix_effect",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"-",
"1",
"-",
"strength_sample",
",",
"0",
"-",
"strength_sample",
",",
"0",
"]",
",",
"[",
"0",
"-",
"strength_sample",
",",
"1",
",",
"0",
"+",
"strength_sample",
"]",
",",
"[",
"0",
",",
"0",
"+",
"strength_sample",
",",
"1",
"+",
"strength_sample",
"]",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"matrix",
"=",
"(",
"1",
"-",
"alpha_sample",
")",
"*",
"matrix_nochange",
"+",
"alpha_sample",
"*",
"matrix_effect",
"return",
"[",
"matrix",
"]",
"*",
"nb_channels",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"\"Unnamed%s\"",
"%",
"(",
"ia",
".",
"caller_name",
"(",
")",
",",
")",
"return",
"Convolve",
"(",
"create_matrices",
",",
"name",
"=",
"name",
",",
"deterministic",
"=",
"deterministic",
",",
"random_state",
"=",
"random_state",
")"
] | Augmenter that embosses images and overlays the result with the original
image.
The embossed version pronounces highlights and shadows,
letting the image look as if it was recreated on a metal plate ("embossed").
dtype support::
See ``imgaug.augmenters.convolutional.Convolve``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Visibility of the embossed image. At 0, only the original image is
visible, at 1.0 only its embossed version is visible.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
strength : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Parameter that controls the strength of the embossing.
Sane values are somewhere in the range ``(0, 2)`` with 1 being the standard
embossing effect. Default value is 1.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = Emboss(alpha=(0.0, 1.0), strength=(0.5, 1.5))
embosses an image with a variable strength in the range ``0.5 <= x <= 1.5``
and overlays the result with a variable alpha in the range ``0.0 <= a <= 1.0``
over the old image. | [
"Augmenter",
"that",
"embosses",
"images",
"and",
"overlays",
"the",
"result",
"with",
"the",
"original",
"image",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmenters/convolutional.py#L296-L378 | valid |
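The Emboss entry above already gives the canonical call; the sketch below merely adds a synthetic input image so it runs end to end (illustrative only):
>>> import numpy as np
>>> import imgaug.augmenters as iaa
>>> aug = iaa.Emboss(alpha=(0.0, 1.0), strength=(0.5, 1.5))
>>> image = np.random.randint(0, 255, size=(64, 64, 3), dtype=np.uint8)
>>> image_embossed = aug.augment_image(image)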
aleju/imgaug | imgaug/augmenters/convolutional.py | EdgeDetect | def EdgeDetect(alpha=0, name=None, deterministic=False, random_state=None):
"""
Augmenter that detects all edges in images, marks them in
a black and white image and then overlays the result with the original
image.
dtype support::
See ``imgaug.augmenters.convolutional.Convolve``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Visibility of the edge-detected image. At 0, only the original image is
visible, at 1.0 only its edge-detected version is visible.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = EdgeDetect(alpha=(0.0, 1.0))
detects edges in an image and overlays the result with a variable alpha
in the range ``0.0 <= a <= 1.0`` over the old image.
"""
alpha_param = iap.handle_continuous_param(alpha, "alpha", value_range=(0, 1.0), tuple_to_uniform=True,
list_to_choice=True)
def create_matrices(_image, nb_channels, random_state_func):
alpha_sample = alpha_param.draw_sample(random_state=random_state_func)
ia.do_assert(0 <= alpha_sample <= 1.0)
matrix_nochange = np.array([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
], dtype=np.float32)
matrix_effect = np.array([
[0, 1, 0],
[1, -4, 1],
[0, 1, 0]
], dtype=np.float32)
matrix = (1-alpha_sample) * matrix_nochange + alpha_sample * matrix_effect
return [matrix] * nb_channels
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return Convolve(create_matrices, name=name, deterministic=deterministic, random_state=random_state) | python | def EdgeDetect(alpha=0, name=None, deterministic=False, random_state=None):
"""
Augmenter that detects all edges in images, marks them in
a black and white image and then overlays the result with the original
image.
dtype support::
See ``imgaug.augmenters.convolutional.Convolve``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Visibility of the edge-detected image. At 0, only the original image is
visible, at 1.0 only its edge-detected version is visible.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = EdgeDetect(alpha=(0.0, 1.0))
detects edges in an image and overlays the result with a variable alpha
in the range ``0.0 <= a <= 1.0`` over the old image.
"""
alpha_param = iap.handle_continuous_param(alpha, "alpha", value_range=(0, 1.0), tuple_to_uniform=True,
list_to_choice=True)
def create_matrices(_image, nb_channels, random_state_func):
alpha_sample = alpha_param.draw_sample(random_state=random_state_func)
ia.do_assert(0 <= alpha_sample <= 1.0)
matrix_nochange = np.array([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
], dtype=np.float32)
matrix_effect = np.array([
[0, 1, 0],
[1, -4, 1],
[0, 1, 0]
], dtype=np.float32)
matrix = (1-alpha_sample) * matrix_nochange + alpha_sample * matrix_effect
return [matrix] * nb_channels
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return Convolve(create_matrices, name=name, deterministic=deterministic, random_state=random_state) | [
"def",
"EdgeDetect",
"(",
"alpha",
"=",
"0",
",",
"name",
"=",
"None",
",",
"deterministic",
"=",
"False",
",",
"random_state",
"=",
"None",
")",
":",
"alpha_param",
"=",
"iap",
".",
"handle_continuous_param",
"(",
"alpha",
",",
"\"alpha\"",
",",
"value_range",
"=",
"(",
"0",
",",
"1.0",
")",
",",
"tuple_to_uniform",
"=",
"True",
",",
"list_to_choice",
"=",
"True",
")",
"def",
"create_matrices",
"(",
"_image",
",",
"nb_channels",
",",
"random_state_func",
")",
":",
"alpha_sample",
"=",
"alpha_param",
".",
"draw_sample",
"(",
"random_state",
"=",
"random_state_func",
")",
"ia",
".",
"do_assert",
"(",
"0",
"<=",
"alpha_sample",
"<=",
"1.0",
")",
"matrix_nochange",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"0",
",",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"1",
",",
"0",
"]",
",",
"[",
"0",
",",
"0",
",",
"0",
"]",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"matrix_effect",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"0",
",",
"1",
",",
"0",
"]",
",",
"[",
"1",
",",
"-",
"4",
",",
"1",
"]",
",",
"[",
"0",
",",
"1",
",",
"0",
"]",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"matrix",
"=",
"(",
"1",
"-",
"alpha_sample",
")",
"*",
"matrix_nochange",
"+",
"alpha_sample",
"*",
"matrix_effect",
"return",
"[",
"matrix",
"]",
"*",
"nb_channels",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"\"Unnamed%s\"",
"%",
"(",
"ia",
".",
"caller_name",
"(",
")",
",",
")",
"return",
"Convolve",
"(",
"create_matrices",
",",
"name",
"=",
"name",
",",
"deterministic",
"=",
"deterministic",
",",
"random_state",
"=",
"random_state",
")"
] | Augmenter that detects all edges in images, marks them in
a black and white image and then overlays the result with the original
image.
dtype support::
See ``imgaug.augmenters.convolutional.Convolve``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Visibility of the edge image. At 0, only the original image is
visible, at 1.0 only its edge-detected version is visible.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = EdgeDetect(alpha=(0.0, 1.0))
detects edges in an image and overlays the result with a variable alpha
in the range ``0.0 <= a <= 1.0`` over the old image. | [
"Augmenter",
"that",
"detects",
"all",
"edges",
"in",
"images",
"marks",
"them",
"in",
"a",
"black",
"and",
"white",
"image",
"and",
"then",
"overlays",
"the",
"result",
"with",
"the",
"original",
"image",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmenters/convolutional.py#L382-L445 | valid |
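A minimal usage sketch for the EdgeDetect record above. The augmenter name and its alpha parameter come from the record itself; the iaa import alias and the augment_image call are assumed from imgaug's usual Augmenter API rather than from this record.

import numpy as np
import imgaug.augmenters as iaa

# Dummy uint8 input; any HxWxC uint8 image works here.
image = np.random.randint(0, 255, size=(64, 64, 3), dtype=np.uint8)

# alpha=0.0 keeps the original image, alpha=1.0 shows only the edge image;
# a tuple samples a new alpha per image, as described in the docstring.
aug = iaa.EdgeDetect(alpha=(0.0, 1.0))
image_aug = aug.augment_image(image)
print(image_aug.shape, image_aug.dtype)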
aleju/imgaug | imgaug/augmenters/convolutional.py | DirectedEdgeDetect | def DirectedEdgeDetect(alpha=0, direction=(0.0, 1.0), name=None, deterministic=False, random_state=None):
"""
Augmenter that detects edges that have certain directions and marks them
in a black and white image and then overlays the result with the original
image.
dtype support::
See ``imgaug.augmenters.convolutional.Convolve``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Visibility of the edge image. At 0, only the original image is
visible, at 1.0 only its edge-detected version is visible.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
direction : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Angle of edges to pronounce, where 0 represents 0 degrees and 1.0
represents 360 degrees (both clockwise, starting at the top).
Default value is ``(0.0, 1.0)``, i.e. pick a random angle per image.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = DirectedEdgeDetect(alpha=1.0, direction=0)
turns input images into edge images in which edges are detected from
the top side of the image (i.e. the top sides of horizontal edges are
added to the output).
>>> aug = DirectedEdgeDetect(alpha=1.0, direction=90/360)
same as before, but detecting edges from the right (right side of each
vertical edge).
>>> aug = DirectedEdgeDetect(alpha=1.0, direction=(0.0, 1.0))
same as before, but detecting edges from a variable direction (anything
between 0 and 1.0, i.e. 0 degrees and 360 degrees, starting from the
top and moving clockwise).
>>> aug = DirectedEdgeDetect(alpha=(0.0, 0.3), direction=0)
generates edge images (edges detected from the top) and overlays them
with the input images by a variable amount between 0 and 30 percent
(e.g. for 0.3 then ``0.7*old_image + 0.3*edge_image``).
"""
alpha_param = iap.handle_continuous_param(alpha, "alpha", value_range=(0, 1.0), tuple_to_uniform=True,
list_to_choice=True)
direction_param = iap.handle_continuous_param(direction, "direction", value_range=None, tuple_to_uniform=True,
list_to_choice=True)
def create_matrices(_image, nb_channels, random_state_func):
alpha_sample = alpha_param.draw_sample(random_state=random_state_func)
ia.do_assert(0 <= alpha_sample <= 1.0)
direction_sample = direction_param.draw_sample(random_state=random_state_func)
deg = int(direction_sample * 360) % 360
rad = np.deg2rad(deg)
x = np.cos(rad - 0.5*np.pi)
y = np.sin(rad - 0.5*np.pi)
direction_vector = np.array([x, y])
matrix_effect = np.array([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
], dtype=np.float32)
for x in [-1, 0, 1]:
for y in [-1, 0, 1]:
if (x, y) != (0, 0):
cell_vector = np.array([x, y])
distance_deg = np.rad2deg(ia.angle_between_vectors(cell_vector, direction_vector))
distance = distance_deg / 180
similarity = (1 - distance)**4
matrix_effect[y+1, x+1] = similarity
matrix_effect = matrix_effect / np.sum(matrix_effect)
matrix_effect = matrix_effect * (-1)
matrix_effect[1, 1] = 1
matrix_nochange = np.array([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
], dtype=np.float32)
matrix = (1-alpha_sample) * matrix_nochange + alpha_sample * matrix_effect
return [matrix] * nb_channels
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return Convolve(create_matrices, name=name, deterministic=deterministic, random_state=random_state) | python | def DirectedEdgeDetect(alpha=0, direction=(0.0, 1.0), name=None, deterministic=False, random_state=None):
"""
Augmenter that detects edges that have certain directions and marks them
in a black and white image and then overlays the result with the original
image.
dtype support::
See ``imgaug.augmenters.convolutional.Convolve``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Visibility of the edge image. At 0, only the original image is
visible, at 1.0 only its edge-detected version is visible.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
direction : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Angle of edges to pronounce, where 0 represents 0 degrees and 1.0
represents 360 degrees (both clockwise, starting at the top).
Default value is ``(0.0, 1.0)``, i.e. pick a random angle per image.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = DirectedEdgeDetect(alpha=1.0, direction=0)
turns input images into edge images in which edges are detected from
the top side of the image (i.e. the top sides of horizontal edges are
added to the output).
>>> aug = DirectedEdgeDetect(alpha=1.0, direction=90/360)
same as before, but detecting edges from the right (right side of each
vertical edge).
>>> aug = DirectedEdgeDetect(alpha=1.0, direction=(0.0, 1.0))
same as before, but detecting edges from a variable direction (anything
between 0 and 1.0, i.e. 0 degrees and 360 degrees, starting from the
top and moving clockwise).
>>> aug = DirectedEdgeDetect(alpha=(0.0, 0.3), direction=0)
generates edge images (edges detected from the top) and overlays them
with the input images by a variable amount between 0 and 30 percent
(e.g. for 0.3 then ``0.7*old_image + 0.3*edge_image``).
"""
alpha_param = iap.handle_continuous_param(alpha, "alpha", value_range=(0, 1.0), tuple_to_uniform=True,
list_to_choice=True)
direction_param = iap.handle_continuous_param(direction, "direction", value_range=None, tuple_to_uniform=True,
list_to_choice=True)
def create_matrices(_image, nb_channels, random_state_func):
alpha_sample = alpha_param.draw_sample(random_state=random_state_func)
ia.do_assert(0 <= alpha_sample <= 1.0)
direction_sample = direction_param.draw_sample(random_state=random_state_func)
deg = int(direction_sample * 360) % 360
rad = np.deg2rad(deg)
x = np.cos(rad - 0.5*np.pi)
y = np.sin(rad - 0.5*np.pi)
direction_vector = np.array([x, y])
matrix_effect = np.array([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
], dtype=np.float32)
for x in [-1, 0, 1]:
for y in [-1, 0, 1]:
if (x, y) != (0, 0):
cell_vector = np.array([x, y])
distance_deg = np.rad2deg(ia.angle_between_vectors(cell_vector, direction_vector))
distance = distance_deg / 180
similarity = (1 - distance)**4
matrix_effect[y+1, x+1] = similarity
matrix_effect = matrix_effect / np.sum(matrix_effect)
matrix_effect = matrix_effect * (-1)
matrix_effect[1, 1] = 1
matrix_nochange = np.array([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
], dtype=np.float32)
matrix = (1-alpha_sample) * matrix_nochange + alpha_sample * matrix_effect
return [matrix] * nb_channels
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return Convolve(create_matrices, name=name, deterministic=deterministic, random_state=random_state) | [
"def",
"DirectedEdgeDetect",
"(",
"alpha",
"=",
"0",
",",
"direction",
"=",
"(",
"0.0",
",",
"1.0",
")",
",",
"name",
"=",
"None",
",",
"deterministic",
"=",
"False",
",",
"random_state",
"=",
"None",
")",
":",
"alpha_param",
"=",
"iap",
".",
"handle_continuous_param",
"(",
"alpha",
",",
"\"alpha\"",
",",
"value_range",
"=",
"(",
"0",
",",
"1.0",
")",
",",
"tuple_to_uniform",
"=",
"True",
",",
"list_to_choice",
"=",
"True",
")",
"direction_param",
"=",
"iap",
".",
"handle_continuous_param",
"(",
"direction",
",",
"\"direction\"",
",",
"value_range",
"=",
"None",
",",
"tuple_to_uniform",
"=",
"True",
",",
"list_to_choice",
"=",
"True",
")",
"def",
"create_matrices",
"(",
"_image",
",",
"nb_channels",
",",
"random_state_func",
")",
":",
"alpha_sample",
"=",
"alpha_param",
".",
"draw_sample",
"(",
"random_state",
"=",
"random_state_func",
")",
"ia",
".",
"do_assert",
"(",
"0",
"<=",
"alpha_sample",
"<=",
"1.0",
")",
"direction_sample",
"=",
"direction_param",
".",
"draw_sample",
"(",
"random_state",
"=",
"random_state_func",
")",
"deg",
"=",
"int",
"(",
"direction_sample",
"*",
"360",
")",
"%",
"360",
"rad",
"=",
"np",
".",
"deg2rad",
"(",
"deg",
")",
"x",
"=",
"np",
".",
"cos",
"(",
"rad",
"-",
"0.5",
"*",
"np",
".",
"pi",
")",
"y",
"=",
"np",
".",
"sin",
"(",
"rad",
"-",
"0.5",
"*",
"np",
".",
"pi",
")",
"direction_vector",
"=",
"np",
".",
"array",
"(",
"[",
"x",
",",
"y",
"]",
")",
"matrix_effect",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"0",
",",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"0",
",",
"0",
"]",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"for",
"x",
"in",
"[",
"-",
"1",
",",
"0",
",",
"1",
"]",
":",
"for",
"y",
"in",
"[",
"-",
"1",
",",
"0",
",",
"1",
"]",
":",
"if",
"(",
"x",
",",
"y",
")",
"!=",
"(",
"0",
",",
"0",
")",
":",
"cell_vector",
"=",
"np",
".",
"array",
"(",
"[",
"x",
",",
"y",
"]",
")",
"distance_deg",
"=",
"np",
".",
"rad2deg",
"(",
"ia",
".",
"angle_between_vectors",
"(",
"cell_vector",
",",
"direction_vector",
")",
")",
"distance",
"=",
"distance_deg",
"/",
"180",
"similarity",
"=",
"(",
"1",
"-",
"distance",
")",
"**",
"4",
"matrix_effect",
"[",
"y",
"+",
"1",
",",
"x",
"+",
"1",
"]",
"=",
"similarity",
"matrix_effect",
"=",
"matrix_effect",
"/",
"np",
".",
"sum",
"(",
"matrix_effect",
")",
"matrix_effect",
"=",
"matrix_effect",
"*",
"(",
"-",
"1",
")",
"matrix_effect",
"[",
"1",
",",
"1",
"]",
"=",
"1",
"matrix_nochange",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"0",
",",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"1",
",",
"0",
"]",
",",
"[",
"0",
",",
"0",
",",
"0",
"]",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"matrix",
"=",
"(",
"1",
"-",
"alpha_sample",
")",
"*",
"matrix_nochange",
"+",
"alpha_sample",
"*",
"matrix_effect",
"return",
"[",
"matrix",
"]",
"*",
"nb_channels",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"\"Unnamed%s\"",
"%",
"(",
"ia",
".",
"caller_name",
"(",
")",
",",
")",
"return",
"Convolve",
"(",
"create_matrices",
",",
"name",
"=",
"name",
",",
"deterministic",
"=",
"deterministic",
",",
"random_state",
"=",
"random_state",
")"
] | Augmenter that detects edges that have certain directions and marks them
in a black and white image and then overlays the result with the original
image.
dtype support::
See ``imgaug.augmenters.convolutional.Convolve``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Visibility of the edge image. At 0, only the original image is
visible, at 1.0 only its edge-detected version is visible.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
direction : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Angle of edges to pronounce, where 0 represents 0 degrees and 1.0
represents 360 degrees (both clockwise, starting at the top).
Default value is ``(0.0, 1.0)``, i.e. pick a random angle per image.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = DirectedEdgeDetect(alpha=1.0, direction=0)
turns input images into edge images in which edges are detected from
the top side of the image (i.e. the top sides of horizontal edges are
added to the output).
>>> aug = DirectedEdgeDetect(alpha=1.0, direction=90/360)
same as before, but detecting edges from the right (right side of each
vertical edge).
>>> aug = DirectedEdgeDetect(alpha=1.0, direction=(0.0, 1.0))
same as before, but detecting edges from a variable direction (anything
between 0 and 1.0, i.e. 0 degrees and 360 degrees, starting from the
top and moving clockwise).
>>> aug = DirectedEdgeDetect(alpha=(0.0, 0.3), direction=0)
generates edge images (edges detected from the top) and overlays them
with the input images by a variable amount between 0 and 30 percent
(e.g. for 0.3 then ``0.7*old_image + 0.3*edge_image``). | [
"Augmenter",
"that",
"detects",
"edges",
"that",
"have",
"certain",
"directions",
"and",
"marks",
"them",
"in",
"a",
"black",
"and",
"white",
"image",
"and",
"then",
"overlays",
"the",
"result",
"with",
"the",
"original",
"image",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmenters/convolutional.py#L450-L568 | valid |
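An illustrative sketch for the DirectedEdgeDetect record above, under the same assumptions about imgaug's augment_image entry point. The direction value is a fraction of 360 degrees, clockwise and starting at the top, as the docstring describes.

import numpy as np
import imgaug.augmenters as iaa

image = np.random.randint(0, 255, size=(64, 64, 3), dtype=np.uint8)

# Detect edges coming from the right side (90 degrees) and replace the
# image entirely with the edge image (alpha=1.0).
aug = iaa.DirectedEdgeDetect(alpha=1.0, direction=90/360)
image_aug = aug.augment_image(image)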
aleju/imgaug | imgaug/augmentables/utils.py | normalize_shape | def normalize_shape(shape):
"""
Normalize a shape tuple or array to a shape tuple.
Parameters
----------
shape : tuple of int or ndarray
The input to normalize. May optionally be an array.
Returns
-------
tuple of int
Shape tuple.
"""
if isinstance(shape, tuple):
return shape
assert ia.is_np_array(shape), (
"Expected tuple of ints or array, got %s." % (type(shape),))
return shape.shape | python | def normalize_shape(shape):
"""
Normalize a shape tuple or array to a shape tuple.
Parameters
----------
shape : tuple of int or ndarray
The input to normalize. May optionally be an array.
Returns
-------
tuple of int
Shape tuple.
"""
if isinstance(shape, tuple):
return shape
assert ia.is_np_array(shape), (
"Expected tuple of ints or array, got %s." % (type(shape),))
return shape.shape | [
"def",
"normalize_shape",
"(",
"shape",
")",
":",
"if",
"isinstance",
"(",
"shape",
",",
"tuple",
")",
":",
"return",
"shape",
"assert",
"ia",
".",
"is_np_array",
"(",
"shape",
")",
",",
"(",
"\"Expected tuple of ints or array, got %s.\"",
"%",
"(",
"type",
"(",
"shape",
")",
",",
")",
")",
"return",
"shape",
".",
"shape"
] | Normalize a shape tuple or array to a shape tuple.
Parameters
----------
shape : tuple of int or ndarray
The input to normalize. May optionally be an array.
Returns
-------
tuple of int
Shape tuple. | [
"Normalize",
"a",
"shape",
"tuple",
"or",
"array",
"to",
"a",
"shape",
"tuple",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/utils.py#L8-L27 | valid |
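A quick sketch of how normalize_shape from the record above behaves: shape tuples pass through unchanged, while numpy arrays are reduced to their .shape tuple. The import path is taken from the path field of this record.

import numpy as np
from imgaug.augmentables.utils import normalize_shape

print(normalize_shape((100, 200, 3)))                      # -> (100, 200, 3)
print(normalize_shape(np.zeros((100, 200, 3), np.uint8)))  # -> (100, 200, 3)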
aleju/imgaug | imgaug/augmentables/utils.py | project_coords | def project_coords(coords, from_shape, to_shape):
"""
Project coordinates from one image shape to another.
This performs a relative projection, e.g. a point at 60% of the old
image width will be at 60% of the new image width after projection.
Parameters
----------
coords : ndarray or tuple of number
Coordinates to project. Either a ``(N,2)`` numpy array or a tuple
of `(x,y)` coordinates.
from_shape : tuple of int or ndarray
Old image shape.
to_shape : tuple of int or ndarray
New image shape.
Returns
-------
ndarray
Projected coordinates as ``(N,2)`` ``float32`` numpy array.
"""
from_shape = normalize_shape(from_shape)
to_shape = normalize_shape(to_shape)
if from_shape[0:2] == to_shape[0:2]:
return coords
from_height, from_width = from_shape[0:2]
to_height, to_width = to_shape[0:2]
assert all([v > 0 for v in [from_height, from_width, to_height, to_width]])
# make sure to not just call np.float32(coords) here as the following lines
# perform in-place changes and np.float32(.) only copies if the input
# was *not* a float32 array
coords_proj = np.array(coords).astype(np.float32)
coords_proj[:, 0] = (coords_proj[:, 0] / from_width) * to_width
coords_proj[:, 1] = (coords_proj[:, 1] / from_height) * to_height
return coords_proj | python | def project_coords(coords, from_shape, to_shape):
"""
Project coordinates from one image shape to another.
This performs a relative projection, e.g. a point at 60% of the old
image width will be at 60% of the new image width after projection.
Parameters
----------
coords : ndarray or tuple of number
Coordinates to project. Either a ``(N,2)`` numpy array or a tuple
of `(x,y)` coordinates.
from_shape : tuple of int or ndarray
Old image shape.
to_shape : tuple of int or ndarray
New image shape.
Returns
-------
ndarray
Projected coordinates as ``(N,2)`` ``float32`` numpy array.
"""
from_shape = normalize_shape(from_shape)
to_shape = normalize_shape(to_shape)
if from_shape[0:2] == to_shape[0:2]:
return coords
from_height, from_width = from_shape[0:2]
to_height, to_width = to_shape[0:2]
assert all([v > 0 for v in [from_height, from_width, to_height, to_width]])
# make sure to not just call np.float32(coords) here as the following lines
# perform in-place changes and np.float32(.) only copies if the input
# was *not* a float32 array
coords_proj = np.array(coords).astype(np.float32)
coords_proj[:, 0] = (coords_proj[:, 0] / from_width) * to_width
coords_proj[:, 1] = (coords_proj[:, 1] / from_height) * to_height
return coords_proj | [
"def",
"project_coords",
"(",
"coords",
",",
"from_shape",
",",
"to_shape",
")",
":",
"from_shape",
"=",
"normalize_shape",
"(",
"from_shape",
")",
"to_shape",
"=",
"normalize_shape",
"(",
"to_shape",
")",
"if",
"from_shape",
"[",
"0",
":",
"2",
"]",
"==",
"to_shape",
"[",
"0",
":",
"2",
"]",
":",
"return",
"coords",
"from_height",
",",
"from_width",
"=",
"from_shape",
"[",
"0",
":",
"2",
"]",
"to_height",
",",
"to_width",
"=",
"to_shape",
"[",
"0",
":",
"2",
"]",
"assert",
"all",
"(",
"[",
"v",
">",
"0",
"for",
"v",
"in",
"[",
"from_height",
",",
"from_width",
",",
"to_height",
",",
"to_width",
"]",
"]",
")",
"# make sure to not just call np.float32(coords) here as the following lines",
"# perform in-place changes and np.float32(.) only copies if the input",
"# was *not* a float32 array",
"coords_proj",
"=",
"np",
".",
"array",
"(",
"coords",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"coords_proj",
"[",
":",
",",
"0",
"]",
"=",
"(",
"coords_proj",
"[",
":",
",",
"0",
"]",
"/",
"from_width",
")",
"*",
"to_width",
"coords_proj",
"[",
":",
",",
"1",
"]",
"=",
"(",
"coords_proj",
"[",
":",
",",
"1",
"]",
"/",
"from_height",
")",
"*",
"to_height",
"return",
"coords_proj"
] | Project coordinates from one image shape to another.
This performs a relative projection, e.g. a point at 60% of the old
image width will be at 60% of the new image width after projection.
Parameters
----------
coords : ndarray or tuple of number
Coordinates to project. Either a ``(N,2)`` numpy array or a tuple
of `(x,y)` coordinates.
from_shape : tuple of int or ndarray
Old image shape.
to_shape : tuple of int or ndarray
New image shape.
Returns
-------
ndarray
Projected coordinates as ``(N,2)`` ``float32`` numpy array. | [
"Project",
"coordinates",
"from",
"one",
"image",
"shape",
"to",
"another",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/utils.py#L31-L71 | valid |
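A small worked example of the relative projection performed by project_coords above: x coordinates are scaled by to_width/from_width and y coordinates by to_height/from_height.

import numpy as np
from imgaug.augmentables.utils import project_coords

coords = np.float32([[20.0, 10.0], [180.0, 90.0]])
# Halve both image dimensions: (H, W) = (100, 200) -> (50, 100).
projected = project_coords(coords, from_shape=(100, 200), to_shape=(50, 100))
print(projected)  # [[10.  5.] [90. 45.]]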
aleju/imgaug | imgaug/augmenters/arithmetic.py | AdditiveGaussianNoise | def AdditiveGaussianNoise(loc=0, scale=0, per_channel=False, name=None, deterministic=False, random_state=None):
"""
Add gaussian noise (aka white noise) to images.
dtype support::
See ``imgaug.augmenters.arithmetic.AddElementwise``.
Parameters
----------
loc : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Mean of the normal distribution that generates the noise.
* If a number, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list per
image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Standard deviation of the normal distribution that generates the noise.
Must be ``>= 0``. If 0 then only `loc` will be used.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list per
image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
per_channel : bool or float, optional
Whether to use the same noise value per pixel for all channels (False)
or to sample a new value for each channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255)
adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images.
>>> aug = iaa.AdditiveGaussianNoise(scale=(0, 0.1*255))
adds gaussian noise from the distribution ``N(0, s)`` to images,
where s is sampled per image from the range ``0 <= s <= 0.1*255``.
>>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255, per_channel=True)
adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images,
where the noise value is different per pixel *and* channel (e.g. a
different one for red, green and blue channels for the same pixel).
>>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255, per_channel=0.5)
adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images,
where the noise value is sometimes (50 percent of all cases) the same
per pixel for all channels and sometimes different (other 50 percent).
"""
loc2 = iap.handle_continuous_param(loc, "loc", value_range=None, tuple_to_uniform=True, list_to_choice=True)
scale2 = iap.handle_continuous_param(scale, "scale", value_range=(0, None), tuple_to_uniform=True,
list_to_choice=True)
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return AddElementwise(iap.Normal(loc=loc2, scale=scale2), per_channel=per_channel, name=name,
deterministic=deterministic, random_state=random_state) | python | def AdditiveGaussianNoise(loc=0, scale=0, per_channel=False, name=None, deterministic=False, random_state=None):
"""
Add gaussian noise (aka white noise) to images.
dtype support::
See ``imgaug.augmenters.arithmetic.AddElementwise``.
Parameters
----------
loc : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Mean of the normal distribution that generates the noise.
* If a number, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list per
image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Standard deviation of the normal distribution that generates the noise.
Must be ``>= 0``. If 0 then only `loc` will be used.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list per
image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
per_channel : bool or float, optional
Whether to use the same noise value per pixel for all channels (False)
or to sample a new value for each channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255)
adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images.
>>> aug = iaa.AdditiveGaussianNoise(scale=(0, 0.1*255))
adds gaussian noise from the distribution ``N(0, s)`` to images,
where s is sampled per image from the range ``0 <= s <= 0.1*255``.
>>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255, per_channel=True)
adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images,
where the noise value is different per pixel *and* channel (e.g. a
different one for red, green and blue channels for the same pixel).
>>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255, per_channel=0.5)
adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images,
where the noise value is sometimes (50 percent of all cases) the same
per pixel for all channels and sometimes different (other 50 percent).
"""
loc2 = iap.handle_continuous_param(loc, "loc", value_range=None, tuple_to_uniform=True, list_to_choice=True)
scale2 = iap.handle_continuous_param(scale, "scale", value_range=(0, None), tuple_to_uniform=True,
list_to_choice=True)
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return AddElementwise(iap.Normal(loc=loc2, scale=scale2), per_channel=per_channel, name=name,
deterministic=deterministic, random_state=random_state) | [
"def",
"AdditiveGaussianNoise",
"(",
"loc",
"=",
"0",
",",
"scale",
"=",
"0",
",",
"per_channel",
"=",
"False",
",",
"name",
"=",
"None",
",",
"deterministic",
"=",
"False",
",",
"random_state",
"=",
"None",
")",
":",
"loc2",
"=",
"iap",
".",
"handle_continuous_param",
"(",
"loc",
",",
"\"loc\"",
",",
"value_range",
"=",
"None",
",",
"tuple_to_uniform",
"=",
"True",
",",
"list_to_choice",
"=",
"True",
")",
"scale2",
"=",
"iap",
".",
"handle_continuous_param",
"(",
"scale",
",",
"\"scale\"",
",",
"value_range",
"=",
"(",
"0",
",",
"None",
")",
",",
"tuple_to_uniform",
"=",
"True",
",",
"list_to_choice",
"=",
"True",
")",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"\"Unnamed%s\"",
"%",
"(",
"ia",
".",
"caller_name",
"(",
")",
",",
")",
"return",
"AddElementwise",
"(",
"iap",
".",
"Normal",
"(",
"loc",
"=",
"loc2",
",",
"scale",
"=",
"scale2",
")",
",",
"per_channel",
"=",
"per_channel",
",",
"name",
"=",
"name",
",",
"deterministic",
"=",
"deterministic",
",",
"random_state",
"=",
"random_state",
")"
] | Add gaussian noise (aka white noise) to images.
dtype support::
See ``imgaug.augmenters.arithmetic.AddElementwise``.
Parameters
----------
loc : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Mean of the normal distribution that generates the noise.
* If a number, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list per
image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Standard deviation of the normal distribution that generates the noise.
Must be ``>= 0``. If 0 then only `loc` will be used.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list per
image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
per_channel : bool or float, optional
Whether to use the same noise value per pixel for all channels (False)
or to sample a new value for each channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255)
adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images.
>>> aug = iaa.AdditiveGaussianNoise(scale=(0, 0.1*255))
adds gaussian noise from the distribution ``N(0, s)`` to images,
where s is sampled per image from the range ``0 <= s <= 0.1*255``.
>>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255, per_channel=True)
adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images,
where the noise value is different per pixel *and* channel (e.g. a
different one for red, green and blue channels for the same pixel).
>>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255, per_channel=0.5)
adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images,
where the noise value is sometimes (50 percent of all cases) the same
per pixel for all channels and sometimes different (other 50 percent). | [
"Add",
"gaussian",
"noise",
"(",
"aka",
"white",
"noise",
")",
"to",
"images",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmenters/arithmetic.py#L371-L451 | valid |
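A usage sketch for the AdditiveGaussianNoise record above; augment_images is assumed to be imgaug's standard batch entry point. With per_channel=True each channel receives its own noise sample.

import numpy as np
import imgaug.augmenters as iaa

images = np.random.randint(0, 255, size=(4, 64, 64, 3), dtype=np.uint8)

# Standard deviation sampled per image from [0, 0.1*255].
aug = iaa.AdditiveGaussianNoise(scale=(0, 0.1 * 255), per_channel=True)
images_aug = aug.augment_images(images)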
aleju/imgaug | imgaug/augmenters/arithmetic.py | AdditivePoissonNoise | def AdditivePoissonNoise(lam=0, per_channel=False, name=None, deterministic=False, random_state=None):
"""
Create an augmenter to add poisson noise to images.
Poisson noise is comparable to gaussian noise as in ``AdditiveGaussianNoise``, but the values are sampled from
a poisson distribution instead of a gaussian distribution. As poisson distributions produce only positive numbers,
the signs of the sampled values are randomly flipped here.
Values of around ``10.0`` for `lam` lead to visible noise (for uint8).
Values of around ``20.0`` for `lam` lead to very visible noise (for uint8).
It is recommended to usually set `per_channel` to True.
dtype support::
See ``imgaug.augmenters.arithmetic.AddElementwise``.
Parameters
----------
lam : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Lambda parameter of the poisson distribution. Recommended values are around ``0.0`` to ``10.0``.
* If a number, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
per_channel : bool or float, optional
Whether to use the same noise value per pixel for all channels (False)
or to sample a new value for each channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.AdditivePoissonNoise(lam=5.0)
Adds poisson noise sampled from ``Poisson(5.0)`` to images.
>>> aug = iaa.AdditivePoissonNoise(lam=(0.0, 10.0))
Adds poisson noise sampled from ``Poisson(x)`` to images, where ``x`` is randomly sampled per image from the
interval ``[0.0, 10.0]``.
>>> aug = iaa.AdditivePoissonNoise(lam=5.0, per_channel=True)
Adds poisson noise sampled from ``Poisson(5.0)`` to images,
where the values are different per pixel *and* channel (e.g. a
different one for red, green and blue channels for the same pixel).
>>> aug = iaa.AdditivePoissonNoise(lam=(0.0, 10.0), per_channel=True)
Adds poisson noise sampled from ``Poisson(x)`` to images,
with ``x`` being sampled from ``uniform(0.0, 10.0)`` per image, pixel and channel.
This is the *recommended* configuration.
>>> aug = iaa.AdditivePoissonNoise(lam=2, per_channel=0.5)
Adds poisson noise sampled from the distribution ``Poisson(2)`` to images,
where the values are sometimes (50 percent of all cases) the same
per pixel for all channels and sometimes different (other 50 percent).
"""
lam2 = iap.handle_continuous_param(lam, "lam", value_range=(0, None), tuple_to_uniform=True,
list_to_choice=True)
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return AddElementwise(iap.RandomSign(iap.Poisson(lam=lam2)), per_channel=per_channel, name=name,
deterministic=deterministic, random_state=random_state) | python | def AdditivePoissonNoise(lam=0, per_channel=False, name=None, deterministic=False, random_state=None):
"""
Create an augmenter to add poisson noise to images.
Poisson noise is comparable to gaussian noise as in ``AdditiveGaussianNoise``, but the values are sampled from
a poisson distribution instead of a gaussian distribution. As poisson distributions produce only positive numbers,
the signs of the sampled values are randomly flipped here.
Values of around ``10.0`` for `lam` lead to visible noise (for uint8).
Values of around ``20.0`` for `lam` lead to very visible noise (for uint8).
It is recommended to usually set `per_channel` to True.
dtype support::
See ``imgaug.augmenters.arithmetic.AddElementwise``.
Parameters
----------
lam : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Lambda parameter of the poisson distribution. Recommended values are around ``0.0`` to ``10.0``.
* If a number, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
per_channel : bool or float, optional
Whether to use the same noise value per pixel for all channels (False)
or to sample a new value for each channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.AdditivePoissonNoise(lam=5.0)
Adds poisson noise sampled from ``Poisson(5.0)`` to images.
>>> aug = iaa.AdditivePoissonNoise(lam=(0.0, 10.0))
Adds poisson noise sampled from ``Poisson(x)`` to images, where ``x`` is randomly sampled per image from the
interval ``[0.0, 10.0]``.
>>> aug = iaa.AdditivePoissonNoise(lam=5.0, per_channel=True)
Adds poisson noise sampled from ``Poisson(5.0)`` to images,
where the values are different per pixel *and* channel (e.g. a
different one for red, green and blue channels for the same pixel).
>>> aug = iaa.AdditivePoissonNoise(lam=(0.0, 10.0), per_channel=True)
Adds poisson noise sampled from ``Poisson(x)`` to images,
with ``x`` being sampled from ``uniform(0.0, 10.0)`` per image, pixel and channel.
This is the *recommended* configuration.
>>> aug = iaa.AdditivePoissonNoise(lam=2, per_channel=0.5)
Adds poisson noise sampled from the distribution ``Poisson(2)`` to images,
where the values are sometimes (50 percent of all cases) the same
per pixel for all channels and sometimes different (other 50 percent).
"""
lam2 = iap.handle_continuous_param(lam, "lam", value_range=(0, None), tuple_to_uniform=True,
list_to_choice=True)
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return AddElementwise(iap.RandomSign(iap.Poisson(lam=lam2)), per_channel=per_channel, name=name,
deterministic=deterministic, random_state=random_state) | [
"def",
"AdditivePoissonNoise",
"(",
"lam",
"=",
"0",
",",
"per_channel",
"=",
"False",
",",
"name",
"=",
"None",
",",
"deterministic",
"=",
"False",
",",
"random_state",
"=",
"None",
")",
":",
"lam2",
"=",
"iap",
".",
"handle_continuous_param",
"(",
"lam",
",",
"\"lam\"",
",",
"value_range",
"=",
"(",
"0",
",",
"None",
")",
",",
"tuple_to_uniform",
"=",
"True",
",",
"list_to_choice",
"=",
"True",
")",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"\"Unnamed%s\"",
"%",
"(",
"ia",
".",
"caller_name",
"(",
")",
",",
")",
"return",
"AddElementwise",
"(",
"iap",
".",
"RandomSign",
"(",
"iap",
".",
"Poisson",
"(",
"lam",
"=",
"lam2",
")",
")",
",",
"per_channel",
"=",
"per_channel",
",",
"name",
"=",
"name",
",",
"deterministic",
"=",
"deterministic",
",",
"random_state",
"=",
"random_state",
")"
] | Create an augmenter to add poisson noise to images.
Poisson noise is comparable to gaussian noise as in ``AdditiveGaussianNoise``, but the values are sampled from
a poisson distribution instead of a gaussian distribution. As poisson distributions produce only positive numbers,
the sign of the sampled values are here randomly flipped.
Values of around ``10.0`` for `lam` lead to visible noise (for uint8).
Values of around ``20.0`` for `lam` lead to very visible noise (for uint8).
It is recommended to usually set `per_channel` to True.
dtype support::
See ``imgaug.augmenters.arithmetic.AddElementwise``.
Parameters
----------
lam : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Lambda parameter of the poisson distribution. Recommended values are around ``0.0`` to ``10.0``.
* If a number, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
per_channel : bool or float, optional
Whether to use the same noise value per pixel for all channels (False)
or to sample a new value for each channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.AdditivePoissonNoise(lam=5.0)
Adds poisson noise sampled from ``Poisson(5.0)`` to images.
>>> aug = iaa.AdditivePoissonNoise(lam=(0.0, 10.0))
Adds poisson noise sampled from ``Poisson(x)`` to images, where ``x`` is randomly sampled per image from the
interval ``[0.0, 10.0]``.
>>> aug = iaa.AdditivePoissonNoise(lam=5.0, per_channel=True)
Adds poisson noise sampled from ``Poisson(5.0)`` to images,
where the values are different per pixel *and* channel (e.g. a
different one for red, green and blue channels for the same pixel).
>>> aug = iaa.AdditivePoissonNoise(lam=(0.0, 10.0), per_channel=True)
Adds poisson noise sampled from ``Poisson(x)`` to images,
with ``x`` being sampled from ``uniform(0.0, 10.0)`` per image, pixel and channel.
This is the *recommended* configuration.
>>> aug = iaa.AdditivePoissonNoise(lam=2, per_channel=0.5)
Adds poisson noise sampled from the distribution ``Poisson(2)`` to images,
where the values are sometimes (50 percent of all cases) the same
per pixel for all channels and sometimes different (other 50 percent). | [
"Create",
"an",
"augmenter",
"to",
"add",
"poisson",
"noise",
"to",
"images",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmenters/arithmetic.py#L546-L626 | valid |
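A sketch of the configuration that the AdditivePoissonNoise docstring above calls recommended: lambda sampled per image and noise sampled per channel.

import numpy as np
import imgaug.augmenters as iaa

image = np.random.randint(0, 255, size=(64, 64, 3), dtype=np.uint8)

aug = iaa.AdditivePoissonNoise(lam=(0.0, 10.0), per_channel=True)
image_aug = aug.augment_image(image)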
aleju/imgaug | imgaug/augmenters/arithmetic.py | Dropout | def Dropout(p=0, per_channel=False, name=None, deterministic=False, random_state=None):
"""
Augmenter that sets a certain fraction of pixels in images to zero.
dtype support::
See ``imgaug.augmenters.arithmetic.MultiplyElementwise``.
Parameters
----------
p : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The probability of any pixel being dropped (i.e. set to zero).
* If a float, then that value will be used for all images. A value
of 1.0 would mean that all pixels will be dropped and 0.0 that
no pixels would be dropped. A value of 0.05 corresponds to 5
percent of all pixels dropped.
* If a tuple ``(a, b)``, then a value p will be sampled from the
range ``a <= p <= b`` per image and be used as the pixel's dropout
probability.
* If a StochasticParameter, then this parameter will be used to
determine per pixel whether it should be dropped (sampled value
of 0) or shouldn't (sampled value of 1).
If you instead want to provide the probability as a stochastic
parameter, you can usually do ``imgaug.parameters.Binomial(1-p)``
to convert parameter `p` to a 0/1 representation.
per_channel : bool or float, optional
Whether to use the same value (is dropped / is not dropped)
for all channels of a pixel (False) or to sample a new value for each
channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.Dropout(0.02)
drops 2 percent of all pixels.
>>> aug = iaa.Dropout((0.0, 0.05))
drops in each image a random fraction of all pixels, where the fraction
is in the range ``0.0 <= x <= 0.05``.
>>> aug = iaa.Dropout(0.02, per_channel=True)
drops 2 percent of all pixels in a channel-wise fashion, i.e. it is unlikely
for any pixel to have all channels set to zero (black pixels).
>>> aug = iaa.Dropout(0.02, per_channel=0.5)
same as previous example, but the `per_channel` feature is only active
for 50 percent of all images.
"""
if ia.is_single_number(p):
p2 = iap.Binomial(1 - p)
elif ia.is_iterable(p):
ia.do_assert(len(p) == 2)
ia.do_assert(p[0] < p[1])
ia.do_assert(0 <= p[0] <= 1.0)
ia.do_assert(0 <= p[1] <= 1.0)
p2 = iap.Binomial(iap.Uniform(1 - p[1], 1 - p[0]))
elif isinstance(p, iap.StochasticParameter):
p2 = p
else:
raise Exception("Expected p to be float or int or StochasticParameter, got %s." % (type(p),))
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return MultiplyElementwise(p2, per_channel=per_channel, name=name, deterministic=deterministic,
random_state=random_state) | python | def Dropout(p=0, per_channel=False, name=None, deterministic=False, random_state=None):
"""
Augmenter that sets a certain fraction of pixels in images to zero.
dtype support::
See ``imgaug.augmenters.arithmetic.MultiplyElementwise``.
Parameters
----------
p : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The probability of any pixel being dropped (i.e. set to zero).
* If a float, then that value will be used for all images. A value
of 1.0 would mean that all pixels will be dropped and 0.0 that
no pixels would be dropped. A value of 0.05 corresponds to 5
percent of all pixels dropped.
* If a tuple ``(a, b)``, then a value p will be sampled from the
range ``a <= p <= b`` per image and be used as the pixel's dropout
probability.
* If a StochasticParameter, then this parameter will be used to
determine per pixel whether it should be dropped (sampled value
of 0) or shouldn't (sampled value of 1).
If you instead want to provide the probability as a stochastic
parameter, you can usually do ``imgaug.parameters.Binomial(1-p)``
to convert parameter `p` to a 0/1 representation.
per_channel : bool or float, optional
Whether to use the same value (is dropped / is not dropped)
for all channels of a pixel (False) or to sample a new value for each
channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.Dropout(0.02)
drops 2 percent of all pixels.
>>> aug = iaa.Dropout((0.0, 0.05))
drops in each image a random fraction of all pixels, where the fraction
is in the range ``0.0 <= x <= 0.05``.
>>> aug = iaa.Dropout(0.02, per_channel=True)
drops 2 percent of all pixels in a channel-wise fashion, i.e. it is unlikely
for any pixel to have all channels set to zero (black pixels).
>>> aug = iaa.Dropout(0.02, per_channel=0.5)
same as previous example, but the `per_channel` feature is only active
for 50 percent of all images.
"""
if ia.is_single_number(p):
p2 = iap.Binomial(1 - p)
elif ia.is_iterable(p):
ia.do_assert(len(p) == 2)
ia.do_assert(p[0] < p[1])
ia.do_assert(0 <= p[0] <= 1.0)
ia.do_assert(0 <= p[1] <= 1.0)
p2 = iap.Binomial(iap.Uniform(1 - p[1], 1 - p[0]))
elif isinstance(p, iap.StochasticParameter):
p2 = p
else:
raise Exception("Expected p to be float or int or StochasticParameter, got %s." % (type(p),))
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return MultiplyElementwise(p2, per_channel=per_channel, name=name, deterministic=deterministic,
random_state=random_state) | [
"def",
"Dropout",
"(",
"p",
"=",
"0",
",",
"per_channel",
"=",
"False",
",",
"name",
"=",
"None",
",",
"deterministic",
"=",
"False",
",",
"random_state",
"=",
"None",
")",
":",
"if",
"ia",
".",
"is_single_number",
"(",
"p",
")",
":",
"p2",
"=",
"iap",
".",
"Binomial",
"(",
"1",
"-",
"p",
")",
"elif",
"ia",
".",
"is_iterable",
"(",
"p",
")",
":",
"ia",
".",
"do_assert",
"(",
"len",
"(",
"p",
")",
"==",
"2",
")",
"ia",
".",
"do_assert",
"(",
"p",
"[",
"0",
"]",
"<",
"p",
"[",
"1",
"]",
")",
"ia",
".",
"do_assert",
"(",
"0",
"<=",
"p",
"[",
"0",
"]",
"<=",
"1.0",
")",
"ia",
".",
"do_assert",
"(",
"0",
"<=",
"p",
"[",
"1",
"]",
"<=",
"1.0",
")",
"p2",
"=",
"iap",
".",
"Binomial",
"(",
"iap",
".",
"Uniform",
"(",
"1",
"-",
"p",
"[",
"1",
"]",
",",
"1",
"-",
"p",
"[",
"0",
"]",
")",
")",
"elif",
"isinstance",
"(",
"p",
",",
"iap",
".",
"StochasticParameter",
")",
":",
"p2",
"=",
"p",
"else",
":",
"raise",
"Exception",
"(",
"\"Expected p to be float or int or StochasticParameter, got %s.\"",
"%",
"(",
"type",
"(",
"p",
")",
",",
")",
")",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"\"Unnamed%s\"",
"%",
"(",
"ia",
".",
"caller_name",
"(",
")",
",",
")",
"return",
"MultiplyElementwise",
"(",
"p2",
",",
"per_channel",
"=",
"per_channel",
",",
"name",
"=",
"name",
",",
"deterministic",
"=",
"deterministic",
",",
"random_state",
"=",
"random_state",
")"
] | Augmenter that sets a certain fraction of pixels in images to zero.
dtype support::
See ``imgaug.augmenters.arithmetic.MultiplyElementwise``.
Parameters
----------
p : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The probability of any pixel being dropped (i.e. set to zero).
* If a float, then that value will be used for all images. A value
of 1.0 would mean that all pixels will be dropped and 0.0 that
no pixels would be dropped. A value of 0.05 corresponds to 5
percent of all pixels dropped.
* If a tuple ``(a, b)``, then a value p will be sampled from the
range ``a <= p <= b`` per image and be used as the pixel's dropout
probability.
* If a StochasticParameter, then this parameter will be used to
determine per pixel whether it should be dropped (sampled value
of 0) or shouldn't (sampled value of 1).
If you instead want to provide the probability as a stochastic
parameter, you can usually do ``imgaug.parameters.Binomial(1-p)``
to convert parameter `p` to a 0/1 representation.
per_channel : bool or float, optional
Whether to use the same value (is dropped / is not dropped)
for all channels of a pixel (False) or to sample a new value for each
channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.Dropout(0.02)
drops 2 percent of all pixels.
>>> aug = iaa.Dropout((0.0, 0.05))
drops in each image a random fraction of all pixels, where the fraction
is in the range ``0.0 <= x <= 0.05``.
>>> aug = iaa.Dropout(0.02, per_channel=True)
drops 2 percent of all pixels in a channel-wise fashion, i.e. it is unlikely
for any pixel to have all channels set to zero (black pixels).
>>> aug = iaa.Dropout(0.02, per_channel=0.5)
same as previous example, but the `per_channel` feature is only active
for 50 percent of all images. | [
"Augmenter",
"that",
"sets",
"a",
"certain",
"fraction",
"of",
"pixels",
"in",
"images",
"to",
"zero",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmenters/arithmetic.py#L977-L1059 | valid |
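A sketch for the Dropout record above. As the code in the record shows, it builds MultiplyElementwise over a Binomial(1 - p) mask, so each pixel is multiplied by 0 with probability p; the fraction printed below is only a rough illustration.

import numpy as np
import imgaug.augmenters as iaa

image = np.full((32, 32, 3), 255, dtype=np.uint8)

aug = iaa.Dropout(p=0.05, per_channel=0.5)
image_aug = aug.augment_image(image)
print((image_aug == 0).mean())  # close to 0.05 on average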
aleju/imgaug | imgaug/augmenters/arithmetic.py | CoarseDropout | def CoarseDropout(p=0, size_px=None, size_percent=None, per_channel=False, min_size=4, name=None, deterministic=False,
random_state=None):
"""
Augmenter that sets rectangular areas within images to zero.
In contrast to Dropout, these areas can have larger sizes.
(E.g. you might end up with three large black rectangles in an image.)
Note that the current implementation leads to correlated sizes,
so when there is one large area that is dropped, there is a high likelihood
that all other dropped areas are also large.
This method is implemented by generating the dropout mask at a
lower resolution (than the image has) and then upsampling the mask
before dropping the pixels.
dtype support::
See ``imgaug.augmenters.arithmetic.MultiplyElementwise``.
Parameters
----------
p : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The probability of any pixel being dropped (i.e. set to zero).
* If a float, then that value will be used for all pixels. A value
of 1.0 would mean that all pixels will be dropped. A value of
0.0 would lead to no pixels being dropped.
* If a tuple ``(a, b)``, then a value p will be sampled from the
range ``a <= p <= b`` per image and be used as the pixel's dropout
probability.
* If a StochasticParameter, then this parameter will be used to
determine per pixel whether it should be dropped (sampled value
of 0) or shouldn't (sampled value of 1).
size_px : int or tuple of int or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the dropout
mask in absolute pixel dimensions.
* If an integer, then that size will be used for both height and
width. E.g. a value of 3 would lead to a ``3x3`` mask, which is then
upsampled to ``HxW``, where ``H`` is the image height and ``W`` the image width.
* If a tuple ``(a, b)``, then two values ``M``, ``N`` will be sampled from the
range ``[a..b]`` and the mask will be generated at size ``MxN``, then
upsampled to ``HxW``.
* If a StochasticParameter, then this parameter will be used to
determine the sizes. It is expected to be discrete.
size_percent : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the dropout
mask *in percent* of the input image.
* If a float, then that value will be used as the percentage of the
height and width (relative to the original size). E.g. for value
p, the mask will be sampled from ``(p*H)x(p*W)`` and later upsampled
to ``HxW``.
* If a tuple ``(a, b)``, then two values ``m``, ``n`` will be sampled from the
interval ``(a, b)`` and used as the percentages, i.e the mask size
will be ``(m*H)x(n*W)``.
* If a StochasticParameter, then this parameter will be used to
sample the percentage values. It is expected to be continuous.
per_channel : bool or float, optional
Whether to use the same value (is dropped / is not dropped)
for all channels of a pixel (False) or to sample a new value for each
channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
min_size : int, optional
Minimum size of the low resolution mask, both width and height. If
`size_percent` or `size_px` leads to a lower value than this, `min_size`
will be used instead. This should never have a value of less than 2,
otherwise one may end up with a ``1x1`` low resolution mask, leading easily
to the whole image being dropped.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.CoarseDropout(0.02, size_percent=0.5)
drops 2 percent of all pixels on a lower-resolution image that has
50 percent of the original image's size, leading to dropped areas that
have roughly 2x2 pixels size.
>>> aug = iaa.CoarseDropout((0.0, 0.05), size_percent=(0.05, 0.5))
generates a dropout mask at 5 to 50 percent of image's size. In that mask,
0 to 5 percent of all pixels are dropped (random per image).
>>> aug = iaa.CoarseDropout((0.0, 0.05), size_px=(2, 16))
same as previous example, but the lower resolution image has 2 to 16 pixels
size.
>>> aug = iaa.CoarseDropout(0.02, size_percent=0.5, per_channel=True)
drops 2 percent of all pixels at 50 percent resolution (2x2 sizes)
in a channel-wise fashion, i.e. it is unlikely
for any pixel to have all channels set to zero (black pixels).
>>> aug = iaa.CoarseDropout(0.02, size_percent=0.5, per_channel=0.5)
same as previous example, but the `per_channel` feature is only active
for 50 percent of all images.
"""
if ia.is_single_number(p):
p2 = iap.Binomial(1 - p)
elif ia.is_iterable(p):
ia.do_assert(len(p) == 2)
ia.do_assert(p[0] < p[1])
ia.do_assert(0 <= p[0] <= 1.0)
ia.do_assert(0 <= p[1] <= 1.0)
p2 = iap.Binomial(iap.Uniform(1 - p[1], 1 - p[0]))
elif isinstance(p, iap.StochasticParameter):
p2 = p
else:
raise Exception("Expected p to be float or int or StochasticParameter, got %s." % (type(p),))
if size_px is not None:
p3 = iap.FromLowerResolution(other_param=p2, size_px=size_px, min_size=min_size)
elif size_percent is not None:
p3 = iap.FromLowerResolution(other_param=p2, size_percent=size_percent, min_size=min_size)
else:
raise Exception("Either size_px or size_percent must be set.")
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return MultiplyElementwise(p3, per_channel=per_channel, name=name, deterministic=deterministic,
random_state=random_state) | python | def CoarseDropout(p=0, size_px=None, size_percent=None, per_channel=False, min_size=4, name=None, deterministic=False,
random_state=None):
"""
Augmenter that sets rectangular areas within images to zero.
In contrast to Dropout, these areas can have larger sizes.
(E.g. you might end up with three large black rectangles in an image.)
Note that the current implementation leads to correlated sizes,
so when there is one large area that is dropped, there is a high likelihood
that all other dropped areas are also large.
This method is implemented by generating the dropout mask at a
lower resolution (than the image has) and then upsampling the mask
before dropping the pixels.
dtype support::
See ``imgaug.augmenters.arithmetic.MultiplyElementwise``.
Parameters
----------
p : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The probability of any pixel being dropped (i.e. set to zero).
* If a float, then that value will be used for all pixels. A value
of 1.0 would mean that all pixels will be dropped. A value of
0.0 would lead to no pixels being dropped.
* If a tuple ``(a, b)``, then a value p will be sampled from the
range ``a <= p <= b`` per image and be used as the pixel's dropout
probability.
* If a StochasticParameter, then this parameter will be used to
determine per pixel whether it should be dropped (sampled value
of 0) or shouldn't (sampled value of 1).
size_px : int or tuple of int or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the dropout
mask in absolute pixel dimensions.
* If an integer, then that size will be used for both height and
width. E.g. a value of 3 would lead to a ``3x3`` mask, which is then
upsampled to ``HxW``, where ``H`` is the image height and ``W`` the image width.
* If a tuple ``(a, b)``, then two values ``M``, ``N`` will be sampled from the
range ``[a..b]`` and the mask will be generated at size ``MxN``, then
upsampled to ``HxW``.
* If a StochasticParameter, then this parameter will be used to
determine the sizes. It is expected to be discrete.
size_percent : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the dropout
mask *in percent* of the input image.
* If a float, then that value will be used as the percentage of the
height and width (relative to the original size). E.g. for value
p, the mask will be sampled from ``(p*H)x(p*W)`` and later upsampled
to ``HxW``.
* If a tuple ``(a, b)``, then two values ``m``, ``n`` will be sampled from the
interval ``(a, b)`` and used as the percentages, i.e. the mask size
will be ``(m*H)x(n*W)``.
* If a StochasticParameter, then this parameter will be used to
sample the percentage values. It is expected to be continuous.
per_channel : bool or float, optional
Whether to use the same value (is dropped / is not dropped)
for all channels of a pixel (False) or to sample a new value for each
channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
min_size : int, optional
Minimum size of the low resolution mask, both width and height. If
`size_percent` or `size_px` leads to a lower value than this, `min_size`
will be used instead. This should never have a value of less than 2,
otherwise one may end up with a ``1x1`` low resolution mask, leading easily
to the whole image being dropped.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.CoarseDropout(0.02, size_percent=0.5)
drops 2 percent of all pixels on a lower-resolution image that has
50 percent of the original image's size, leading to dropped areas that
have roughly 2x2 pixels size.
>>> aug = iaa.CoarseDropout((0.0, 0.05), size_percent=(0.05, 0.5))
generates a dropout mask at 5 to 50 percent of image's size. In that mask,
0 to 5 percent of all pixels are dropped (random per image).
>>> aug = iaa.CoarseDropout((0.0, 0.05), size_px=(2, 16))
same as previous example, but the lower resolution image has 2 to 16 pixels
size.
>>> aug = iaa.CoarseDropout(0.02, size_percent=0.5, per_channel=True)
drops 2 percent of all pixels at 50 percent resolution (2x2 sizes)
in a channel-wise fashion, i.e. it is unlikely
for any pixel to have all channels set to zero (black pixels).
>>> aug = iaa.CoarseDropout(0.02, size_percent=0.5, per_channel=0.5)
same as previous example, but the `per_channel` feature is only active
for 50 percent of all images.
"""
if ia.is_single_number(p):
p2 = iap.Binomial(1 - p)
elif ia.is_iterable(p):
ia.do_assert(len(p) == 2)
ia.do_assert(p[0] < p[1])
ia.do_assert(0 <= p[0] <= 1.0)
ia.do_assert(0 <= p[1] <= 1.0)
p2 = iap.Binomial(iap.Uniform(1 - p[1], 1 - p[0]))
elif isinstance(p, iap.StochasticParameter):
p2 = p
else:
raise Exception("Expected p to be float or int or StochasticParameter, got %s." % (type(p),))
if size_px is not None:
p3 = iap.FromLowerResolution(other_param=p2, size_px=size_px, min_size=min_size)
elif size_percent is not None:
p3 = iap.FromLowerResolution(other_param=p2, size_percent=size_percent, min_size=min_size)
else:
raise Exception("Either size_px or size_percent must be set.")
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return MultiplyElementwise(p3, per_channel=per_channel, name=name, deterministic=deterministic,
random_state=random_state) | [
"def",
"CoarseDropout",
"(",
"p",
"=",
"0",
",",
"size_px",
"=",
"None",
",",
"size_percent",
"=",
"None",
",",
"per_channel",
"=",
"False",
",",
"min_size",
"=",
"4",
",",
"name",
"=",
"None",
",",
"deterministic",
"=",
"False",
",",
"random_state",
"=",
"None",
")",
":",
"if",
"ia",
".",
"is_single_number",
"(",
"p",
")",
":",
"p2",
"=",
"iap",
".",
"Binomial",
"(",
"1",
"-",
"p",
")",
"elif",
"ia",
".",
"is_iterable",
"(",
"p",
")",
":",
"ia",
".",
"do_assert",
"(",
"len",
"(",
"p",
")",
"==",
"2",
")",
"ia",
".",
"do_assert",
"(",
"p",
"[",
"0",
"]",
"<",
"p",
"[",
"1",
"]",
")",
"ia",
".",
"do_assert",
"(",
"0",
"<=",
"p",
"[",
"0",
"]",
"<=",
"1.0",
")",
"ia",
".",
"do_assert",
"(",
"0",
"<=",
"p",
"[",
"1",
"]",
"<=",
"1.0",
")",
"p2",
"=",
"iap",
".",
"Binomial",
"(",
"iap",
".",
"Uniform",
"(",
"1",
"-",
"p",
"[",
"1",
"]",
",",
"1",
"-",
"p",
"[",
"0",
"]",
")",
")",
"elif",
"isinstance",
"(",
"p",
",",
"iap",
".",
"StochasticParameter",
")",
":",
"p2",
"=",
"p",
"else",
":",
"raise",
"Exception",
"(",
"\"Expected p to be float or int or StochasticParameter, got %s.\"",
"%",
"(",
"type",
"(",
"p",
")",
",",
")",
")",
"if",
"size_px",
"is",
"not",
"None",
":",
"p3",
"=",
"iap",
".",
"FromLowerResolution",
"(",
"other_param",
"=",
"p2",
",",
"size_px",
"=",
"size_px",
",",
"min_size",
"=",
"min_size",
")",
"elif",
"size_percent",
"is",
"not",
"None",
":",
"p3",
"=",
"iap",
".",
"FromLowerResolution",
"(",
"other_param",
"=",
"p2",
",",
"size_percent",
"=",
"size_percent",
",",
"min_size",
"=",
"min_size",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Either size_px or size_percent must be set.\"",
")",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"\"Unnamed%s\"",
"%",
"(",
"ia",
".",
"caller_name",
"(",
")",
",",
")",
"return",
"MultiplyElementwise",
"(",
"p3",
",",
"per_channel",
"=",
"per_channel",
",",
"name",
"=",
"name",
",",
"deterministic",
"=",
"deterministic",
",",
"random_state",
"=",
"random_state",
")"
] | Augmenter that sets rectangular areas within images to zero.
In contrast to Dropout, these areas can have larger sizes.
(E.g. you might end up with three large black rectangles in an image.)
Note that the current implementation leads to correlated sizes,
so when there is one large area that is dropped, there is a high likelihood
that all other dropped areas are also large.
This method is implemented by generating the dropout mask at a
lower resolution (than the image has) and then upsampling the mask
before dropping the pixels.
dtype support::
See ``imgaug.augmenters.arithmetic.MultiplyElementwise``.
Parameters
----------
p : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The probability of any pixel being dropped (i.e. set to zero).
* If a float, then that value will be used for all pixels. A value
of 1.0 would mean that all pixels will be dropped. A value of
0.0 would lead to no pixels being dropped.
* If a tuple ``(a, b)``, then a value p will be sampled from the
range ``a <= p <= b`` per image and be used as the pixel's dropout
probability.
* If a StochasticParameter, then this parameter will be used to
determine per pixel whether it should be dropped (sampled value
of 0) or shouldn't (sampled value of 1).
size_px : int or tuple of int or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the dropout
mask in absolute pixel dimensions.
* If an integer, then that size will be used for both height and
width. E.g. a value of 3 would lead to a ``3x3`` mask, which is then
upsampled to ``HxW``, where ``H`` is the image height and ``W`` the image width.
* If a tuple ``(a, b)``, then two values ``M``, ``N`` will be sampled from the
range ``[a..b]`` and the mask will be generated at size ``MxN``, then
upsampled to ``HxW``.
* If a StochasticParameter, then this parameter will be used to
determine the sizes. It is expected to be discrete.
size_percent : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the dropout
mask *in percent* of the input image.
* If a float, then that value will be used as the percentage of the
height and width (relative to the original size). E.g. for value
p, the mask will be sampled from ``(p*H)x(p*W)`` and later upsampled
to ``HxW``.
* If a tuple ``(a, b)``, then two values ``m``, ``n`` will be sampled from the
interval ``(a, b)`` and used as the percentages, i.e. the mask size
will be ``(m*H)x(n*W)``.
* If a StochasticParameter, then this parameter will be used to
sample the percentage values. It is expected to be continuous.
per_channel : bool or float, optional
Whether to use the same value (is dropped / is not dropped)
for all channels of a pixel (False) or to sample a new value for each
channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
min_size : int, optional
Minimum size of the low resolution mask, both width and height. If
`size_percent` or `size_px` leads to a lower value than this, `min_size`
will be used instead. This should never have a value of less than 2,
otherwise one may end up with a ``1x1`` low resolution mask, leading easily
to the whole image being dropped.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.CoarseDropout(0.02, size_percent=0.5)
drops 2 percent of all pixels on a lower-resolution image that has
50 percent of the original image's size, leading to dropped areas that
have roughly 2x2 pixels size.
>>> aug = iaa.CoarseDropout((0.0, 0.05), size_percent=(0.05, 0.5))
generates a dropout mask at 5 to 50 percent of image's size. In that mask,
0 to 5 percent of all pixels are dropped (random per image).
>>> aug = iaa.CoarseDropout((0.0, 0.05), size_px=(2, 16))
same as previous example, but the lower resolution image has 2 to 16 pixels
size.
>>> aug = iaa.CoarseDropout(0.02, size_percent=0.5, per_channel=True)
drops 2 percent of all pixels at 50 percent resolution (2x2 sizes)
in a channel-wise fashion, i.e. it is unlikely
for any pixel to have all channels set to zero (black pixels).
>>> aug = iaa.CoarseDropout(0.02, size_percent=0.5, per_channel=0.5)
same as previous example, but the `per_channel` feature is only active
for 50 percent of all images. | [
"Augmenter",
"that",
"sets",
"rectangular",
"areas",
"within",
"images",
"to",
"zero",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmenters/arithmetic.py#L1062-L1201 | valid |
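A minimal usage sketch for ``CoarseDropout`` (illustration only, not part of the dataset row above; it assumes the usual ``iaa`` module alias and the ``augment_image`` call of the imgaug API):
>>> import numpy as np
>>> import imgaug.augmenters as iaa
>>> image = np.full((64, 64, 3), 128, dtype=np.uint8)  # dummy gray image
>>> aug = iaa.CoarseDropout(0.1, size_percent=0.1)  # 10 percent dropout, mask at 10 percent resolution
>>> image_aug = aug.augment_image(image)
Because the dropout mask is sampled at one tenth of the image size and then upsampled, the zeroed regions appear as blocks of roughly 10x10 pixels rather than as isolated pixels.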
aleju/imgaug | imgaug/augmenters/arithmetic.py | ImpulseNoise | def ImpulseNoise(p=0, name=None, deterministic=False, random_state=None):
"""
Creates an augmenter to apply impulse noise to an image.
This is identical to ``SaltAndPepper``, except that per_channel is always set to True.
dtype support::
See ``imgaug.augmenters.arithmetic.SaltAndPepper``.
"""
return SaltAndPepper(p=p, per_channel=True, name=name, deterministic=deterministic, random_state=random_state) | python | def ImpulseNoise(p=0, name=None, deterministic=False, random_state=None):
"""
Creates an augmenter to apply impulse noise to an image.
This is identical to ``SaltAndPepper``, except that per_channel is always set to True.
dtype support::
See ``imgaug.augmenters.arithmetic.SaltAndPepper``.
"""
return SaltAndPepper(p=p, per_channel=True, name=name, deterministic=deterministic, random_state=random_state) | [
"def",
"ImpulseNoise",
"(",
"p",
"=",
"0",
",",
"name",
"=",
"None",
",",
"deterministic",
"=",
"False",
",",
"random_state",
"=",
"None",
")",
":",
"return",
"SaltAndPepper",
"(",
"p",
"=",
"p",
",",
"per_channel",
"=",
"True",
",",
"name",
"=",
"name",
",",
"deterministic",
"=",
"deterministic",
",",
"random_state",
"=",
"random_state",
")"
] | Creates an augmenter to apply impulse noise to an image.
This is identical to ``SaltAndPepper``, except that per_channel is always set to True.
dtype support::
See ``imgaug.augmenters.arithmetic.SaltAndPepper``. | [
"Creates",
"an",
"augmenter",
"to",
"apply",
"impulse",
"noise",
"to",
"an",
"image",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmenters/arithmetic.py#L1360-L1371 | valid |
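A short sketch (illustrative, not taken from the repository) showing that ``ImpulseNoise`` is simply ``SaltAndPepper`` with ``per_channel`` fixed to True:
>>> import imgaug.augmenters as iaa
>>> aug_a = iaa.ImpulseNoise(0.1)
>>> aug_b = iaa.SaltAndPepper(0.1, per_channel=True)
Both augmenters replace about 10 percent of all pixel values with salt/pepper noise; because ``per_channel`` is True, the channels of a pixel are noised independently of each other.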
aleju/imgaug | imgaug/augmenters/arithmetic.py | SaltAndPepper | def SaltAndPepper(p=0, per_channel=False, name=None, deterministic=False, random_state=None):
"""
Adds salt and pepper noise to an image, i.e. some white-ish and black-ish pixels.
dtype support::
See ``imgaug.augmenters.arithmetic.ReplaceElementwise``.
Parameters
----------
p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional
Probability of changing a pixel to salt/pepper noise.
* If a float, then that value will be used for all images as the
probability.
* If a tuple ``(a, b)``, then a probability will be sampled per image
from the range ``a <= x <= b``.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then this parameter will be used as
the *mask*, i.e. it is expected to contain values between
0.0 and 1.0, where 1.0 means that salt/pepper is to be added
at that location.
per_channel : bool or float, optional
Whether to use the same value for all channels (False)
or to sample a new value for each channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.SaltAndPepper(0.05)
Replaces 5 percent of all pixels with salt/pepper.
"""
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return ReplaceElementwise(
mask=p,
replacement=iap.Beta(0.5, 0.5) * 255,
per_channel=per_channel,
name=name,
deterministic=deterministic,
random_state=random_state
) | python | def SaltAndPepper(p=0, per_channel=False, name=None, deterministic=False, random_state=None):
"""
Adds salt and pepper noise to an image, i.e. some white-ish and black-ish pixels.
dtype support::
See ``imgaug.augmenters.arithmetic.ReplaceElementwise``.
Parameters
----------
p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional
Probability of changing a pixel to salt/pepper noise.
* If a float, then that value will be used for all images as the
probability.
* If a tuple ``(a, b)``, then a probability will be sampled per image
from the range ``a <= x <= b``.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then this parameter will be used as
the *mask*, i.e. it is expected to contain values between
0.0 and 1.0, where 1.0 means that salt/pepper is to be added
at that location.
per_channel : bool or float, optional
Whether to use the same value for all channels (False)
or to sample a new value for each channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.SaltAndPepper(0.05)
Replaces 5 percent of all pixels with salt/pepper.
"""
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return ReplaceElementwise(
mask=p,
replacement=iap.Beta(0.5, 0.5) * 255,
per_channel=per_channel,
name=name,
deterministic=deterministic,
random_state=random_state
) | [
"def",
"SaltAndPepper",
"(",
"p",
"=",
"0",
",",
"per_channel",
"=",
"False",
",",
"name",
"=",
"None",
",",
"deterministic",
"=",
"False",
",",
"random_state",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"\"Unnamed%s\"",
"%",
"(",
"ia",
".",
"caller_name",
"(",
")",
",",
")",
"return",
"ReplaceElementwise",
"(",
"mask",
"=",
"p",
",",
"replacement",
"=",
"iap",
".",
"Beta",
"(",
"0.5",
",",
"0.5",
")",
"*",
"255",
",",
"per_channel",
"=",
"per_channel",
",",
"name",
"=",
"name",
",",
"deterministic",
"=",
"deterministic",
",",
"random_state",
"=",
"random_state",
")"
] | Adds salt and pepper noise to an image, i.e. some white-ish and black-ish pixels.
dtype support::
See ``imgaug.augmenters.arithmetic.ReplaceElementwise``.
Parameters
----------
p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional
Probability of changing a pixel to salt/pepper noise.
* If a float, then that value will be used for all images as the
probability.
* If a tuple ``(a, b)``, then a probability will be sampled per image
from the range ``a <= x <= b``.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then this parameter will be used as
the *mask*, i.e. it is expected to contain values between
0.0 and 1.0, where 1.0 means that salt/pepper is to be added
at that location.
per_channel : bool or float, optional
Whether to use the same value for all channels (False)
or to sample a new value for each channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.SaltAndPepper(0.05)
Replaces 5 percent of all pixels with salt/pepper. | [
"Adds",
"salt",
"and",
"pepper",
"noise",
"to",
"an",
"image",
"i",
".",
"e",
".",
"some",
"white",
"-",
"ish",
"and",
"black",
"-",
"ish",
"pixels",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmenters/arithmetic.py#L1374-L1430 | valid |
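A hedged usage sketch for ``SaltAndPepper`` (illustration only; assumes a uint8 input image and the ``augment_image`` API):
>>> import numpy as np
>>> import imgaug.augmenters as iaa
>>> image = np.full((32, 32, 3), 128, dtype=np.uint8)
>>> aug = iaa.SaltAndPepper(0.05)
>>> image_aug = aug.augment_image(image)
Roughly 5 percent of the pixels are replaced; the replacement values are drawn from ``Beta(0.5, 0.5) * 255``, so they cluster near 0 (pepper) and 255 (salt) rather than being exactly black or white.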
aleju/imgaug | imgaug/augmenters/arithmetic.py | Pepper | def Pepper(p=0, per_channel=False, name=None, deterministic=False, random_state=None):
"""
Adds pepper noise to an image, i.e. black-ish pixels.
This is similar to dropout, but slower and the black pixels are not uniformly black.
dtype support::
See ``imgaug.augmenters.arithmetic.ReplaceElementwise``.
Parameters
----------
p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional
Probability of changing a pixel to pepper noise.
* If a float, then that value will be used for all images as the
probability.
* If a tuple ``(a, b)``, then a probability will be sampled per image
from the range ``a <= x <= b``.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then this parameter will be used as
the *mask*, i.e. it is expected to contain values between
0.0 and 1.0, where 1.0 means that pepper is to be added
at that location.
per_channel : bool or float, optional
Whether to use the same value for all channels (False)
or to sample a new value for each channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.Pepper(0.05)
Replaces 5 percent of all pixels with pepper.
"""
replacement01 = iap.ForceSign(
iap.Beta(0.5, 0.5) - 0.5,
positive=False,
mode="invert"
) + 0.5
replacement = replacement01 * 255
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return ReplaceElementwise(
mask=p,
replacement=replacement,
per_channel=per_channel,
name=name,
deterministic=deterministic,
random_state=random_state
) | python | def Pepper(p=0, per_channel=False, name=None, deterministic=False, random_state=None):
"""
Adds pepper noise to an image, i.e. black-ish pixels.
This is similar to dropout, but slower and the black pixels are not uniformly black.
dtype support::
See ``imgaug.augmenters.arithmetic.ReplaceElementwise``.
Parameters
----------
p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional
Probability of changing a pixel to pepper noise.
* If a float, then that value will be used for all images as the
probability.
* If a tuple ``(a, b)``, then a probability will be sampled per image
from the range ``a <= x <= b``.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then this parameter will be used as
the *mask*, i.e. it is expected to contain values between
0.0 and 1.0, where 1.0 means that pepper is to be added
at that location.
per_channel : bool or float, optional
Whether to use the same value for all channels (False)
or to sample a new value for each channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.Pepper(0.05)
Replaces 5 percent of all pixels with pepper.
"""
replacement01 = iap.ForceSign(
iap.Beta(0.5, 0.5) - 0.5,
positive=False,
mode="invert"
) + 0.5
replacement = replacement01 * 255
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return ReplaceElementwise(
mask=p,
replacement=replacement,
per_channel=per_channel,
name=name,
deterministic=deterministic,
random_state=random_state
) | [
"def",
"Pepper",
"(",
"p",
"=",
"0",
",",
"per_channel",
"=",
"False",
",",
"name",
"=",
"None",
",",
"deterministic",
"=",
"False",
",",
"random_state",
"=",
"None",
")",
":",
"replacement01",
"=",
"iap",
".",
"ForceSign",
"(",
"iap",
".",
"Beta",
"(",
"0.5",
",",
"0.5",
")",
"-",
"0.5",
",",
"positive",
"=",
"False",
",",
"mode",
"=",
"\"invert\"",
")",
"+",
"0.5",
"replacement",
"=",
"replacement01",
"*",
"255",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"\"Unnamed%s\"",
"%",
"(",
"ia",
".",
"caller_name",
"(",
")",
",",
")",
"return",
"ReplaceElementwise",
"(",
"mask",
"=",
"p",
",",
"replacement",
"=",
"replacement",
",",
"per_channel",
"=",
"per_channel",
",",
"name",
"=",
"name",
",",
"deterministic",
"=",
"deterministic",
",",
"random_state",
"=",
"random_state",
")"
] | Adds pepper noise to an image, i.e. black-ish pixels.
This is similar to dropout, but slower and the black pixels are not uniformly black.
dtype support::
See ``imgaug.augmenters.arithmetic.ReplaceElementwise``.
Parameters
----------
p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional
Probability of changing a pixel to pepper noise.
* If a float, then that value will be used for all images as the
probability.
* If a tuple ``(a, b)``, then a probability will be sampled per image
from the range ``a <= x <= b``.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then this parameter will be used as
the *mask*, i.e. it is expected to contain values between
0.0 and 1.0, where 1.0 means that pepper is to be added
at that location.
per_channel : bool or float, optional
Whether to use the same value for all channels (False)
or to sample a new value for each channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.Pepper(0.05)
Replaces 5 percent of all pixels with pepper. | [
"Adds",
"pepper",
"noise",
"to",
"an",
"image",
"i",
".",
"e",
".",
"black",
"-",
"ish",
"pixels",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmenters/arithmetic.py#L1711-L1777 | valid |
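Illustrative sketch for ``Pepper`` (not part of the row above; same assumptions as the earlier sketches):
>>> import numpy as np
>>> import imgaug.augmenters as iaa
>>> image = np.full((32, 32, 3), 200, dtype=np.uint8)
>>> image_aug = iaa.Pepper(0.05).augment_image(image)
The ``ForceSign(Beta(0.5, 0.5) - 0.5, positive=False)`` construction keeps only the dark half of the Beta distribution, so affected pixels become black-ish values in roughly ``[0, 127]`` instead of exact zeros as with ``Dropout``.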
aleju/imgaug | imgaug/augmenters/arithmetic.py | CoarsePepper | def CoarsePepper(p=0, size_px=None, size_percent=None, per_channel=False, min_size=4, name=None, deterministic=False,
random_state=None):
"""
Adds coarse pepper noise to an image, i.e. rectangles that contain noisy black-ish pixels.
dtype support::
See ``imgaug.augmenters.arithmetic.ReplaceElementwise``.
Parameters
----------
p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional
Probability of changing a pixel to pepper noise.
* If a float, then that value will be used for all images as the
probability.
* If a tuple ``(a, b)``, then a probability will be sampled per image
from the range ``a <= x <= b.``
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then this parameter will be used as
the *mask*, i.e. it is expected to contain values between
0.0 and 1.0, where 1.0 means that pepper is to be added
at that location.
size_px : int or tuple of int or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the noise
mask in absolute pixel dimensions.
* If an integer, then that size will be used for both height and
width. E.g. a value of 3 would lead to a ``3x3`` mask, which is then
upsampled to ``HxW``, where ``H`` is the image height and ``W`` the image width.
* If a tuple ``(a, b)``, then two values ``M``, ``N`` will be sampled from the
range ``[a..b]`` and the mask will be generated at size ``MxN``, then
upsampled to ``HxW``.
* If a StochasticParameter, then this parameter will be used to
determine the sizes. It is expected to be discrete.
size_percent : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the noise
mask *in percent* of the input image.
* If a float, then that value will be used as the percentage of the
height and width (relative to the original size). E.g. for value
p, the mask will be sampled from ``(p*H)x(p*W)`` and later upsampled
to ``HxW``.
* If a tuple ``(a, b)``, then two values ``m``, ``n`` will be sampled from the
interval ``(a, b)`` and used as the percentages, i.e. the mask size
will be ``(m*H)x(n*W)``.
* If a StochasticParameter, then this parameter will be used to
sample the percentage values. It is expected to be continuous.
per_channel : bool or float, optional
Whether to use the same value (is dropped / is not dropped)
for all channels of a pixel (False) or to sample a new value for each
channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
min_size : int, optional
Minimum size of the low resolution mask, both width and height. If
`size_percent` or `size_px` leads to a lower value than this, `min_size`
will be used instead. This should never have a value of less than 2,
otherwise one may end up with a 1x1 low resolution mask, leading easily
to the whole image being replaced.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.CoarsePepper(0.05, size_percent=(0.01, 0.1))
Replaces 5 percent of all pixels with pepper in an image that has
1 to 10 percent of the input image size, then upscales the results
to the input image size, leading to large rectangular areas being replaced.
"""
mask = iap.handle_probability_param(p, "p", tuple_to_uniform=True, list_to_choice=True)
if size_px is not None:
mask_low = iap.FromLowerResolution(other_param=mask, size_px=size_px, min_size=min_size)
elif size_percent is not None:
mask_low = iap.FromLowerResolution(other_param=mask, size_percent=size_percent, min_size=min_size)
else:
raise Exception("Either size_px or size_percent must be set.")
replacement01 = iap.ForceSign(
iap.Beta(0.5, 0.5) - 0.5,
positive=False,
mode="invert"
) + 0.5
replacement = replacement01 * 255
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return ReplaceElementwise(
mask=mask_low,
replacement=replacement,
per_channel=per_channel,
name=name,
deterministic=deterministic,
random_state=random_state
) | python | def CoarsePepper(p=0, size_px=None, size_percent=None, per_channel=False, min_size=4, name=None, deterministic=False,
random_state=None):
"""
Adds coarse pepper noise to an image, i.e. rectangles that contain noisy black-ish pixels.
dtype support::
See ``imgaug.augmenters.arithmetic.ReplaceElementwise``.
Parameters
----------
p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional
Probability of changing a pixel to pepper noise.
* If a float, then that value will be used for all images as the
probability.
* If a tuple ``(a, b)``, then a probability will be sampled per image
from the range ``a <= x <= b.``
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then this parameter will be used as
the *mask*, i.e. it is expected to contain values between
0.0 and 1.0, where 1.0 means that pepper is to be added
at that location.
size_px : int or tuple of int or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the noise
mask in absolute pixel dimensions.
* If an integer, then that size will be used for both height and
width. E.g. a value of 3 would lead to a ``3x3`` mask, which is then
upsampled to ``HxW``, where ``H`` is the image height and ``W`` the image width.
* If a tuple ``(a, b)``, then two values ``M``, ``N`` will be sampled from the
range ``[a..b]`` and the mask will be generated at size ``MxN``, then
upsampled to ``HxW``.
* If a StochasticParameter, then this parameter will be used to
determine the sizes. It is expected to be discrete.
size_percent : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the noise
mask *in percent* of the input image.
* If a float, then that value will be used as the percentage of the
height and width (relative to the original size). E.g. for value
p, the mask will be sampled from ``(p*H)x(p*W)`` and later upsampled
to ``HxW``.
* If a tuple ``(a, b)``, then two values ``m``, ``n`` will be sampled from the
interval ``(a, b)`` and used as the percentages, i.e. the mask size
will be ``(m*H)x(n*W)``.
* If a StochasticParameter, then this parameter will be used to
sample the percentage values. It is expected to be continuous.
per_channel : bool or float, optional
Whether to use the same value (is dropped / is not dropped)
for all channels of a pixel (False) or to sample a new value for each
channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
min_size : int, optional
Minimum size of the low resolution mask, both width and height. If
`size_percent` or `size_px` leads to a lower value than this, `min_size`
will be used instead. This should never have a value of less than 2,
otherwise one may end up with a 1x1 low resolution mask, leading easily
to the whole image being replaced.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.CoarsePepper(0.05, size_percent=(0.01, 0.1))
Replaces 5 percent of all pixels with pepper in an image that has
1 to 10 percent of the input image size, then upscales the results
to the input image size, leading to large rectangular areas being replaced.
"""
mask = iap.handle_probability_param(p, "p", tuple_to_uniform=True, list_to_choice=True)
if size_px is not None:
mask_low = iap.FromLowerResolution(other_param=mask, size_px=size_px, min_size=min_size)
elif size_percent is not None:
mask_low = iap.FromLowerResolution(other_param=mask, size_percent=size_percent, min_size=min_size)
else:
raise Exception("Either size_px or size_percent must be set.")
replacement01 = iap.ForceSign(
iap.Beta(0.5, 0.5) - 0.5,
positive=False,
mode="invert"
) + 0.5
replacement = replacement01 * 255
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return ReplaceElementwise(
mask=mask_low,
replacement=replacement,
per_channel=per_channel,
name=name,
deterministic=deterministic,
random_state=random_state
) | [
"def",
"CoarsePepper",
"(",
"p",
"=",
"0",
",",
"size_px",
"=",
"None",
",",
"size_percent",
"=",
"None",
",",
"per_channel",
"=",
"False",
",",
"min_size",
"=",
"4",
",",
"name",
"=",
"None",
",",
"deterministic",
"=",
"False",
",",
"random_state",
"=",
"None",
")",
":",
"mask",
"=",
"iap",
".",
"handle_probability_param",
"(",
"p",
",",
"\"p\"",
",",
"tuple_to_uniform",
"=",
"True",
",",
"list_to_choice",
"=",
"True",
")",
"if",
"size_px",
"is",
"not",
"None",
":",
"mask_low",
"=",
"iap",
".",
"FromLowerResolution",
"(",
"other_param",
"=",
"mask",
",",
"size_px",
"=",
"size_px",
",",
"min_size",
"=",
"min_size",
")",
"elif",
"size_percent",
"is",
"not",
"None",
":",
"mask_low",
"=",
"iap",
".",
"FromLowerResolution",
"(",
"other_param",
"=",
"mask",
",",
"size_percent",
"=",
"size_percent",
",",
"min_size",
"=",
"min_size",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Either size_px or size_percent must be set.\"",
")",
"replacement01",
"=",
"iap",
".",
"ForceSign",
"(",
"iap",
".",
"Beta",
"(",
"0.5",
",",
"0.5",
")",
"-",
"0.5",
",",
"positive",
"=",
"False",
",",
"mode",
"=",
"\"invert\"",
")",
"+",
"0.5",
"replacement",
"=",
"replacement01",
"*",
"255",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"\"Unnamed%s\"",
"%",
"(",
"ia",
".",
"caller_name",
"(",
")",
",",
")",
"return",
"ReplaceElementwise",
"(",
"mask",
"=",
"mask_low",
",",
"replacement",
"=",
"replacement",
",",
"per_channel",
"=",
"per_channel",
",",
"name",
"=",
"name",
",",
"deterministic",
"=",
"deterministic",
",",
"random_state",
"=",
"random_state",
")"
] | Adds coarse pepper noise to an image, i.e. rectangles that contain noisy black-ish pixels.
dtype support::
See ``imgaug.augmenters.arithmetic.ReplaceElementwise``.
Parameters
----------
p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional
Probability of changing a pixel to pepper noise.
* If a float, then that value will be used for all images as the
probability.
* If a tuple ``(a, b)``, then a probability will be sampled per image
from the range ``a <= x <= b.``
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then this parameter will be used as
the *mask*, i.e. it is expected to contain values between
0.0 and 1.0, where 1.0 means that pepper is to be added
at that location.
size_px : int or tuple of int or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the noise
mask in absolute pixel dimensions.
* If an integer, then that size will be used for both height and
width. E.g. a value of 3 would lead to a ``3x3`` mask, which is then
upsampled to ``HxW``, where ``H`` is the image height and ``W`` the image width.
* If a tuple ``(a, b)``, then two values ``M``, ``N`` will be sampled from the
range ``[a..b]`` and the mask will be generated at size ``MxN``, then
upsampled to ``HxW``.
* If a StochasticParameter, then this parameter will be used to
determine the sizes. It is expected to be discrete.
size_percent : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the noise
mask *in percent* of the input image.
* If a float, then that value will be used as the percentage of the
height and width (relative to the original size). E.g. for value
p, the mask will be sampled from ``(p*H)x(p*W)`` and later upsampled
to ``HxW``.
* If a tuple ``(a, b)``, then two values ``m``, ``n`` will be sampled from the
interval ``(a, b)`` and used as the percentages, i.e. the mask size
will be ``(m*H)x(n*W)``.
* If a StochasticParameter, then this parameter will be used to
sample the percentage values. It is expected to be continuous.
per_channel : bool or float, optional
Whether to use the same value (is dropped / is not dropped)
for all channels of a pixel (False) or to sample a new value for each
channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
min_size : int, optional
Minimum size of the low resolution mask, both width and height. If
`size_percent` or `size_px` leads to a lower value than this, `min_size`
will be used instead. This should never have a value of less than 2,
otherwise one may end up with a 1x1 low resolution mask, leading easily
to the whole image being replaced.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.CoarsePepper(0.05, size_percent=(0.01, 0.1))
Replaces 5 percent of all pixels with pepper in an image that has
1 to 10 percent of the input image size, then upscales the results
to the input image size, leading to large rectangular areas being replaced. | [
"Adds",
"coarse",
"pepper",
"noise",
"to",
"an",
"image",
"i",
".",
"e",
".",
"rectangles",
"that",
"contain",
"noisy",
"black",
"-",
"ish",
"pixels",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmenters/arithmetic.py#L1780-L1890 | valid |
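Usage sketch for ``CoarsePepper`` (illustrative, not part of the dataset row; assumes a uint8 image):
>>> import numpy as np
>>> import imgaug.augmenters as iaa
>>> image = np.full((100, 100, 3), 200, dtype=np.uint8)
>>> aug = iaa.CoarsePepper(0.05, size_percent=(0.01, 0.1))
>>> image_aug = aug.augment_image(image)
The pepper mask is generated at 1 to 10 percent of the image size (never smaller than ``min_size`` pixels per side) and then upsampled, so the dark pixels come in rectangular patches rather than being scattered individually.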
aleju/imgaug | imgaug/augmenters/arithmetic.py | ContrastNormalization | def ContrastNormalization(alpha=1.0, per_channel=False, name=None, deterministic=False, random_state=None):
"""
Augmenter that changes the contrast of images.
dtype support:
See ``imgaug.augmenters.contrast.LinearContrast``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Strength of the contrast normalization. Higher values than 1.0
lead to higher contrast, lower values decrease the contrast.
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value will be sampled per image from
the range ``a <= x <= b`` and be used as the alpha value.
* If a list, then a random value will be sampled per image from
that list.
* If a StochasticParameter, then this parameter will be used to
sample the alpha value per image.
per_channel : bool or float, optional
Whether to use the same value for all channels (False)
or to sample a new value for each channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> iaa.ContrastNormalization((0.5, 1.5))
Decreases or increases contrast per image by a random factor between
0.5 and 1.5. The factor 0.5 means that any difference from the center value
(i.e. 128) will be halved, leading to less contrast.
>>> iaa.ContrastNormalization((0.5, 1.5), per_channel=0.5)
Same as before, but for 50 percent of all images the normalization is done
independently per channel (i.e. factors can vary per channel for the same
image). In the other 50 percent of all images, the factor is the same for
all channels.
"""
# placed here to avoid cyclic dependency
from . import contrast as contrast_lib
return contrast_lib.LinearContrast(alpha=alpha, per_channel=per_channel, name=name, deterministic=deterministic,
random_state=random_state) | python | def ContrastNormalization(alpha=1.0, per_channel=False, name=None, deterministic=False, random_state=None):
"""
Augmenter that changes the contrast of images.
dtype support:
See ``imgaug.augmenters.contrast.LinearContrast``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Strength of the contrast normalization. Higher values than 1.0
lead to higher contrast, lower values decrease the contrast.
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value will be sampled per image from
the range ``a <= x <= b`` and be used as the alpha value.
* If a list, then a random value will be sampled per image from
that list.
* If a StochasticParameter, then this parameter will be used to
sample the alpha value per image.
per_channel : bool or float, optional
Whether to use the same value for all channels (False)
or to sample a new value for each channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> iaa.ContrastNormalization((0.5, 1.5))
Decreases or increases contrast per image by a random factor between
0.5 and 1.5. The factor 0.5 means that any difference from the center value
(i.e. 128) will be halved, leading to less contrast.
>>> iaa.ContrastNormalization((0.5, 1.5), per_channel=0.5)
Same as before, but for 50 percent of all images the normalization is done
independently per channel (i.e. factors can vary per channel for the same
image). In the other 50 percent of all images, the factor is the same for
all channels.
"""
# placed here to avoid cyclic dependency
from . import contrast as contrast_lib
return contrast_lib.LinearContrast(alpha=alpha, per_channel=per_channel, name=name, deterministic=deterministic,
random_state=random_state) | [
"def",
"ContrastNormalization",
"(",
"alpha",
"=",
"1.0",
",",
"per_channel",
"=",
"False",
",",
"name",
"=",
"None",
",",
"deterministic",
"=",
"False",
",",
"random_state",
"=",
"None",
")",
":",
"# placed here to avoid cyclic dependency",
"from",
".",
"import",
"contrast",
"as",
"contrast_lib",
"return",
"contrast_lib",
".",
"LinearContrast",
"(",
"alpha",
"=",
"alpha",
",",
"per_channel",
"=",
"per_channel",
",",
"name",
"=",
"name",
",",
"deterministic",
"=",
"deterministic",
",",
"random_state",
"=",
"random_state",
")"
] | Augmenter that changes the contrast of images.
dtype support:
See ``imgaug.augmenters.contrast.LinearContrast``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Strength of the contrast normalization. Higher values than 1.0
lead to higher contrast, lower values decrease the contrast.
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value will be sampled per image from
the range ``a <= x <= b`` and be used as the alpha value.
* If a list, then a random value will be sampled per image from
that list.
* If a StochasticParameter, then this parameter will be used to
sample the alpha value per image.
per_channel : bool or float, optional
Whether to use the same value for all channels (False)
or to sample a new value for each channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> iaa.ContrastNormalization((0.5, 1.5))
Decreases or increases contrast per image by a random factor between
0.5 and 1.5. The factor 0.5 means that any difference from the center value
(i.e. 128) will be halved, leading to less contrast.
>>> iaa.ContrastNormalization((0.5, 1.5), per_channel=0.5)
Same as before, but for 50 percent of all images the normalization is done
independently per channel (i.e. factors can vary per channel for the same
image). In the other 50 percent of all images, the factor is the same for
all channels. | [
"Augmenter",
"that",
"changes",
"the",
"contrast",
"of",
"images",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmenters/arithmetic.py#L2151-L2207 | valid |
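A sketch of the effect of ``ContrastNormalization`` (illustration only; the exact per-dtype behaviour is defined by ``LinearContrast``):
>>> import numpy as np
>>> import imgaug.augmenters as iaa
>>> image = np.uint8([[[100, 128, 156]]])  # a single pixel with three channels
>>> aug = iaa.ContrastNormalization(0.5)
>>> image_aug = aug.augment_image(image)
With ``alpha=0.5`` each value moves halfway towards the center value 128 described in the docstring, i.e. roughly ``128 + 0.5 * (v - 128)``, so 100 maps to about 114 and 156 to about 142.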
aleju/imgaug | imgaug/imgaug.py | is_single_float | def is_single_float(val):
"""
Checks whether a variable is a float.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a float. Otherwise False.
"""
return isinstance(val, numbers.Real) and not is_single_integer(val) and not isinstance(val, bool) | python | def is_single_float(val):
"""
Checks whether a variable is a float.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a float. Otherwise False.
"""
return isinstance(val, numbers.Real) and not is_single_integer(val) and not isinstance(val, bool) | [
"def",
"is_single_float",
"(",
"val",
")",
":",
"return",
"isinstance",
"(",
"val",
",",
"numbers",
".",
"Real",
")",
"and",
"not",
"is_single_integer",
"(",
"val",
")",
"and",
"not",
"isinstance",
"(",
"val",
",",
"bool",
")"
] | Checks whether a variable is a float.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a float. Otherwise False. | [
"Checks",
"whether",
"a",
"variable",
"is",
"a",
"float",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L94-L109 | valid |
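A quick illustrative check of ``is_single_float`` (sketch, not taken from the repository):
>>> import imgaug as ia
>>> ia.is_single_float(1.5)
True
>>> ia.is_single_float(1)
False
>>> ia.is_single_float(True)
False
Plain Python ints and booleans are explicitly excluded, which is why the helper is not simply ``isinstance(val, numbers.Real)``.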
aleju/imgaug | imgaug/imgaug.py | is_integer_array | def is_integer_array(val):
"""
Checks whether a variable is a numpy integer array.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a numpy integer array. Otherwise False.
"""
return is_np_array(val) and issubclass(val.dtype.type, np.integer) | python | def is_integer_array(val):
"""
Checks whether a variable is a numpy integer array.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a numpy integer array. Otherwise False.
"""
return is_np_array(val) and issubclass(val.dtype.type, np.integer) | [
"def",
"is_integer_array",
"(",
"val",
")",
":",
"return",
"is_np_array",
"(",
"val",
")",
"and",
"issubclass",
"(",
"val",
".",
"dtype",
".",
"type",
",",
"np",
".",
"integer",
")"
] | Checks whether a variable is a numpy integer array.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a numpy integer array. Otherwise False. | [
"Checks",
"whether",
"a",
"variable",
"is",
"a",
"numpy",
"integer",
"array",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L185-L200 | valid |
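Illustrative behaviour of ``is_integer_array`` (sketch, not from the repository):
>>> import numpy as np
>>> import imgaug as ia
>>> ia.is_integer_array(np.zeros((2, 2), dtype=np.int32))
True
>>> ia.is_integer_array(np.zeros((2, 2), dtype=np.float32))
False
>>> ia.is_integer_array([1, 2, 3])  # plain lists are not numpy arrays
False
The check requires both a numpy array and an integer dtype.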
aleju/imgaug | imgaug/imgaug.py | is_float_array | def is_float_array(val):
"""
Checks whether a variable is a numpy float array.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a numpy float array. Otherwise False.
"""
return is_np_array(val) and issubclass(val.dtype.type, np.floating) | python | def is_float_array(val):
"""
Checks whether a variable is a numpy float array.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a numpy float array. Otherwise False.
"""
return is_np_array(val) and issubclass(val.dtype.type, np.floating) | [
"def",
"is_float_array",
"(",
"val",
")",
":",
"return",
"is_np_array",
"(",
"val",
")",
"and",
"issubclass",
"(",
"val",
".",
"dtype",
".",
"type",
",",
"np",
".",
"floating",
")"
] | Checks whether a variable is a numpy float array.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a numpy float array. Otherwise False. | [
"Checks",
"whether",
"a",
"variable",
"is",
"a",
"numpy",
"float",
"array",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L203-L218 | valid |
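The float counterpart behaves analogously (illustrative sketch):
>>> import numpy as np
>>> import imgaug as ia
>>> ia.is_float_array(np.zeros((3,), dtype=np.float64))
True
>>> ia.is_float_array(np.zeros((3,), dtype=np.uint8))
False
Only the dtype is inspected, not the values, so a float array that happens to contain only whole numbers still counts as a float array.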
aleju/imgaug | imgaug/imgaug.py | is_callable | def is_callable(val):
"""
Checks whether a variable is a callable, e.g. a function.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a callable. Otherwise False.
"""
# python 3.x with x <= 2 does not support callable(), apparently
if sys.version_info[0] == 3 and sys.version_info[1] <= 2:
return hasattr(val, '__call__')
else:
return callable(val) | python | def is_callable(val):
"""
Checks whether a variable is a callable, e.g. a function.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a callable. Otherwise False.
"""
# python 3.x with x <= 2 does not support callable(), apparently
if sys.version_info[0] == 3 and sys.version_info[1] <= 2:
return hasattr(val, '__call__')
else:
return callable(val) | [
"def",
"is_callable",
"(",
"val",
")",
":",
"# python 3.x with x <= 2 does not support callable(), apparently",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
"==",
"3",
"and",
"sys",
".",
"version_info",
"[",
"1",
"]",
"<=",
"2",
":",
"return",
"hasattr",
"(",
"val",
",",
"'__call__'",
")",
"else",
":",
"return",
"callable",
"(",
"val",
")"
] | Checks whether a variable is a callable, e.g. a function.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a callable. Otherwise False. | [
"Checks",
"whether",
"a",
"variable",
"is",
"a",
"callable",
"e",
".",
"g",
".",
"a",
"function",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L221-L240 | valid |
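Illustrative sketch for ``is_callable`` (not from the repository):
>>> import imgaug as ia
>>> ia.is_callable(len)
True
>>> ia.is_callable(lambda x: x + 1)
True
>>> ia.is_callable("len")
False
On current interpreters this simply wraps the built-in ``callable``; the ``hasattr(val, '__call__')`` branch only matters for the very old Python 3.x releases (``<= 3.2`` per the version check above).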
aleju/imgaug | imgaug/imgaug.py | flatten | def flatten(nested_iterable):
"""
Flattens arbitrarily nested lists/tuples.
Code partially taken from https://stackoverflow.com/a/10824420.
Parameters
----------
nested_iterable
A list or tuple of arbitrarily nested values.
Yields
------
any
Non-list and non-tuple values in `nested_iterable`.
"""
# don't just check if something is iterable here, because then strings
# and arrays will be split into their characters and components
if not isinstance(nested_iterable, (list, tuple)):
yield nested_iterable
else:
for i in nested_iterable:
if isinstance(i, (list, tuple)):
for j in flatten(i):
yield j
else:
yield i | python | def flatten(nested_iterable):
"""
Flattens arbitrarily nested lists/tuples.
Code partially taken from https://stackoverflow.com/a/10824420.
Parameters
----------
nested_iterable
A list or tuple of arbitrarily nested values.
Yields
------
any
Non-list and non-tuple values in `nested_iterable`.
"""
# don't just check if something is iterable here, because then strings
# and arrays will be split into their characters and components
if not isinstance(nested_iterable, (list, tuple)):
yield nested_iterable
else:
for i in nested_iterable:
if isinstance(i, (list, tuple)):
for j in flatten(i):
yield j
else:
yield i | [
"def",
"flatten",
"(",
"nested_iterable",
")",
":",
"# don't just check if something is iterable here, because then strings",
"# and arrays will be split into their characters and components",
"if",
"not",
"isinstance",
"(",
"nested_iterable",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"yield",
"nested_iterable",
"else",
":",
"for",
"i",
"in",
"nested_iterable",
":",
"if",
"isinstance",
"(",
"i",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"for",
"j",
"in",
"flatten",
"(",
"i",
")",
":",
"yield",
"j",
"else",
":",
"yield",
"i"
] | Flattens arbitrarily nested lists/tuples.
Code partially taken from https://stackoverflow.com/a/10824420.
Parameters
----------
nested_iterable
A list or tuple of arbitrarily nested values.
Yields
------
any
Non-list and non-tuple values in `nested_iterable`. | [
"Flattens",
"arbitrarily",
"nested",
"lists",
"/",
"tuples",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L261-L288 | valid |
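A short usage sketch for `flatten`, assuming `import imgaug as ia` exposes it as in this version of the library:

import imgaug as ia

# arbitrarily nested lists/tuples are yielded as a flat sequence
print(list(ia.flatten([1, (2, [3, 4]), 5])))  # [1, 2, 3, 4, 5]

# strings are not split into characters, as noted in the code comment above
print(list(ia.flatten("abc")))                # ['abc']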
aleju/imgaug | imgaug/imgaug.py | new_random_state | def new_random_state(seed=None, fully_random=False):
"""
Returns a new random state.
Parameters
----------
seed : None or int, optional
Optional seed value to use.
The same datatypes are allowed as for ``numpy.random.RandomState(seed)``.
fully_random : bool, optional
Whether to use numpy's random initialization for the
RandomState (used if set to True). If False, a seed is sampled from
the global random state, which is a bit faster and hence the default.
Returns
-------
numpy.random.RandomState
The new random state.
"""
if seed is None:
if not fully_random:
# sample manually a seed instead of just RandomState(),
# because the latter one
# is way slower.
seed = CURRENT_RANDOM_STATE.randint(SEED_MIN_VALUE, SEED_MAX_VALUE, 1)[0]
return np.random.RandomState(seed) | python | def new_random_state(seed=None, fully_random=False):
"""
Returns a new random state.
Parameters
----------
seed : None or int, optional
Optional seed value to use.
The same datatypes are allowed as for ``numpy.random.RandomState(seed)``.
fully_random : bool, optional
Whether to use numpy's random initialization for the
RandomState (used if set to True). If False, a seed is sampled from
the global random state, which is a bit faster and hence the default.
Returns
-------
numpy.random.RandomState
The new random state.
"""
if seed is None:
if not fully_random:
# sample manually a seed instead of just RandomState(),
# because the latter one
# is way slower.
seed = CURRENT_RANDOM_STATE.randint(SEED_MIN_VALUE, SEED_MAX_VALUE, 1)[0]
return np.random.RandomState(seed) | [
"def",
"new_random_state",
"(",
"seed",
"=",
"None",
",",
"fully_random",
"=",
"False",
")",
":",
"if",
"seed",
"is",
"None",
":",
"if",
"not",
"fully_random",
":",
"# sample manually a seed instead of just RandomState(),",
"# because the latter one",
"# is way slower.",
"seed",
"=",
"CURRENT_RANDOM_STATE",
".",
"randint",
"(",
"SEED_MIN_VALUE",
",",
"SEED_MAX_VALUE",
",",
"1",
")",
"[",
"0",
"]",
"return",
"np",
".",
"random",
".",
"RandomState",
"(",
"seed",
")"
] | Returns a new random state.
Parameters
----------
seed : None or int, optional
Optional seed value to use.
The same datatypes are allowed as for ``numpy.random.RandomState(seed)``.
fully_random : bool, optional
Whether to use numpy's random initialization for the
RandomState (used if set to True). If False, a seed is sampled from
the global random state, which is a bit faster and hence the default.
Returns
-------
numpy.random.RandomState
The new random state. | [
"Returns",
"a",
"new",
"random",
"state",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L336-L363 | valid |
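A usage sketch for `new_random_state`, assuming `import imgaug as ia` and numpy are available:

import numpy as np
import imgaug as ia

rs_seeded = ia.new_random_state(1234)             # reproducible: seeded explicitly
rs_default = ia.new_random_state()                # seed sampled from imgaug's global random state
rs_full = ia.new_random_state(fully_random=True)  # seeded by numpy's own initialization

print(isinstance(rs_seeded, np.random.RandomState))  # True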
aleju/imgaug | imgaug/imgaug.py | copy_random_state | def copy_random_state(random_state, force_copy=False):
"""
Creates a copy of a random state.
Parameters
----------
random_state : numpy.random.RandomState
The random state to copy.
force_copy : bool, optional
If True, this function will always create a copy of every random
state. If False, it will not copy numpy's default random state,
but all other random states.
Returns
-------
rs_copy : numpy.random.RandomState
The copied random state.
"""
if random_state == np.random and not force_copy:
return random_state
else:
rs_copy = dummy_random_state()
orig_state = random_state.get_state()
rs_copy.set_state(orig_state)
return rs_copy | python | def copy_random_state(random_state, force_copy=False):
"""
Creates a copy of a random state.
Parameters
----------
random_state : numpy.random.RandomState
The random state to copy.
force_copy : bool, optional
If True, this function will always create a copy of every random
state. If False, it will not copy numpy's default random state,
but all other random states.
Returns
-------
rs_copy : numpy.random.RandomState
The copied random state.
"""
if random_state == np.random and not force_copy:
return random_state
else:
rs_copy = dummy_random_state()
orig_state = random_state.get_state()
rs_copy.set_state(orig_state)
return rs_copy | [
"def",
"copy_random_state",
"(",
"random_state",
",",
"force_copy",
"=",
"False",
")",
":",
"if",
"random_state",
"==",
"np",
".",
"random",
"and",
"not",
"force_copy",
":",
"return",
"random_state",
"else",
":",
"rs_copy",
"=",
"dummy_random_state",
"(",
")",
"orig_state",
"=",
"random_state",
".",
"get_state",
"(",
")",
"rs_copy",
".",
"set_state",
"(",
"orig_state",
")",
"return",
"rs_copy"
] | Creates a copy of a random state.
Parameters
----------
random_state : numpy.random.RandomState
The random state to copy.
force_copy : bool, optional
If True, this function will always create a copy of every random
state. If False, it will not copy numpy's default random state,
but all other random states.
Returns
-------
rs_copy : numpy.random.RandomState
The copied random state. | [
"Creates",
"a",
"copy",
"of",
"a",
"random",
"state",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L379-L405 | valid |
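A sketch showing that `copy_random_state` duplicates the internal state (assumes `import imgaug as ia`):

import imgaug as ia

rs = ia.new_random_state(1)
rs_copy = ia.copy_random_state(rs)

# both generators produce the same next sample because their states are identical
print(rs.randint(0, 1000) == rs_copy.randint(0, 1000))  # True

# note: numpy's global module np.random is returned as-is unless force_copy=True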
aleju/imgaug | imgaug/imgaug.py | derive_random_states | def derive_random_states(random_state, n=1):
"""
Create N new random states based on an existing random state or seed.
Parameters
----------
random_state : numpy.random.RandomState
Random state or seed from which to derive new random states.
n : int, optional
Number of random states to derive.
Returns
-------
list of numpy.random.RandomState
Derived random states.
"""
seed_ = random_state.randint(SEED_MIN_VALUE, SEED_MAX_VALUE, 1)[0]
return [new_random_state(seed_+i) for i in sm.xrange(n)] | python | def derive_random_states(random_state, n=1):
"""
Create N new random states based on an existing random state or seed.
Parameters
----------
random_state : numpy.random.RandomState
Random state or seed from which to derive new random states.
n : int, optional
Number of random states to derive.
Returns
-------
list of numpy.random.RandomState
Derived random states.
"""
seed_ = random_state.randint(SEED_MIN_VALUE, SEED_MAX_VALUE, 1)[0]
return [new_random_state(seed_+i) for i in sm.xrange(n)] | [
"def",
"derive_random_states",
"(",
"random_state",
",",
"n",
"=",
"1",
")",
":",
"seed_",
"=",
"random_state",
".",
"randint",
"(",
"SEED_MIN_VALUE",
",",
"SEED_MAX_VALUE",
",",
"1",
")",
"[",
"0",
"]",
"return",
"[",
"new_random_state",
"(",
"seed_",
"+",
"i",
")",
"for",
"i",
"in",
"sm",
".",
"xrange",
"(",
"n",
")",
"]"
] | Create N new random states based on an existing random state or seed.
Parameters
----------
random_state : numpy.random.RandomState
Random state or seed from which to derive new random states.
n : int, optional
Number of random states to derive.
Returns
-------
list of numpy.random.RandomState
Derived random states. | [
"Create",
"N",
"new",
"random",
"states",
"based",
"on",
"an",
"existing",
"random",
"state",
"or",
"seed",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L427-L446 | valid |
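A sketch for `derive_random_states` (assumes `import imgaug as ia`); deriving draws a seed from the parent and therefore advances its state:

import imgaug as ia

parent = ia.new_random_state(42)
children = ia.derive_random_states(parent, n=3)
print(len(children))  # 3
# the children are seeded with consecutive seeds sampled from the parent,
# so the same parent state always yields the same children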
aleju/imgaug | imgaug/imgaug.py | _quokka_normalize_extract | def _quokka_normalize_extract(extract):
"""
Generate a normalized rectangle to be extracted from the standard quokka image.
Parameters
----------
extract : 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
Unnormalized representation of the image subarea to be extracted.
* If string ``square``, then a squared area ``(x: 0 to max 643, y: 0 to max 643)``
will be extracted from the image.
* If a tuple, then expected to contain four numbers denoting ``x1``, ``y1``, ``x2``
and ``y2``.
* If a BoundingBox, then that bounding box's area will be extracted from the image.
* If a BoundingBoxesOnImage, then expected to contain exactly one bounding box
and a shape matching the full image dimensions (i.e. (643, 960, *)). Then the
one bounding box will be used similar to BoundingBox.
Returns
-------
bb : imgaug.BoundingBox
Normalized representation of the area to extract from the standard quokka image.
"""
# TODO get rid of this deferred import
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
if extract == "square":
bb = BoundingBox(x1=0, y1=0, x2=643, y2=643)
elif isinstance(extract, tuple) and len(extract) == 4:
bb = BoundingBox(x1=extract[0], y1=extract[1], x2=extract[2], y2=extract[3])
elif isinstance(extract, BoundingBox):
bb = extract
elif isinstance(extract, BoundingBoxesOnImage):
do_assert(len(extract.bounding_boxes) == 1)
do_assert(extract.shape[0:2] == (643, 960))
bb = extract.bounding_boxes[0]
else:
raise Exception(
"Expected 'square' or tuple of four entries or BoundingBox or BoundingBoxesOnImage "
+ "for parameter 'extract', got %s." % (type(extract),)
)
return bb | python | def _quokka_normalize_extract(extract):
"""
Generate a normalized rectangle to be extracted from the standard quokka image.
Parameters
----------
extract : 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
Unnormalized representation of the image subarea to be extracted.
* If string ``square``, then a squared area ``(x: 0 to max 643, y: 0 to max 643)``
will be extracted from the image.
* If a tuple, then expected to contain four numbers denoting ``x1``, ``y1``, ``x2``
and ``y2``.
* If a BoundingBox, then that bounding box's area will be extracted from the image.
* If a BoundingBoxesOnImage, then expected to contain exactly one bounding box
and a shape matching the full image dimensions (i.e. (643, 960, *)). Then the
one bounding box will be used similar to BoundingBox.
Returns
-------
bb : imgaug.BoundingBox
Normalized representation of the area to extract from the standard quokka image.
"""
# TODO get rid of this deferred import
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
if extract == "square":
bb = BoundingBox(x1=0, y1=0, x2=643, y2=643)
elif isinstance(extract, tuple) and len(extract) == 4:
bb = BoundingBox(x1=extract[0], y1=extract[1], x2=extract[2], y2=extract[3])
elif isinstance(extract, BoundingBox):
bb = extract
elif isinstance(extract, BoundingBoxesOnImage):
do_assert(len(extract.bounding_boxes) == 1)
do_assert(extract.shape[0:2] == (643, 960))
bb = extract.bounding_boxes[0]
else:
raise Exception(
"Expected 'square' or tuple of four entries or BoundingBox or BoundingBoxesOnImage "
+ "for parameter 'extract', got %s." % (type(extract),)
)
return bb | [
"def",
"_quokka_normalize_extract",
"(",
"extract",
")",
":",
"# TODO get rid of this deferred import",
"from",
"imgaug",
".",
"augmentables",
".",
"bbs",
"import",
"BoundingBox",
",",
"BoundingBoxesOnImage",
"if",
"extract",
"==",
"\"square\"",
":",
"bb",
"=",
"BoundingBox",
"(",
"x1",
"=",
"0",
",",
"y1",
"=",
"0",
",",
"x2",
"=",
"643",
",",
"y2",
"=",
"643",
")",
"elif",
"isinstance",
"(",
"extract",
",",
"tuple",
")",
"and",
"len",
"(",
"extract",
")",
"==",
"4",
":",
"bb",
"=",
"BoundingBox",
"(",
"x1",
"=",
"extract",
"[",
"0",
"]",
",",
"y1",
"=",
"extract",
"[",
"1",
"]",
",",
"x2",
"=",
"extract",
"[",
"2",
"]",
",",
"y2",
"=",
"extract",
"[",
"3",
"]",
")",
"elif",
"isinstance",
"(",
"extract",
",",
"BoundingBox",
")",
":",
"bb",
"=",
"extract",
"elif",
"isinstance",
"(",
"extract",
",",
"BoundingBoxesOnImage",
")",
":",
"do_assert",
"(",
"len",
"(",
"extract",
".",
"bounding_boxes",
")",
"==",
"1",
")",
"do_assert",
"(",
"extract",
".",
"shape",
"[",
"0",
":",
"2",
"]",
"==",
"(",
"643",
",",
"960",
")",
")",
"bb",
"=",
"extract",
".",
"bounding_boxes",
"[",
"0",
"]",
"else",
":",
"raise",
"Exception",
"(",
"\"Expected 'square' or tuple of four entries or BoundingBox or BoundingBoxesOnImage \"",
"+",
"\"for parameter 'extract', got %s.\"",
"%",
"(",
"type",
"(",
"extract",
")",
",",
")",
")",
"return",
"bb"
] | Generate a normalized rectangle to be extracted from the standard quokka image.
Parameters
----------
extract : 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
Unnormalized representation of the image subarea to be extracted.
* If string ``square``, then a squared area ``(x: 0 to max 643, y: 0 to max 643)``
will be extracted from the image.
* If a tuple, then expected to contain four numbers denoting ``x1``, ``y1``, ``x2``
and ``y2``.
* If a BoundingBox, then that bounding box's area will be extracted from the image.
* If a BoundingBoxesOnImage, then expected to contain exactly one bounding box
and a shape matching the full image dimensions (i.e. (643, 960, *)). Then the
one bounding box will be used similar to BoundingBox.
Returns
-------
bb : imgaug.BoundingBox
Normalized representation of the area to extract from the standard quokka image. | [
"Generate",
"a",
"normalized",
"rectangle",
"to",
"be",
"extract",
"from",
"the",
"standard",
"quokka",
"image",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L464-L506 | valid |
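An illustration of the extract normalization above; it imports the private helper directly, purely for demonstration, and assumes the module layout at this commit:

from imgaug.imgaug import _quokka_normalize_extract  # private helper, shown here only for illustration

bb = _quokka_normalize_extract("square")
print(bb.x1, bb.y1, bb.x2, bb.y2)   # 0 0 643 643

bb = _quokka_normalize_extract((10, 20, 110, 220))
print(bb.x1, bb.y1, bb.x2, bb.y2)   # 10 20 110 220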
aleju/imgaug | imgaug/imgaug.py | _compute_resized_shape | def _compute_resized_shape(from_shape, to_shape):
"""
Computes the intended new shape of an image-like array after resizing.
Parameters
----------
from_shape : tuple or ndarray
Old shape of the array. Usually expected to be a tuple of form ``(H, W)`` or ``(H, W, C)`` or
alternatively an array with two or three dimensions.
to_shape : None or tuple of ints or tuple of floats or int or float or ndarray
New shape of the array.
* If None, then `from_shape` will be used as the new shape.
* If an int ``V``, then the new shape will be ``(V, V, [C])``, where ``C`` will be added if it
is part of `from_shape`.
* If a float ``V``, then the new shape will be ``(H*V, W*V, [C])``, where ``H`` and ``W`` are the old
height/width.
* If a tuple ``(H', W', [C'])`` of ints, then ``H'`` and ``W'`` will be used as the new height
and width.
* If a tuple ``(H', W', [C'])`` of floats (except ``C``), then ``H'`` and ``W'`` will
be used as the new height and width.
* If a numpy array, then the array's shape will be used.
Returns
-------
to_shape_computed : tuple of int
New shape.
"""
if is_np_array(from_shape):
from_shape = from_shape.shape
if is_np_array(to_shape):
to_shape = to_shape.shape
to_shape_computed = list(from_shape)
if to_shape is None:
pass
elif isinstance(to_shape, tuple):
do_assert(len(from_shape) in [2, 3])
do_assert(len(to_shape) in [2, 3])
if len(from_shape) == 3 and len(to_shape) == 3:
do_assert(from_shape[2] == to_shape[2])
elif len(to_shape) == 3:
to_shape_computed.append(to_shape[2])
do_assert(all([v is None or is_single_number(v) for v in to_shape[0:2]]),
"Expected the first two entries in to_shape to be None or numbers, "
+ "got types %s." % (str([type(v) for v in to_shape[0:2]]),))
for i, from_shape_i in enumerate(from_shape[0:2]):
if to_shape[i] is None:
to_shape_computed[i] = from_shape_i
elif is_single_integer(to_shape[i]):
to_shape_computed[i] = to_shape[i]
else: # float
to_shape_computed[i] = int(np.round(from_shape_i * to_shape[i]))
elif is_single_integer(to_shape) or is_single_float(to_shape):
to_shape_computed = _compute_resized_shape(from_shape, (to_shape, to_shape))
else:
raise Exception("Expected to_shape to be None or ndarray or tuple of floats or tuple of ints or single int "
+ "or single float, got %s." % (type(to_shape),))
return tuple(to_shape_computed) | python | def _compute_resized_shape(from_shape, to_shape):
"""
Computes the intended new shape of an image-like array after resizing.
Parameters
----------
from_shape : tuple or ndarray
Old shape of the array. Usually expected to be a tuple of form ``(H, W)`` or ``(H, W, C)`` or
alternatively an array with two or three dimensions.
to_shape : None or tuple of ints or tuple of floats or int or float or ndarray
New shape of the array.
* If None, then `from_shape` will be used as the new shape.
* If an int ``V``, then the new shape will be ``(V, V, [C])``, where ``C`` will be added if it
is part of `from_shape`.
* If a float ``V``, then the new shape will be ``(H*V, W*V, [C])``, where ``H`` and ``W`` are the old
height/width.
* If a tuple ``(H', W', [C'])`` of ints, then ``H'`` and ``W'`` will be used as the new height
and width.
* If a tuple ``(H', W', [C'])`` of floats (except ``C``), then ``H'`` and ``W'`` will
be used as the new height and width.
* If a numpy array, then the array's shape will be used.
Returns
-------
to_shape_computed : tuple of int
New shape.
"""
if is_np_array(from_shape):
from_shape = from_shape.shape
if is_np_array(to_shape):
to_shape = to_shape.shape
to_shape_computed = list(from_shape)
if to_shape is None:
pass
elif isinstance(to_shape, tuple):
do_assert(len(from_shape) in [2, 3])
do_assert(len(to_shape) in [2, 3])
if len(from_shape) == 3 and len(to_shape) == 3:
do_assert(from_shape[2] == to_shape[2])
elif len(to_shape) == 3:
to_shape_computed.append(to_shape[2])
do_assert(all([v is None or is_single_number(v) for v in to_shape[0:2]]),
"Expected the first two entries in to_shape to be None or numbers, "
+ "got types %s." % (str([type(v) for v in to_shape[0:2]]),))
for i, from_shape_i in enumerate(from_shape[0:2]):
if to_shape[i] is None:
to_shape_computed[i] = from_shape_i
elif is_single_integer(to_shape[i]):
to_shape_computed[i] = to_shape[i]
else: # float
to_shape_computed[i] = int(np.round(from_shape_i * to_shape[i]))
elif is_single_integer(to_shape) or is_single_float(to_shape):
to_shape_computed = _compute_resized_shape(from_shape, (to_shape, to_shape))
else:
raise Exception("Expected to_shape to be None or ndarray or tuple of floats or tuple of ints or single int "
+ "or single float, got %s." % (type(to_shape),))
return tuple(to_shape_computed) | [
"def",
"_compute_resized_shape",
"(",
"from_shape",
",",
"to_shape",
")",
":",
"if",
"is_np_array",
"(",
"from_shape",
")",
":",
"from_shape",
"=",
"from_shape",
".",
"shape",
"if",
"is_np_array",
"(",
"to_shape",
")",
":",
"to_shape",
"=",
"to_shape",
".",
"shape",
"to_shape_computed",
"=",
"list",
"(",
"from_shape",
")",
"if",
"to_shape",
"is",
"None",
":",
"pass",
"elif",
"isinstance",
"(",
"to_shape",
",",
"tuple",
")",
":",
"do_assert",
"(",
"len",
"(",
"from_shape",
")",
"in",
"[",
"2",
",",
"3",
"]",
")",
"do_assert",
"(",
"len",
"(",
"to_shape",
")",
"in",
"[",
"2",
",",
"3",
"]",
")",
"if",
"len",
"(",
"from_shape",
")",
"==",
"3",
"and",
"len",
"(",
"to_shape",
")",
"==",
"3",
":",
"do_assert",
"(",
"from_shape",
"[",
"2",
"]",
"==",
"to_shape",
"[",
"2",
"]",
")",
"elif",
"len",
"(",
"to_shape",
")",
"==",
"3",
":",
"to_shape_computed",
".",
"append",
"(",
"to_shape",
"[",
"2",
"]",
")",
"do_assert",
"(",
"all",
"(",
"[",
"v",
"is",
"None",
"or",
"is_single_number",
"(",
"v",
")",
"for",
"v",
"in",
"to_shape",
"[",
"0",
":",
"2",
"]",
"]",
")",
",",
"\"Expected the first two entries in to_shape to be None or numbers, \"",
"+",
"\"got types %s.\"",
"%",
"(",
"str",
"(",
"[",
"type",
"(",
"v",
")",
"for",
"v",
"in",
"to_shape",
"[",
"0",
":",
"2",
"]",
"]",
")",
",",
")",
")",
"for",
"i",
",",
"from_shape_i",
"in",
"enumerate",
"(",
"from_shape",
"[",
"0",
":",
"2",
"]",
")",
":",
"if",
"to_shape",
"[",
"i",
"]",
"is",
"None",
":",
"to_shape_computed",
"[",
"i",
"]",
"=",
"from_shape_i",
"elif",
"is_single_integer",
"(",
"to_shape",
"[",
"i",
"]",
")",
":",
"to_shape_computed",
"[",
"i",
"]",
"=",
"to_shape",
"[",
"i",
"]",
"else",
":",
"# float",
"to_shape_computed",
"[",
"i",
"]",
"=",
"int",
"(",
"np",
".",
"round",
"(",
"from_shape_i",
"*",
"to_shape",
"[",
"i",
"]",
")",
")",
"elif",
"is_single_integer",
"(",
"to_shape",
")",
"or",
"is_single_float",
"(",
"to_shape",
")",
":",
"to_shape_computed",
"=",
"_compute_resized_shape",
"(",
"from_shape",
",",
"(",
"to_shape",
",",
"to_shape",
")",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Expected to_shape to be None or ndarray or tuple of floats or tuple of ints or single int \"",
"+",
"\"or single float, got %s.\"",
"%",
"(",
"type",
"(",
"to_shape",
")",
",",
")",
")",
"return",
"tuple",
"(",
"to_shape_computed",
")"
] | Computes the intended new shape of an image-like array after resizing.
Parameters
----------
from_shape : tuple or ndarray
Old shape of the array. Usually expected to be a tuple of form ``(H, W)`` or ``(H, W, C)`` or
alternatively an array with two or three dimensions.
to_shape : None or tuple of ints or tuple of floats or int or float or ndarray
New shape of the array.
* If None, then `from_shape` will be used as the new shape.
* If an int ``V``, then the new shape will be ``(V, V, [C])``, where ``C`` will be added if it
is part of `from_shape`.
* If a float ``V``, then the new shape will be ``(H*V, W*V, [C])``, where ``H`` and ``W`` are the old
height/width.
* If a tuple ``(H', W', [C'])`` of ints, then ``H'`` and ``W'`` will be used as the new height
and width.
* If a tuple ``(H', W', [C'])`` of floats (except ``C``), then ``H'`` and ``W'`` will
be used as the new height and width.
* If a numpy array, then the array's shape will be used.
Returns
-------
to_shape_computed : tuple of int
New shape. | [
"Computes",
"the",
"intended",
"new",
"shape",
"of",
"an",
"image",
"-",
"like",
"array",
"after",
"resizing",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L509-L574 | valid |
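An illustration of the size-specification semantics handled by `_compute_resized_shape`; the private helper is imported only for demonstration, and the expected outputs follow from the code above:

from imgaug.imgaug import _compute_resized_shape  # private helper, illustration only

print(_compute_resized_shape((643, 960, 3), 0.5))             # (322, 480, 3): float = relative resize
print(_compute_resized_shape((643, 960, 3), (128, None, 3)))  # (128, 960, 3): None keeps the old axis size
print(_compute_resized_shape((643, 960, 3), None))            # (643, 960, 3): unchanged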
aleju/imgaug | imgaug/imgaug.py | quokka | def quokka(size=None, extract=None):
"""
Returns an image of a quokka as a numpy array.
Parameters
----------
size : None or float or tuple of int, optional
Size of the output image. Input into :func:`imgaug.imgaug.imresize_single_image`.
Usually expected to be a tuple ``(H, W)``, where ``H`` is the desired height
and ``W`` is the width. If None, then the image will not be resized.
extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
Subarea of the quokka image to extract:
* If None, then the whole image will be used.
* If string ``square``, then a squared area ``(x: 0 to max 643, y: 0 to max 643)`` will
be extracted from the image.
* If a tuple, then expected to contain four numbers denoting ``x1``, ``y1``, ``x2``
and ``y2``.
* If a BoundingBox, then that bounding box's area will be extracted from the image.
* If a BoundingBoxesOnImage, then expected to contain exactly one bounding box
and a shape matching the full image dimensions (i.e. ``(643, 960, *)``). Then the
one bounding box will be used similar to BoundingBox.
Returns
-------
img : (H,W,3) ndarray
The image array of dtype uint8.
"""
img = imageio.imread(QUOKKA_FP, pilmode="RGB")
if extract is not None:
bb = _quokka_normalize_extract(extract)
img = bb.extract_from_image(img)
if size is not None:
shape_resized = _compute_resized_shape(img.shape, size)
img = imresize_single_image(img, shape_resized[0:2])
return img | python | def quokka(size=None, extract=None):
"""
Returns an image of a quokka as a numpy array.
Parameters
----------
size : None or float or tuple of int, optional
Size of the output image. Input into :func:`imgaug.imgaug.imresize_single_image`.
Usually expected to be a tuple ``(H, W)``, where ``H`` is the desired height
and ``W`` is the width. If None, then the image will not be resized.
extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
Subarea of the quokka image to extract:
* If None, then the whole image will be used.
* If string ``square``, then a squared area ``(x: 0 to max 643, y: 0 to max 643)`` will
be extracted from the image.
* If a tuple, then expected to contain four numbers denoting ``x1``, ``y1``, ``x2``
and ``y2``.
* If a BoundingBox, then that bounding box's area will be extracted from the image.
* If a BoundingBoxesOnImage, then expected to contain exactly one bounding box
and a shape matching the full image dimensions (i.e. ``(643, 960, *)``). Then the
one bounding box will be used similar to BoundingBox.
Returns
-------
img : (H,W,3) ndarray
The image array of dtype uint8.
"""
img = imageio.imread(QUOKKA_FP, pilmode="RGB")
if extract is not None:
bb = _quokka_normalize_extract(extract)
img = bb.extract_from_image(img)
if size is not None:
shape_resized = _compute_resized_shape(img.shape, size)
img = imresize_single_image(img, shape_resized[0:2])
return img | [
"def",
"quokka",
"(",
"size",
"=",
"None",
",",
"extract",
"=",
"None",
")",
":",
"img",
"=",
"imageio",
".",
"imread",
"(",
"QUOKKA_FP",
",",
"pilmode",
"=",
"\"RGB\"",
")",
"if",
"extract",
"is",
"not",
"None",
":",
"bb",
"=",
"_quokka_normalize_extract",
"(",
"extract",
")",
"img",
"=",
"bb",
".",
"extract_from_image",
"(",
"img",
")",
"if",
"size",
"is",
"not",
"None",
":",
"shape_resized",
"=",
"_compute_resized_shape",
"(",
"img",
".",
"shape",
",",
"size",
")",
"img",
"=",
"imresize_single_image",
"(",
"img",
",",
"shape_resized",
"[",
"0",
":",
"2",
"]",
")",
"return",
"img"
] | Returns an image of a quokka as a numpy array.
Parameters
----------
size : None or float or tuple of int, optional
Size of the output image. Input into :func:`imgaug.imgaug.imresize_single_image`.
Usually expected to be a tuple ``(H, W)``, where ``H`` is the desired height
and ``W`` is the width. If None, then the image will not be resized.
extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
Subarea of the quokka image to extract:
* If None, then the whole image will be used.
* If string ``square``, then a squared area ``(x: 0 to max 643, y: 0 to max 643)`` will
be extracted from the image.
* If a tuple, then expected to contain four numbers denoting ``x1``, ``y1``, ``x2``
and ``y2``.
* If a BoundingBox, then that bounding box's area will be extracted from the image.
* If a BoundingBoxesOnImage, then expected to contain exactly one bounding box
and a shape matching the full image dimensions (i.e. ``(643, 960, *)``). Then the
one bounding box will be used similar to BoundingBox.
Returns
-------
img : (H,W,3) ndarray
The image array of dtype uint8. | [
"Returns",
"an",
"image",
"of",
"a",
"quokka",
"as",
"a",
"numpy",
"array",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L577-L614 | valid |
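A usage sketch for `quokka`, assuming `import imgaug as ia` and that the bundled quokka image file is present as in the installed package:

import imgaug as ia

img = ia.quokka(size=0.25)                           # full image scaled to 25%
print(img.dtype, img.shape)                          # uint8 (161, 240, 3)

crop = ia.quokka(size=(128, 128), extract="square")  # squared subarea, then resized
print(crop.shape)                                    # (128, 128, 3)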
aleju/imgaug | imgaug/imgaug.py | quokka_heatmap | def quokka_heatmap(size=None, extract=None):
"""
Returns a heatmap (here: depth map) for the standard example quokka image.
Parameters
----------
size : None or float or tuple of int, optional
See :func:`imgaug.quokka`.
extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
See :func:`imgaug.quokka`.
Returns
-------
result : imgaug.HeatmapsOnImage
Depth map as a heatmap object. Values close to 0.0 denote objects that are close to
the camera. Values close to 1.0 denote objects that are furthest away (among all shown
objects).
"""
# TODO get rid of this deferred import
from imgaug.augmentables.heatmaps import HeatmapsOnImage
img = imageio.imread(QUOKKA_DEPTH_MAP_HALFRES_FP, pilmode="RGB")
img = imresize_single_image(img, (643, 960), interpolation="cubic")
if extract is not None:
bb = _quokka_normalize_extract(extract)
img = bb.extract_from_image(img)
if size is None:
size = img.shape[0:2]
shape_resized = _compute_resized_shape(img.shape, size)
img = imresize_single_image(img, shape_resized[0:2])
img_0to1 = img[..., 0] # depth map was saved as 3-channel RGB
img_0to1 = img_0to1.astype(np.float32) / 255.0
img_0to1 = 1 - img_0to1 # depth map was saved as 0 being furthest away
return HeatmapsOnImage(img_0to1, shape=img_0to1.shape[0:2] + (3,)) | python | def quokka_heatmap(size=None, extract=None):
"""
Returns a heatmap (here: depth map) for the standard example quokka image.
Parameters
----------
size : None or float or tuple of int, optional
See :func:`imgaug.quokka`.
extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
See :func:`imgaug.quokka`.
Returns
-------
result : imgaug.HeatmapsOnImage
Depth map as a heatmap object. Values close to 0.0 denote objects that are close to
the camera. Values close to 1.0 denote objects that are furthest away (among all shown
objects).
"""
# TODO get rid of this deferred import
from imgaug.augmentables.heatmaps import HeatmapsOnImage
img = imageio.imread(QUOKKA_DEPTH_MAP_HALFRES_FP, pilmode="RGB")
img = imresize_single_image(img, (643, 960), interpolation="cubic")
if extract is not None:
bb = _quokka_normalize_extract(extract)
img = bb.extract_from_image(img)
if size is None:
size = img.shape[0:2]
shape_resized = _compute_resized_shape(img.shape, size)
img = imresize_single_image(img, shape_resized[0:2])
img_0to1 = img[..., 0] # depth map was saved as 3-channel RGB
img_0to1 = img_0to1.astype(np.float32) / 255.0
img_0to1 = 1 - img_0to1 # depth map was saved as 0 being furthest away
return HeatmapsOnImage(img_0to1, shape=img_0to1.shape[0:2] + (3,)) | [
"def",
"quokka_heatmap",
"(",
"size",
"=",
"None",
",",
"extract",
"=",
"None",
")",
":",
"# TODO get rid of this deferred import",
"from",
"imgaug",
".",
"augmentables",
".",
"heatmaps",
"import",
"HeatmapsOnImage",
"img",
"=",
"imageio",
".",
"imread",
"(",
"QUOKKA_DEPTH_MAP_HALFRES_FP",
",",
"pilmode",
"=",
"\"RGB\"",
")",
"img",
"=",
"imresize_single_image",
"(",
"img",
",",
"(",
"643",
",",
"960",
")",
",",
"interpolation",
"=",
"\"cubic\"",
")",
"if",
"extract",
"is",
"not",
"None",
":",
"bb",
"=",
"_quokka_normalize_extract",
"(",
"extract",
")",
"img",
"=",
"bb",
".",
"extract_from_image",
"(",
"img",
")",
"if",
"size",
"is",
"None",
":",
"size",
"=",
"img",
".",
"shape",
"[",
"0",
":",
"2",
"]",
"shape_resized",
"=",
"_compute_resized_shape",
"(",
"img",
".",
"shape",
",",
"size",
")",
"img",
"=",
"imresize_single_image",
"(",
"img",
",",
"shape_resized",
"[",
"0",
":",
"2",
"]",
")",
"img_0to1",
"=",
"img",
"[",
"...",
",",
"0",
"]",
"# depth map was saved as 3-channel RGB",
"img_0to1",
"=",
"img_0to1",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"/",
"255.0",
"img_0to1",
"=",
"1",
"-",
"img_0to1",
"# depth map was saved as 0 being furthest away",
"return",
"HeatmapsOnImage",
"(",
"img_0to1",
",",
"shape",
"=",
"img_0to1",
".",
"shape",
"[",
"0",
":",
"2",
"]",
"+",
"(",
"3",
",",
")",
")"
] | Returns a heatmap (here: depth map) for the standard example quokka image.
Parameters
----------
size : None or float or tuple of int, optional
See :func:`imgaug.quokka`.
extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
See :func:`imgaug.quokka`.
Returns
-------
result : imgaug.HeatmapsOnImage
Depth map as a heatmap object. Values close to 0.0 denote objects that are close to
the camera. Values close to 1.0 denote objects that are furthest away (among all shown
objects). | [
"Returns",
"a",
"heatmap",
"(",
"here",
":",
"depth",
"map",
")",
"for",
"the",
"standard",
"example",
"quokka",
"image",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L637-L675 | valid |
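A usage sketch for `quokka_heatmap`, under the same assumptions as for `quokka` above:

import imgaug as ia

hm = ia.quokka_heatmap(size=0.25)
print(hm.shape)  # (161, 240, 3): shape of the image the heatmap refers to
# the underlying values lie in [0.0, 1.0]; 0.0 = close to the camera, 1.0 = furthest away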
aleju/imgaug | imgaug/imgaug.py | quokka_segmentation_map | def quokka_segmentation_map(size=None, extract=None):
"""
Returns a segmentation map for the standard example quokka image.
Parameters
----------
size : None or float or tuple of int, optional
See :func:`imgaug.quokka`.
extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
See :func:`imgaug.quokka`.
Returns
-------
result : imgaug.SegmentationMapOnImage
Segmentation map object.
"""
# TODO get rid of this deferred import
from imgaug.augmentables.segmaps import SegmentationMapOnImage
with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
json_dict = json.load(f)
xx = []
yy = []
for kp_dict in json_dict["polygons"][0]["keypoints"]:
x = kp_dict["x"]
y = kp_dict["y"]
xx.append(x)
yy.append(y)
img_seg = np.zeros((643, 960, 1), dtype=np.float32)
rr, cc = skimage.draw.polygon(np.array(yy), np.array(xx), shape=img_seg.shape)
img_seg[rr, cc] = 1.0
if extract is not None:
bb = _quokka_normalize_extract(extract)
img_seg = bb.extract_from_image(img_seg)
segmap = SegmentationMapOnImage(img_seg, shape=img_seg.shape[0:2] + (3,))
if size is not None:
shape_resized = _compute_resized_shape(img_seg.shape, size)
segmap = segmap.resize(shape_resized[0:2])
segmap.shape = tuple(shape_resized[0:2]) + (3,)
return segmap | python | def quokka_segmentation_map(size=None, extract=None):
"""
Returns a segmentation map for the standard example quokka image.
Parameters
----------
size : None or float or tuple of int, optional
See :func:`imgaug.quokka`.
extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
See :func:`imgaug.quokka`.
Returns
-------
result : imgaug.SegmentationMapOnImage
Segmentation map object.
"""
# TODO get rid of this deferred import
from imgaug.augmentables.segmaps import SegmentationMapOnImage
with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
json_dict = json.load(f)
xx = []
yy = []
for kp_dict in json_dict["polygons"][0]["keypoints"]:
x = kp_dict["x"]
y = kp_dict["y"]
xx.append(x)
yy.append(y)
img_seg = np.zeros((643, 960, 1), dtype=np.float32)
rr, cc = skimage.draw.polygon(np.array(yy), np.array(xx), shape=img_seg.shape)
img_seg[rr, cc] = 1.0
if extract is not None:
bb = _quokka_normalize_extract(extract)
img_seg = bb.extract_from_image(img_seg)
segmap = SegmentationMapOnImage(img_seg, shape=img_seg.shape[0:2] + (3,))
if size is not None:
shape_resized = _compute_resized_shape(img_seg.shape, size)
segmap = segmap.resize(shape_resized[0:2])
segmap.shape = tuple(shape_resized[0:2]) + (3,)
return segmap | [
"def",
"quokka_segmentation_map",
"(",
"size",
"=",
"None",
",",
"extract",
"=",
"None",
")",
":",
"# TODO get rid of this deferred import",
"from",
"imgaug",
".",
"augmentables",
".",
"segmaps",
"import",
"SegmentationMapOnImage",
"with",
"open",
"(",
"QUOKKA_ANNOTATIONS_FP",
",",
"\"r\"",
")",
"as",
"f",
":",
"json_dict",
"=",
"json",
".",
"load",
"(",
"f",
")",
"xx",
"=",
"[",
"]",
"yy",
"=",
"[",
"]",
"for",
"kp_dict",
"in",
"json_dict",
"[",
"\"polygons\"",
"]",
"[",
"0",
"]",
"[",
"\"keypoints\"",
"]",
":",
"x",
"=",
"kp_dict",
"[",
"\"x\"",
"]",
"y",
"=",
"kp_dict",
"[",
"\"y\"",
"]",
"xx",
".",
"append",
"(",
"x",
")",
"yy",
".",
"append",
"(",
"y",
")",
"img_seg",
"=",
"np",
".",
"zeros",
"(",
"(",
"643",
",",
"960",
",",
"1",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"rr",
",",
"cc",
"=",
"skimage",
".",
"draw",
".",
"polygon",
"(",
"np",
".",
"array",
"(",
"yy",
")",
",",
"np",
".",
"array",
"(",
"xx",
")",
",",
"shape",
"=",
"img_seg",
".",
"shape",
")",
"img_seg",
"[",
"rr",
",",
"cc",
"]",
"=",
"1.0",
"if",
"extract",
"is",
"not",
"None",
":",
"bb",
"=",
"_quokka_normalize_extract",
"(",
"extract",
")",
"img_seg",
"=",
"bb",
".",
"extract_from_image",
"(",
"img_seg",
")",
"segmap",
"=",
"SegmentationMapOnImage",
"(",
"img_seg",
",",
"shape",
"=",
"img_seg",
".",
"shape",
"[",
"0",
":",
"2",
"]",
"+",
"(",
"3",
",",
")",
")",
"if",
"size",
"is",
"not",
"None",
":",
"shape_resized",
"=",
"_compute_resized_shape",
"(",
"img_seg",
".",
"shape",
",",
"size",
")",
"segmap",
"=",
"segmap",
".",
"resize",
"(",
"shape_resized",
"[",
"0",
":",
"2",
"]",
")",
"segmap",
".",
"shape",
"=",
"tuple",
"(",
"shape_resized",
"[",
"0",
":",
"2",
"]",
")",
"+",
"(",
"3",
",",
")",
"return",
"segmap"
] | Returns a segmentation map for the standard example quokka image.
Parameters
----------
size : None or float or tuple of int, optional
See :func:`imgaug.quokka`.
extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
See :func:`imgaug.quokka`.
Returns
-------
result : imgaug.SegmentationMapOnImage
Segmentation map object. | [
"Returns",
"a",
"segmentation",
"map",
"for",
"the",
"standard",
"example",
"quokka",
"image",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L678-L725 | valid |
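A usage sketch for `quokka_segmentation_map`, under the same assumptions as above:

import imgaug as ia

segmap = ia.quokka_segmentation_map(size=(128, 128))
print(segmap.shape)  # (128, 128, 3): image shape associated with the map
# the map contains 1.0 inside the quokka outline polygon and 0.0 elsewhere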
aleju/imgaug | imgaug/imgaug.py | quokka_keypoints | def quokka_keypoints(size=None, extract=None):
"""
Returns example keypoints on the standard example quokka image.
The keypoints cover the eyes, ears, nose and paws.
Parameters
----------
size : None or float or tuple of int or tuple of float, optional
Size of the output image on which the keypoints are placed. If None, then the keypoints
are not projected to any new size (positions on the original image are used).
Floats lead to relative size changes, ints to absolute sizes in pixels.
extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
Subarea to extract from the image. See :func:`imgaug.quokka`.
Returns
-------
kpsoi : imgaug.KeypointsOnImage
Example keypoints on the quokka image.
"""
# TODO get rid of this deferred import
from imgaug.augmentables.kps import Keypoint, KeypointsOnImage
left, top = 0, 0
if extract is not None:
bb_extract = _quokka_normalize_extract(extract)
left = bb_extract.x1
top = bb_extract.y1
with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
json_dict = json.load(f)
keypoints = []
for kp_dict in json_dict["keypoints"]:
keypoints.append(Keypoint(x=kp_dict["x"] - left, y=kp_dict["y"] - top))
if extract is not None:
shape = (bb_extract.height, bb_extract.width, 3)
else:
shape = (643, 960, 3)
kpsoi = KeypointsOnImage(keypoints, shape=shape)
if size is not None:
shape_resized = _compute_resized_shape(shape, size)
kpsoi = kpsoi.on(shape_resized)
return kpsoi | python | def quokka_keypoints(size=None, extract=None):
"""
Returns example keypoints on the standard example quokka image.
The keypoints cover the eyes, ears, nose and paws.
Parameters
----------
size : None or float or tuple of int or tuple of float, optional
Size of the output image on which the keypoints are placed. If None, then the keypoints
are not projected to any new size (positions on the original image are used).
Floats lead to relative size changes, ints to absolute sizes in pixels.
extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
Subarea to extract from the image. See :func:`imgaug.quokka`.
Returns
-------
kpsoi : imgaug.KeypointsOnImage
Example keypoints on the quokka image.
"""
# TODO get rid of this deferred import
from imgaug.augmentables.kps import Keypoint, KeypointsOnImage
left, top = 0, 0
if extract is not None:
bb_extract = _quokka_normalize_extract(extract)
left = bb_extract.x1
top = bb_extract.y1
with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
json_dict = json.load(f)
keypoints = []
for kp_dict in json_dict["keypoints"]:
keypoints.append(Keypoint(x=kp_dict["x"] - left, y=kp_dict["y"] - top))
if extract is not None:
shape = (bb_extract.height, bb_extract.width, 3)
else:
shape = (643, 960, 3)
kpsoi = KeypointsOnImage(keypoints, shape=shape)
if size is not None:
shape_resized = _compute_resized_shape(shape, size)
kpsoi = kpsoi.on(shape_resized)
return kpsoi | [
"def",
"quokka_keypoints",
"(",
"size",
"=",
"None",
",",
"extract",
"=",
"None",
")",
":",
"# TODO get rid of this deferred import",
"from",
"imgaug",
".",
"augmentables",
".",
"kps",
"import",
"Keypoint",
",",
"KeypointsOnImage",
"left",
",",
"top",
"=",
"0",
",",
"0",
"if",
"extract",
"is",
"not",
"None",
":",
"bb_extract",
"=",
"_quokka_normalize_extract",
"(",
"extract",
")",
"left",
"=",
"bb_extract",
".",
"x1",
"top",
"=",
"bb_extract",
".",
"y1",
"with",
"open",
"(",
"QUOKKA_ANNOTATIONS_FP",
",",
"\"r\"",
")",
"as",
"f",
":",
"json_dict",
"=",
"json",
".",
"load",
"(",
"f",
")",
"keypoints",
"=",
"[",
"]",
"for",
"kp_dict",
"in",
"json_dict",
"[",
"\"keypoints\"",
"]",
":",
"keypoints",
".",
"append",
"(",
"Keypoint",
"(",
"x",
"=",
"kp_dict",
"[",
"\"x\"",
"]",
"-",
"left",
",",
"y",
"=",
"kp_dict",
"[",
"\"y\"",
"]",
"-",
"top",
")",
")",
"if",
"extract",
"is",
"not",
"None",
":",
"shape",
"=",
"(",
"bb_extract",
".",
"height",
",",
"bb_extract",
".",
"width",
",",
"3",
")",
"else",
":",
"shape",
"=",
"(",
"643",
",",
"960",
",",
"3",
")",
"kpsoi",
"=",
"KeypointsOnImage",
"(",
"keypoints",
",",
"shape",
"=",
"shape",
")",
"if",
"size",
"is",
"not",
"None",
":",
"shape_resized",
"=",
"_compute_resized_shape",
"(",
"shape",
",",
"size",
")",
"kpsoi",
"=",
"kpsoi",
".",
"on",
"(",
"shape_resized",
")",
"return",
"kpsoi"
] | Returns example keypoints on the standard example quokka image.
The keypoints cover the eyes, ears, nose and paws.
Parameters
----------
size : None or float or tuple of int or tuple of float, optional
Size of the output image on which the keypoints are placed. If None, then the keypoints
are not projected to any new size (positions on the original image are used).
Floats lead to relative size changes, ints to absolute sizes in pixels.
extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
Subarea to extract from the image. See :func:`imgaug.quokka`.
Returns
-------
kpsoi : imgaug.KeypointsOnImage
Example keypoints on the quokka image. | [
"Returns",
"example",
"keypoints",
"on",
"the",
"standard",
"example",
"quokke",
"image",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L728-L771 | valid |
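A usage sketch for `quokka_keypoints`, under the same assumptions as above:

import imgaug as ia

kpsoi = ia.quokka_keypoints(size=0.5)
print(kpsoi.shape)           # e.g. (322, 480, 3) for size=0.5
print(len(kpsoi.keypoints))  # number of annotated points (eyes, ears, nose, paws)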
aleju/imgaug | imgaug/imgaug.py | quokka_bounding_boxes | def quokka_bounding_boxes(size=None, extract=None):
"""
Returns example bounding boxes on the standard example quokka image.
Currently only a single bounding box is returned that covers the quokka.
Parameters
----------
size : None or float or tuple of int or tuple of float, optional
Size of the output image on which the BBs are placed. If None, then the BBs
are not projected to any new size (positions on the original image are used).
Floats lead to relative size changes, ints to absolute sizes in pixels.
extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
Subarea to extract from the image. See :func:`imgaug.quokka`.
Returns
-------
bbsoi : imgaug.BoundingBoxesOnImage
Example BBs on the quokka image.
"""
# TODO get rid of this deferred import
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
left, top = 0, 0
if extract is not None:
bb_extract = _quokka_normalize_extract(extract)
left = bb_extract.x1
top = bb_extract.y1
with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
json_dict = json.load(f)
bbs = []
for bb_dict in json_dict["bounding_boxes"]:
bbs.append(
BoundingBox(
x1=bb_dict["x1"] - left,
y1=bb_dict["y1"] - top,
x2=bb_dict["x2"] - left,
y2=bb_dict["y2"] - top
)
)
if extract is not None:
shape = (bb_extract.height, bb_extract.width, 3)
else:
shape = (643, 960, 3)
bbsoi = BoundingBoxesOnImage(bbs, shape=shape)
if size is not None:
shape_resized = _compute_resized_shape(shape, size)
bbsoi = bbsoi.on(shape_resized)
return bbsoi | python | def quokka_bounding_boxes(size=None, extract=None):
"""
Returns example bounding boxes on the standard example quokka image.
Currently only a single bounding box is returned that covers the quokka.
Parameters
----------
size : None or float or tuple of int or tuple of float, optional
Size of the output image on which the BBs are placed. If None, then the BBs
are not projected to any new size (positions on the original image are used).
Floats lead to relative size changes, ints to absolute sizes in pixels.
extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
Subarea to extract from the image. See :func:`imgaug.quokka`.
Returns
-------
bbsoi : imgaug.BoundingBoxesOnImage
Example BBs on the quokka image.
"""
# TODO get rid of this deferred import
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
left, top = 0, 0
if extract is not None:
bb_extract = _quokka_normalize_extract(extract)
left = bb_extract.x1
top = bb_extract.y1
with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
json_dict = json.load(f)
bbs = []
for bb_dict in json_dict["bounding_boxes"]:
bbs.append(
BoundingBox(
x1=bb_dict["x1"] - left,
y1=bb_dict["y1"] - top,
x2=bb_dict["x2"] - left,
y2=bb_dict["y2"] - top
)
)
if extract is not None:
shape = (bb_extract.height, bb_extract.width, 3)
else:
shape = (643, 960, 3)
bbsoi = BoundingBoxesOnImage(bbs, shape=shape)
if size is not None:
shape_resized = _compute_resized_shape(shape, size)
bbsoi = bbsoi.on(shape_resized)
return bbsoi | [
"def",
"quokka_bounding_boxes",
"(",
"size",
"=",
"None",
",",
"extract",
"=",
"None",
")",
":",
"# TODO get rid of this deferred import",
"from",
"imgaug",
".",
"augmentables",
".",
"bbs",
"import",
"BoundingBox",
",",
"BoundingBoxesOnImage",
"left",
",",
"top",
"=",
"0",
",",
"0",
"if",
"extract",
"is",
"not",
"None",
":",
"bb_extract",
"=",
"_quokka_normalize_extract",
"(",
"extract",
")",
"left",
"=",
"bb_extract",
".",
"x1",
"top",
"=",
"bb_extract",
".",
"y1",
"with",
"open",
"(",
"QUOKKA_ANNOTATIONS_FP",
",",
"\"r\"",
")",
"as",
"f",
":",
"json_dict",
"=",
"json",
".",
"load",
"(",
"f",
")",
"bbs",
"=",
"[",
"]",
"for",
"bb_dict",
"in",
"json_dict",
"[",
"\"bounding_boxes\"",
"]",
":",
"bbs",
".",
"append",
"(",
"BoundingBox",
"(",
"x1",
"=",
"bb_dict",
"[",
"\"x1\"",
"]",
"-",
"left",
",",
"y1",
"=",
"bb_dict",
"[",
"\"y1\"",
"]",
"-",
"top",
",",
"x2",
"=",
"bb_dict",
"[",
"\"x2\"",
"]",
"-",
"left",
",",
"y2",
"=",
"bb_dict",
"[",
"\"y2\"",
"]",
"-",
"top",
")",
")",
"if",
"extract",
"is",
"not",
"None",
":",
"shape",
"=",
"(",
"bb_extract",
".",
"height",
",",
"bb_extract",
".",
"width",
",",
"3",
")",
"else",
":",
"shape",
"=",
"(",
"643",
",",
"960",
",",
"3",
")",
"bbsoi",
"=",
"BoundingBoxesOnImage",
"(",
"bbs",
",",
"shape",
"=",
"shape",
")",
"if",
"size",
"is",
"not",
"None",
":",
"shape_resized",
"=",
"_compute_resized_shape",
"(",
"shape",
",",
"size",
")",
"bbsoi",
"=",
"bbsoi",
".",
"on",
"(",
"shape_resized",
")",
"return",
"bbsoi"
] | Returns example bounding boxes on the standard example quokke image.
Currently only a single bounding box is returned that covers the quokka.
Parameters
----------
size : None or float or tuple of int or tuple of float, optional
Size of the output image on which the BBs are placed. If None, then the BBs
are not projected to any new size (positions on the original image are used).
Floats lead to relative size changes, ints to absolute sizes in pixels.
extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
Subarea to extract from the image. See :func:`imgaug.quokka`.
Returns
-------
bbsoi : imgaug.BoundingBoxesOnImage
Example BBs on the quokka image. | [
"Returns",
"example",
"bounding",
"boxes",
"on",
"the",
"standard",
"example",
"quokke",
"image",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L774-L824 | valid |
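A usage sketch for `quokka_bounding_boxes`, under the same assumptions as above:

import imgaug as ia

bbsoi = ia.quokka_bounding_boxes(size=0.5)
bb = bbsoi.bounding_boxes[0]       # currently a single box covering the quokka
print(bb.x1, bb.y1, bb.x2, bb.y2)  # coordinates already projected to the resized image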
aleju/imgaug | imgaug/imgaug.py | quokka_polygons | def quokka_polygons(size=None, extract=None):
"""
Returns example polygons on the standard example quokka image.
The result contains one polygon, covering the quokka's outline.
Parameters
----------
size : None or float or tuple of int or tuple of float, optional
Size of the output image on which the polygons are placed. If None,
then the polygons are not projected to any new size (positions on the
original image are used). Floats lead to relative size changes, ints
to absolute sizes in pixels.
extract : None or 'square' or tuple of number or imgaug.BoundingBox or \
imgaug.BoundingBoxesOnImage
Subarea to extract from the image. See :func:`imgaug.quokka`.
Returns
-------
psoi : imgaug.PolygonsOnImage
Example polygons on the quokka image.
"""
# TODO get rid of this deferred import
from imgaug.augmentables.polys import Polygon, PolygonsOnImage
left, top = 0, 0
if extract is not None:
bb_extract = _quokka_normalize_extract(extract)
left = bb_extract.x1
top = bb_extract.y1
with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
json_dict = json.load(f)
polygons = []
for poly_json in json_dict["polygons"]:
polygons.append(
Polygon([(point["x"] - left, point["y"] - top)
for point in poly_json["keypoints"]])
)
if extract is not None:
shape = (bb_extract.height, bb_extract.width, 3)
else:
shape = (643, 960, 3)
psoi = PolygonsOnImage(polygons, shape=shape)
if size is not None:
shape_resized = _compute_resized_shape(shape, size)
psoi = psoi.on(shape_resized)
return psoi | python | def quokka_polygons(size=None, extract=None):
"""
Returns example polygons on the standard example quokka image.
The result contains one polygon, covering the quokka's outline.
Parameters
----------
size : None or float or tuple of int or tuple of float, optional
Size of the output image on which the polygons are placed. If None,
then the polygons are not projected to any new size (positions on the
original image are used). Floats lead to relative size changes, ints
to absolute sizes in pixels.
extract : None or 'square' or tuple of number or imgaug.BoundingBox or \
imgaug.BoundingBoxesOnImage
Subarea to extract from the image. See :func:`imgaug.quokka`.
Returns
-------
psoi : imgaug.PolygonsOnImage
Example polygons on the quokka image.
"""
# TODO get rid of this deferred import
from imgaug.augmentables.polys import Polygon, PolygonsOnImage
left, top = 0, 0
if extract is not None:
bb_extract = _quokka_normalize_extract(extract)
left = bb_extract.x1
top = bb_extract.y1
with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
json_dict = json.load(f)
polygons = []
for poly_json in json_dict["polygons"]:
polygons.append(
Polygon([(point["x"] - left, point["y"] - top)
for point in poly_json["keypoints"]])
)
if extract is not None:
shape = (bb_extract.height, bb_extract.width, 3)
else:
shape = (643, 960, 3)
psoi = PolygonsOnImage(polygons, shape=shape)
if size is not None:
shape_resized = _compute_resized_shape(shape, size)
psoi = psoi.on(shape_resized)
return psoi | [
"def",
"quokka_polygons",
"(",
"size",
"=",
"None",
",",
"extract",
"=",
"None",
")",
":",
"# TODO get rid of this deferred import",
"from",
"imgaug",
".",
"augmentables",
".",
"polys",
"import",
"Polygon",
",",
"PolygonsOnImage",
"left",
",",
"top",
"=",
"0",
",",
"0",
"if",
"extract",
"is",
"not",
"None",
":",
"bb_extract",
"=",
"_quokka_normalize_extract",
"(",
"extract",
")",
"left",
"=",
"bb_extract",
".",
"x1",
"top",
"=",
"bb_extract",
".",
"y1",
"with",
"open",
"(",
"QUOKKA_ANNOTATIONS_FP",
",",
"\"r\"",
")",
"as",
"f",
":",
"json_dict",
"=",
"json",
".",
"load",
"(",
"f",
")",
"polygons",
"=",
"[",
"]",
"for",
"poly_json",
"in",
"json_dict",
"[",
"\"polygons\"",
"]",
":",
"polygons",
".",
"append",
"(",
"Polygon",
"(",
"[",
"(",
"point",
"[",
"\"x\"",
"]",
"-",
"left",
",",
"point",
"[",
"\"y\"",
"]",
"-",
"top",
")",
"for",
"point",
"in",
"poly_json",
"[",
"\"keypoints\"",
"]",
"]",
")",
")",
"if",
"extract",
"is",
"not",
"None",
":",
"shape",
"=",
"(",
"bb_extract",
".",
"height",
",",
"bb_extract",
".",
"width",
",",
"3",
")",
"else",
":",
"shape",
"=",
"(",
"643",
",",
"960",
",",
"3",
")",
"psoi",
"=",
"PolygonsOnImage",
"(",
"polygons",
",",
"shape",
"=",
"shape",
")",
"if",
"size",
"is",
"not",
"None",
":",
"shape_resized",
"=",
"_compute_resized_shape",
"(",
"shape",
",",
"size",
")",
"psoi",
"=",
"psoi",
".",
"on",
"(",
"shape_resized",
")",
"return",
"psoi"
] | Returns example polygons on the standard example quokka image.
The result contains one polygon, covering the quokka's outline.
Parameters
----------
size : None or float or tuple of int or tuple of float, optional
Size of the output image on which the polygons are placed. If None,
then the polygons are not projected to any new size (positions on the
original image are used). Floats lead to relative size changes, ints
to absolute sizes in pixels.
extract : None or 'square' or tuple of number or imgaug.BoundingBox or \
imgaug.BoundingBoxesOnImage
Subarea to extract from the image. See :func:`imgaug.quokka`.
Returns
-------
psoi : imgaug.PolygonsOnImage
Example polygons on the quokka image. | [
"Returns",
"example",
"polygons",
"on",
"the",
"standard",
"example",
"quokke",
"image",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L827-L875 | valid |
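A usage sketch for `quokka_polygons`, under the same assumptions as above:

import imgaug as ia

psoi = ia.quokka_polygons(size=0.5)
poly = psoi.polygons[0]                # single polygon covering the quokka's outline
print(len(poly.exterior), psoi.shape)  # number of outline points and the image shape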
aleju/imgaug | imgaug/imgaug.py | angle_between_vectors | def angle_between_vectors(v1, v2):
"""
Returns the angle in radians between vectors `v1` and `v2`.
From http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python
Parameters
----------
v1 : (N,) ndarray
First vector.
v2 : (N,) ndarray
Second vector.
Returns
-------
out : float
Angle in radians.
Examples
--------
>>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([0, 1, 0]))
1.570796...
>>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([1, 0, 0]))
0.0
>>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([-1, 0, 0]))
3.141592...
"""
l1 = np.linalg.norm(v1)
l2 = np.linalg.norm(v2)
v1_u = (v1 / l1) if l1 > 0 else np.float32(v1) * 0
v2_u = (v2 / l2) if l2 > 0 else np.float32(v2) * 0
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)) | python | def angle_between_vectors(v1, v2):
"""
Returns the angle in radians between vectors `v1` and `v2`.
From http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python
Parameters
----------
v1 : (N,) ndarray
First vector.
v2 : (N,) ndarray
Second vector.
Returns
-------
out : float
Angle in radians.
Examples
--------
>>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([0, 1, 0]))
1.570796...
>>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([1, 0, 0]))
0.0
>>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([-1, 0, 0]))
3.141592...
"""
l1 = np.linalg.norm(v1)
l2 = np.linalg.norm(v2)
v1_u = (v1 / l1) if l1 > 0 else np.float32(v1) * 0
v2_u = (v2 / l2) if l2 > 0 else np.float32(v2) * 0
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)) | [
"def",
"angle_between_vectors",
"(",
"v1",
",",
"v2",
")",
":",
"l1",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"v1",
")",
"l2",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"v2",
")",
"v1_u",
"=",
"(",
"v1",
"/",
"l1",
")",
"if",
"l1",
">",
"0",
"else",
"np",
".",
"float32",
"(",
"v1",
")",
"*",
"0",
"v2_u",
"=",
"(",
"v2",
"/",
"l2",
")",
"if",
"l2",
">",
"0",
"else",
"np",
".",
"float32",
"(",
"v2",
")",
"*",
"0",
"return",
"np",
".",
"arccos",
"(",
"np",
".",
"clip",
"(",
"np",
".",
"dot",
"(",
"v1_u",
",",
"v2_u",
")",
",",
"-",
"1.0",
",",
"1.0",
")",
")"
] | Returns the angle in radians between vectors `v1` and `v2`.
From http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python
Parameters
----------
v1 : (N,) ndarray
First vector.
v2 : (N,) ndarray
Second vector.
Returns
-------
out : float
Angle in radians.
Examples
--------
>>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([0, 1, 0]))
1.570796...
>>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([1, 0, 0]))
0.0
>>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([-1, 0, 0]))
3.141592... | [
"Returns",
"the",
"angle",
"in",
"radians",
"between",
"vectors",
"v1",
"and",
"v2",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L878-L913 | valid |
aleju/imgaug | imgaug/imgaug.py | compute_line_intersection_point | def compute_line_intersection_point(x1, y1, x2, y2, x3, y3, x4, y4):
"""
Compute the intersection point of two lines.
Taken from https://stackoverflow.com/a/20679579 .
Parameters
----------
x1 : number
x coordinate of the first point on line 1. (The line extends beyond this point.)
y1 : number
y coordinate of the first point on line 1. (The line extends beyond this point.)
x2 : number
x coordinate of the second point on line 1. (The line extends beyond this point.)
y2 : number
y coordinate of the second point on line 1. (The line extends beyond this point.)
x3 : number
x coordinate of the first point on line 2. (The line extends beyond this point.)
y3 : number
y coordinate of the first point on line 2. (The line extends beyond this point.)
x4 : number
x coordinate of the second point on line 2. (The line extends beyond this point.)
y4 : number
y coordinate of the second point on line 2. (The line extends beyond this point.)
Returns
-------
tuple of number or bool
The coordinate of the intersection point as a tuple ``(x, y)``.
If the lines are parallel (no intersection point or an infinite number of them), the result is False.
"""
def _make_line(p1, p2):
A = (p1[1] - p2[1])
B = (p2[0] - p1[0])
C = (p1[0]*p2[1] - p2[0]*p1[1])
return A, B, -C
L1 = _make_line((x1, y1), (x2, y2))
L2 = _make_line((x3, y3), (x4, y4))
D = L1[0] * L2[1] - L1[1] * L2[0]
Dx = L1[2] * L2[1] - L1[1] * L2[2]
Dy = L1[0] * L2[2] - L1[2] * L2[0]
if D != 0:
x = Dx / D
y = Dy / D
return x, y
else:
return False | python | def compute_line_intersection_point(x1, y1, x2, y2, x3, y3, x4, y4):
"""
Compute the intersection point of two lines.
Taken from https://stackoverflow.com/a/20679579 .
Parameters
----------
x1 : number
x coordinate of the first point on line 1. (The line extends beyond this point.)
y1 : number
y coordinate of the first point on line 1. (The line extends beyond this point.)
x2 : number
x coordinate of the second point on line 1. (The line extends beyond this point.)
y2 : number
y coordinate of the second point on line 1. (The line extends beyond this point.)
x3 : number
x coordinate of the first point on line 2. (The line extends beyond this point.)
y3 : number
y coordinate of the first point on line 2. (The line extends beyond this point.)
x4 : number
x coordinate of the second point on line 2. (The line extends beyond this point.)
y4 : number
y coordinate of the second point on line 2. (The line extends beyond this point.)
Returns
-------
tuple of number or bool
The coordinate of the intersection point as a tuple ``(x, y)``.
If the lines are parallel (no intersection point or an infinite number of them), the result is False.
"""
def _make_line(p1, p2):
A = (p1[1] - p2[1])
B = (p2[0] - p1[0])
C = (p1[0]*p2[1] - p2[0]*p1[1])
return A, B, -C
L1 = _make_line((x1, y1), (x2, y2))
L2 = _make_line((x3, y3), (x4, y4))
D = L1[0] * L2[1] - L1[1] * L2[0]
Dx = L1[2] * L2[1] - L1[1] * L2[2]
Dy = L1[0] * L2[2] - L1[2] * L2[0]
if D != 0:
x = Dx / D
y = Dy / D
return x, y
else:
return False | [
"def",
"compute_line_intersection_point",
"(",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
",",
"x3",
",",
"y3",
",",
"x4",
",",
"y4",
")",
":",
"def",
"_make_line",
"(",
"p1",
",",
"p2",
")",
":",
"A",
"=",
"(",
"p1",
"[",
"1",
"]",
"-",
"p2",
"[",
"1",
"]",
")",
"B",
"=",
"(",
"p2",
"[",
"0",
"]",
"-",
"p1",
"[",
"0",
"]",
")",
"C",
"=",
"(",
"p1",
"[",
"0",
"]",
"*",
"p2",
"[",
"1",
"]",
"-",
"p2",
"[",
"0",
"]",
"*",
"p1",
"[",
"1",
"]",
")",
"return",
"A",
",",
"B",
",",
"-",
"C",
"L1",
"=",
"_make_line",
"(",
"(",
"x1",
",",
"y1",
")",
",",
"(",
"x2",
",",
"y2",
")",
")",
"L2",
"=",
"_make_line",
"(",
"(",
"x3",
",",
"y3",
")",
",",
"(",
"x4",
",",
"y4",
")",
")",
"D",
"=",
"L1",
"[",
"0",
"]",
"*",
"L2",
"[",
"1",
"]",
"-",
"L1",
"[",
"1",
"]",
"*",
"L2",
"[",
"0",
"]",
"Dx",
"=",
"L1",
"[",
"2",
"]",
"*",
"L2",
"[",
"1",
"]",
"-",
"L1",
"[",
"1",
"]",
"*",
"L2",
"[",
"2",
"]",
"Dy",
"=",
"L1",
"[",
"0",
"]",
"*",
"L2",
"[",
"2",
"]",
"-",
"L1",
"[",
"2",
"]",
"*",
"L2",
"[",
"0",
"]",
"if",
"D",
"!=",
"0",
":",
"x",
"=",
"Dx",
"/",
"D",
"y",
"=",
"Dy",
"/",
"D",
"return",
"x",
",",
"y",
"else",
":",
"return",
"False"
] | Compute the intersection point of two lines.
Taken from https://stackoverflow.com/a/20679579 .
Parameters
----------
x1 : number
x coordinate of the first point on line 1. (The line extends beyond this point.)
y1 : number
y coordinate of the first point on line 1. (The line extends beyond this point.)
x2 : number
x coordinate of the second point on line 1. (The line extends beyond this point.)
y2 : number
y coordinate of the second point on line 1. (The line extends beyond this point.)
x3 : number
x coordinate of the first point on line 2. (The line extends beyond this point.)
y3 : number
y coordinate of the first point on line 2. (The line extends beyond this point.)
x4 : number
x coordinate of the second point on line 2. (The line extends beyond this point.)
y4 : number
y coordinate of the second point on line 2. (The line extends beyond this point.)
Returns
-------
tuple of number or bool
The coordinate of the intersection point as a tuple ``(x, y)``.
If the lines are parallel (no intersection point or an infinite number of them), the result is False. | [
"Compute",
"the",
"intersection",
"point",
"of",
"two",
"lines",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L917-L973 | valid |
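A minimal sketch of calling `compute_line_intersection_point` from the row above; the coordinates are illustrative values and the import assumes the recorded imgaug version is installed:
from imgaug.imgaug import compute_line_intersection_point
# line 1 runs through (0, 0) and (2, 2), line 2 runs through (0, 2) and (2, 0)
print(compute_line_intersection_point(0, 0, 2, 2, 0, 2, 2, 0))  # (1.0, 1.0)
# parallel lines have no unique intersection point, so the function returns False
print(compute_line_intersection_point(0, 0, 1, 0, 0, 1, 1, 1))  # False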
aleju/imgaug | imgaug/imgaug.py | draw_text | def draw_text(img, y, x, text, color=(0, 255, 0), size=25):
"""
Draw text on an image.
This uses by default DejaVuSans as its font, which is included in this library.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: no
* ``uint32``: no
* ``uint64``: no
* ``int8``: no
* ``int16``: no
* ``int32``: no
* ``int64``: no
* ``float16``: no
* ``float32``: yes; not tested
* ``float64``: no
* ``float128``: no
* ``bool``: no
TODO check if other dtypes could be enabled
Parameters
----------
img : (H,W,3) ndarray
The image array to draw text on.
Expected to be of dtype uint8 or float32 (value range 0.0 to 255.0).
y : int
y-coordinate of the top left corner of the text.
x : int
x-coordinate of the top left corner of the text.
text : str
The text to draw.
color : iterable of int, optional
Color of the text to draw. For RGB-images this is expected to be an RGB color.
size : int, optional
Font size of the text to draw.
Returns
-------
img_np : (H,W,3) ndarray
Input image with text drawn on it.
"""
do_assert(img.dtype in [np.uint8, np.float32])
input_dtype = img.dtype
if img.dtype == np.float32:
img = img.astype(np.uint8)
img = PIL_Image.fromarray(img)
font = PIL_ImageFont.truetype(DEFAULT_FONT_FP, size)
context = PIL_ImageDraw.Draw(img)
context.text((x, y), text, fill=tuple(color), font=font)
img_np = np.asarray(img)
# PIL/asarray returns read only array
if not img_np.flags["WRITEABLE"]:
try:
# this seems to no longer work with np 1.16 (or was pillow updated?)
img_np.setflags(write=True)
except ValueError as ex:
if "cannot set WRITEABLE flag to True of this array" in str(ex):
img_np = np.copy(img_np)
if img_np.dtype != input_dtype:
img_np = img_np.astype(input_dtype)
return img_np | python | def draw_text(img, y, x, text, color=(0, 255, 0), size=25):
"""
Draw text on an image.
This uses by default DejaVuSans as its font, which is included in this library.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: no
* ``uint32``: no
* ``uint64``: no
* ``int8``: no
* ``int16``: no
* ``int32``: no
* ``int64``: no
* ``float16``: no
* ``float32``: yes; not tested
* ``float64``: no
* ``float128``: no
* ``bool``: no
TODO check if other dtypes could be enabled
Parameters
----------
img : (H,W,3) ndarray
The image array to draw text on.
Expected to be of dtype uint8 or float32 (value range 0.0 to 255.0).
y : int
y-coordinate of the top left corner of the text.
x : int
x-coordinate of the top left corner of the text.
text : str
The text to draw.
color : iterable of int, optional
Color of the text to draw. For RGB-images this is expected to be an RGB color.
size : int, optional
Font size of the text to draw.
Returns
-------
img_np : (H,W,3) ndarray
Input image with text drawn on it.
"""
do_assert(img.dtype in [np.uint8, np.float32])
input_dtype = img.dtype
if img.dtype == np.float32:
img = img.astype(np.uint8)
img = PIL_Image.fromarray(img)
font = PIL_ImageFont.truetype(DEFAULT_FONT_FP, size)
context = PIL_ImageDraw.Draw(img)
context.text((x, y), text, fill=tuple(color), font=font)
img_np = np.asarray(img)
# PIL/asarray returns read only array
if not img_np.flags["WRITEABLE"]:
try:
# this seems to no longer work with np 1.16 (or was pillow updated?)
img_np.setflags(write=True)
except ValueError as ex:
if "cannot set WRITEABLE flag to True of this array" in str(ex):
img_np = np.copy(img_np)
if img_np.dtype != input_dtype:
img_np = img_np.astype(input_dtype)
return img_np | [
"def",
"draw_text",
"(",
"img",
",",
"y",
",",
"x",
",",
"text",
",",
"color",
"=",
"(",
"0",
",",
"255",
",",
"0",
")",
",",
"size",
"=",
"25",
")",
":",
"do_assert",
"(",
"img",
".",
"dtype",
"in",
"[",
"np",
".",
"uint8",
",",
"np",
".",
"float32",
"]",
")",
"input_dtype",
"=",
"img",
".",
"dtype",
"if",
"img",
".",
"dtype",
"==",
"np",
".",
"float32",
":",
"img",
"=",
"img",
".",
"astype",
"(",
"np",
".",
"uint8",
")",
"img",
"=",
"PIL_Image",
".",
"fromarray",
"(",
"img",
")",
"font",
"=",
"PIL_ImageFont",
".",
"truetype",
"(",
"DEFAULT_FONT_FP",
",",
"size",
")",
"context",
"=",
"PIL_ImageDraw",
".",
"Draw",
"(",
"img",
")",
"context",
".",
"text",
"(",
"(",
"x",
",",
"y",
")",
",",
"text",
",",
"fill",
"=",
"tuple",
"(",
"color",
")",
",",
"font",
"=",
"font",
")",
"img_np",
"=",
"np",
".",
"asarray",
"(",
"img",
")",
"# PIL/asarray returns read only array",
"if",
"not",
"img_np",
".",
"flags",
"[",
"\"WRITEABLE\"",
"]",
":",
"try",
":",
"# this seems to no longer work with np 1.16 (or was pillow updated?)",
"img_np",
".",
"setflags",
"(",
"write",
"=",
"True",
")",
"except",
"ValueError",
"as",
"ex",
":",
"if",
"\"cannot set WRITEABLE flag to True of this array\"",
"in",
"str",
"(",
"ex",
")",
":",
"img_np",
"=",
"np",
".",
"copy",
"(",
"img_np",
")",
"if",
"img_np",
".",
"dtype",
"!=",
"input_dtype",
":",
"img_np",
"=",
"img_np",
".",
"astype",
"(",
"input_dtype",
")",
"return",
"img_np"
] | Draw text on an image.
This uses by default DejaVuSans as its font, which is included in this library.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: no
* ``uint32``: no
* ``uint64``: no
* ``int8``: no
* ``int16``: no
* ``int32``: no
* ``int64``: no
* ``float16``: no
* ``float32``: yes; not tested
* ``float64``: no
* ``float128``: no
* ``bool``: no
TODO check if other dtypes could be enabled
Parameters
----------
img : (H,W,3) ndarray
The image array to draw text on.
Expected to be of dtype uint8 or float32 (value range 0.0 to 255.0).
y : int
y-coordinate of the top left corner of the text.
x : int
x-coordinate of the top left corner of the text.
text : str
The text to draw.
color : iterable of int, optional
Color of the text to draw. For RGB-images this is expected to be an RGB color.
size : int, optional
Font size of the text to draw.
Returns
-------
img_np : (H,W,3) ndarray
Input image with text drawn on it. | [
"Draw",
"text",
"on",
"an",
"image",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L977-L1052 | valid |
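A hedged usage sketch for `draw_text` (assumes the recorded imgaug version is installed and its bundled DejaVuSans font is available, as the docstring above states):
import numpy as np
from imgaug.imgaug import draw_text
# black 64x64 RGB canvas of dtype uint8, as expected by the function
image = np.zeros((64, 64, 3), dtype=np.uint8)
# draw green text with its top left corner at y=10, x=5
image_with_text = draw_text(image, y=10, x=5, text="hi", color=(0, 255, 0), size=20)
print(image_with_text.shape, image_with_text.dtype)  # (64, 64, 3) uint8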
aleju/imgaug | imgaug/imgaug.py | imresize_many_images | def imresize_many_images(images, sizes=None, interpolation=None):
"""
Resize many images to a specified size.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: no (1)
* ``uint64``: no (2)
* ``int8``: yes; tested (3)
* ``int16``: yes; tested
* ``int32``: limited; tested (4)
* ``int64``: no (2)
* ``float16``: yes; tested (5)
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: no (1)
* ``bool``: yes; tested (6)
- (1) rejected by ``cv2.imresize``
- (2) results too inaccurate
- (3) mapped internally to ``int16`` when interpolation!="nearest"
- (4) only supported for interpolation="nearest", other interpolations lead to cv2 error
- (5) mapped internally to ``float32``
- (6) mapped internally to ``uint8``
Parameters
----------
images : (N,H,W,[C]) ndarray or list of (H,W,[C]) ndarray
Array of the images to resize.
Usually recommended to be of dtype uint8.
sizes : float or iterable of int or iterable of float
The new size of the images, given either as a fraction (a single float) or as
a ``(height, width)`` tuple of two integers or as a ``(height fraction, width fraction)``
tuple of two floats.
interpolation : None or str or int, optional
The interpolation to use during resize.
If int, then expected to be one of:
* ``cv2.INTER_NEAREST`` (nearest neighbour interpolation)
* ``cv2.INTER_LINEAR`` (linear interpolation)
* ``cv2.INTER_AREA`` (area interpolation)
* ``cv2.INTER_CUBIC`` (cubic interpolation)
If string, then expected to be one of:
* ``nearest`` (identical to ``cv2.INTER_NEAREST``)
* ``linear`` (identical to ``cv2.INTER_LINEAR``)
* ``area`` (identical to ``cv2.INTER_AREA``)
* ``cubic`` (identical to ``cv2.INTER_CUBIC``)
If None, the interpolation will be chosen automatically. For size
increases, area interpolation will be picked and for size decreases,
linear interpolation will be picked.
Returns
-------
result : (N,H',W',[C]) ndarray
Array of the resized images.
Examples
--------
>>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), 2.0)
Converts 2 RGB images of height and width 16 to images of height and width 16*2 = 32.
>>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (16, 32))
Converts 2 RGB images of height and width 16 to images of height 16 and width 32.
>>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (2.0, 4.0))
Converts 2 RGB images of height and width 16 to images of height 32 and width 64.
"""
# we just do nothing if the input contains zero images
# one could also argue that an exception would be appropriate here
if len(images) == 0:
return images
# verify that all input images have height/width > 0
do_assert(
all([image.shape[0] > 0 and image.shape[1] > 0 for image in images]),
("Cannot resize images, because at least one image has a height and/or width of zero. "
+ "Observed shapes were: %s.") % (str([image.shape for image in images]),)
)
# verify that sizes contains only values >0
if is_single_number(sizes) and sizes <= 0:
raise Exception(
"Cannot resize to the target size %.8f, because the value is zero or lower than zero." % (sizes,))
elif isinstance(sizes, tuple) and (sizes[0] <= 0 or sizes[1] <= 0):
sizes_str = [
"int %d" % (sizes[0],) if is_single_integer(sizes[0]) else "float %.8f" % (sizes[0],),
"int %d" % (sizes[1],) if is_single_integer(sizes[1]) else "float %.8f" % (sizes[1],),
]
sizes_str = "(%s, %s)" % (sizes_str[0], sizes_str[1])
raise Exception(
"Cannot resize to the target sizes %s. At least one value is zero or lower than zero." % (sizes_str,))
# change after the validation to make the above error messages match the original input
if is_single_number(sizes):
sizes = (sizes, sizes)
else:
do_assert(len(sizes) == 2, "Expected tuple with exactly two entries, got %d entries." % (len(sizes),))
do_assert(all([is_single_number(val) for val in sizes]),
"Expected tuple with two ints or floats, got types %s." % (str([type(val) for val in sizes]),))
# if input is a list, call this function N times for N images
# but check beforehand if all images have the same shape, then just convert to a single array and de-convert
# afterwards
if isinstance(images, list):
nb_shapes = len(set([image.shape for image in images]))
if nb_shapes == 1:
return list(imresize_many_images(np.array(images), sizes=sizes, interpolation=interpolation))
else:
return [imresize_many_images(image[np.newaxis, ...], sizes=sizes, interpolation=interpolation)[0, ...]
for image in images]
shape = images.shape
do_assert(images.ndim in [3, 4], "Expected array of shape (N, H, W, [C]), got shape %s" % (str(shape),))
nb_images = shape[0]
im_height, im_width = shape[1], shape[2]
nb_channels = shape[3] if images.ndim > 3 else None
height, width = sizes[0], sizes[1]
height = int(np.round(im_height * height)) if is_single_float(height) else height
width = int(np.round(im_width * width)) if is_single_float(width) else width
if height == im_height and width == im_width:
return np.copy(images)
ip = interpolation
do_assert(ip is None or ip in IMRESIZE_VALID_INTERPOLATIONS)
if ip is None:
if height > im_height or width > im_width:
ip = cv2.INTER_AREA
else:
ip = cv2.INTER_LINEAR
elif ip in ["nearest", cv2.INTER_NEAREST]:
ip = cv2.INTER_NEAREST
elif ip in ["linear", cv2.INTER_LINEAR]:
ip = cv2.INTER_LINEAR
elif ip in ["area", cv2.INTER_AREA]:
ip = cv2.INTER_AREA
else: # if ip in ["cubic", cv2.INTER_CUBIC]:
ip = cv2.INTER_CUBIC
# TODO find more beautiful way to avoid circular imports
from . import dtypes as iadt
if ip == cv2.INTER_NEAREST:
iadt.gate_dtypes(images,
allowed=["bool", "uint8", "uint16", "int8", "int16", "int32", "float16", "float32", "float64"],
disallowed=["uint32", "uint64", "uint128", "uint256", "int64", "int128", "int256",
"float96", "float128", "float256"],
augmenter=None)
else:
iadt.gate_dtypes(images,
allowed=["bool", "uint8", "uint16", "int8", "int16", "float16", "float32", "float64"],
disallowed=["uint32", "uint64", "uint128", "uint256", "int32", "int64", "int128", "int256",
"float96", "float128", "float256"],
augmenter=None)
result_shape = (nb_images, height, width)
if nb_channels is not None:
result_shape = result_shape + (nb_channels,)
result = np.zeros(result_shape, dtype=images.dtype)
for i, image in enumerate(images):
input_dtype = image.dtype
if image.dtype.type == np.bool_:
image = image.astype(np.uint8) * 255
elif image.dtype.type == np.int8 and ip != cv2.INTER_NEAREST:
image = image.astype(np.int16)
elif image.dtype.type == np.float16:
image = image.astype(np.float32)
result_img = cv2.resize(image, (width, height), interpolation=ip)
assert result_img.dtype == image.dtype
# cv2 removes the channel axis if input was (H, W, 1)
# we re-add it (but only if input was not (H, W))
if len(result_img.shape) == 2 and nb_channels is not None and nb_channels == 1:
result_img = result_img[:, :, np.newaxis]
if input_dtype.type == np.bool_:
result_img = result_img > 127
elif input_dtype.type == np.int8 and ip != cv2.INTER_NEAREST:
# TODO somehow better avoid circular imports here
from . import dtypes as iadt
result_img = iadt.restore_dtypes_(result_img, np.int8)
elif input_dtype.type == np.float16:
# TODO see above
from . import dtypes as iadt
result_img = iadt.restore_dtypes_(result_img, np.float16)
result[i] = result_img
return result | python | def imresize_many_images(images, sizes=None, interpolation=None):
"""
Resize many images to a specified size.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: no (1)
* ``uint64``: no (2)
* ``int8``: yes; tested (3)
* ``int16``: yes; tested
* ``int32``: limited; tested (4)
* ``int64``: no (2)
* ``float16``: yes; tested (5)
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: no (1)
* ``bool``: yes; tested (6)
- (1) rejected by ``cv2.imresize``
- (2) results too inaccurate
- (3) mapped internally to ``int16`` when interpolation!="nearest"
- (4) only supported for interpolation="nearest", other interpolations lead to cv2 error
- (5) mapped internally to ``float32``
- (6) mapped internally to ``uint8``
Parameters
----------
images : (N,H,W,[C]) ndarray or list of (H,W,[C]) ndarray
Array of the images to resize.
Usually recommended to be of dtype uint8.
sizes : float or iterable of int or iterable of float
The new size of the images, given either as a fraction (a single float) or as
a ``(height, width)`` tuple of two integers or as a ``(height fraction, width fraction)``
tuple of two floats.
interpolation : None or str or int, optional
The interpolation to use during resize.
If int, then expected to be one of:
* ``cv2.INTER_NEAREST`` (nearest neighbour interpolation)
* ``cv2.INTER_LINEAR`` (linear interpolation)
* ``cv2.INTER_AREA`` (area interpolation)
* ``cv2.INTER_CUBIC`` (cubic interpolation)
If string, then expected to be one of:
* ``nearest`` (identical to ``cv2.INTER_NEAREST``)
* ``linear`` (identical to ``cv2.INTER_LINEAR``)
* ``area`` (identical to ``cv2.INTER_AREA``)
* ``cubic`` (identical to ``cv2.INTER_CUBIC``)
If None, the interpolation will be chosen automatically. For size
increases, area interpolation will be picked and for size decreases,
linear interpolation will be picked.
Returns
-------
result : (N,H',W',[C]) ndarray
Array of the resized images.
Examples
--------
>>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), 2.0)
Converts 2 RGB images of height and width 16 to images of height and width 16*2 = 32.
>>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (16, 32))
Converts 2 RGB images of height and width 16 to images of height 16 and width 32.
>>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (2.0, 4.0))
Converts 2 RGB images of height and width 16 to images of height 32 and width 64.
"""
# we just do nothing if the input contains zero images
# one could also argue that an exception would be appropriate here
if len(images) == 0:
return images
# verify that all input images have height/width > 0
do_assert(
all([image.shape[0] > 0 and image.shape[1] > 0 for image in images]),
("Cannot resize images, because at least one image has a height and/or width of zero. "
+ "Observed shapes were: %s.") % (str([image.shape for image in images]),)
)
# verify that sizes contains only values >0
if is_single_number(sizes) and sizes <= 0:
raise Exception(
"Cannot resize to the target size %.8f, because the value is zero or lower than zero." % (sizes,))
elif isinstance(sizes, tuple) and (sizes[0] <= 0 or sizes[1] <= 0):
sizes_str = [
"int %d" % (sizes[0],) if is_single_integer(sizes[0]) else "float %.8f" % (sizes[0],),
"int %d" % (sizes[1],) if is_single_integer(sizes[1]) else "float %.8f" % (sizes[1],),
]
sizes_str = "(%s, %s)" % (sizes_str[0], sizes_str[1])
raise Exception(
"Cannot resize to the target sizes %s. At least one value is zero or lower than zero." % (sizes_str,))
# change after the validation to make the above error messages match the original input
if is_single_number(sizes):
sizes = (sizes, sizes)
else:
do_assert(len(sizes) == 2, "Expected tuple with exactly two entries, got %d entries." % (len(sizes),))
do_assert(all([is_single_number(val) for val in sizes]),
"Expected tuple with two ints or floats, got types %s." % (str([type(val) for val in sizes]),))
# if input is a list, call this function N times for N images
# but check beforehand if all images have the same shape, then just convert to a single array and de-convert
# afterwards
if isinstance(images, list):
nb_shapes = len(set([image.shape for image in images]))
if nb_shapes == 1:
return list(imresize_many_images(np.array(images), sizes=sizes, interpolation=interpolation))
else:
return [imresize_many_images(image[np.newaxis, ...], sizes=sizes, interpolation=interpolation)[0, ...]
for image in images]
shape = images.shape
do_assert(images.ndim in [3, 4], "Expected array of shape (N, H, W, [C]), got shape %s" % (str(shape),))
nb_images = shape[0]
im_height, im_width = shape[1], shape[2]
nb_channels = shape[3] if images.ndim > 3 else None
height, width = sizes[0], sizes[1]
height = int(np.round(im_height * height)) if is_single_float(height) else height
width = int(np.round(im_width * width)) if is_single_float(width) else width
if height == im_height and width == im_width:
return np.copy(images)
ip = interpolation
do_assert(ip is None or ip in IMRESIZE_VALID_INTERPOLATIONS)
if ip is None:
if height > im_height or width > im_width:
ip = cv2.INTER_AREA
else:
ip = cv2.INTER_LINEAR
elif ip in ["nearest", cv2.INTER_NEAREST]:
ip = cv2.INTER_NEAREST
elif ip in ["linear", cv2.INTER_LINEAR]:
ip = cv2.INTER_LINEAR
elif ip in ["area", cv2.INTER_AREA]:
ip = cv2.INTER_AREA
else: # if ip in ["cubic", cv2.INTER_CUBIC]:
ip = cv2.INTER_CUBIC
# TODO find more beautiful way to avoid circular imports
from . import dtypes as iadt
if ip == cv2.INTER_NEAREST:
iadt.gate_dtypes(images,
allowed=["bool", "uint8", "uint16", "int8", "int16", "int32", "float16", "float32", "float64"],
disallowed=["uint32", "uint64", "uint128", "uint256", "int64", "int128", "int256",
"float96", "float128", "float256"],
augmenter=None)
else:
iadt.gate_dtypes(images,
allowed=["bool", "uint8", "uint16", "int8", "int16", "float16", "float32", "float64"],
disallowed=["uint32", "uint64", "uint128", "uint256", "int32", "int64", "int128", "int256",
"float96", "float128", "float256"],
augmenter=None)
result_shape = (nb_images, height, width)
if nb_channels is not None:
result_shape = result_shape + (nb_channels,)
result = np.zeros(result_shape, dtype=images.dtype)
for i, image in enumerate(images):
input_dtype = image.dtype
if image.dtype.type == np.bool_:
image = image.astype(np.uint8) * 255
elif image.dtype.type == np.int8 and ip != cv2.INTER_NEAREST:
image = image.astype(np.int16)
elif image.dtype.type == np.float16:
image = image.astype(np.float32)
result_img = cv2.resize(image, (width, height), interpolation=ip)
assert result_img.dtype == image.dtype
# cv2 removes the channel axis if input was (H, W, 1)
# we re-add it (but only if input was not (H, W))
if len(result_img.shape) == 2 and nb_channels is not None and nb_channels == 1:
result_img = result_img[:, :, np.newaxis]
if input_dtype.type == np.bool_:
result_img = result_img > 127
elif input_dtype.type == np.int8 and ip != cv2.INTER_NEAREST:
# TODO somehow better avoid circular imports here
from . import dtypes as iadt
result_img = iadt.restore_dtypes_(result_img, np.int8)
elif input_dtype.type == np.float16:
# TODO see above
from . import dtypes as iadt
result_img = iadt.restore_dtypes_(result_img, np.float16)
result[i] = result_img
return result | [
"def",
"imresize_many_images",
"(",
"images",
",",
"sizes",
"=",
"None",
",",
"interpolation",
"=",
"None",
")",
":",
"# we just do nothing if the input contains zero images",
"# one could also argue that an exception would be appropriate here",
"if",
"len",
"(",
"images",
")",
"==",
"0",
":",
"return",
"images",
"# verify that all input images have height/width > 0",
"do_assert",
"(",
"all",
"(",
"[",
"image",
".",
"shape",
"[",
"0",
"]",
">",
"0",
"and",
"image",
".",
"shape",
"[",
"1",
"]",
">",
"0",
"for",
"image",
"in",
"images",
"]",
")",
",",
"(",
"\"Cannot resize images, because at least one image has a height and/or width of zero. \"",
"+",
"\"Observed shapes were: %s.\"",
")",
"%",
"(",
"str",
"(",
"[",
"image",
".",
"shape",
"for",
"image",
"in",
"images",
"]",
")",
",",
")",
")",
"# verify that sizes contains only values >0",
"if",
"is_single_number",
"(",
"sizes",
")",
"and",
"sizes",
"<=",
"0",
":",
"raise",
"Exception",
"(",
"\"Cannot resize to the target size %.8f, because the value is zero or lower than zero.\"",
"%",
"(",
"sizes",
",",
")",
")",
"elif",
"isinstance",
"(",
"sizes",
",",
"tuple",
")",
"and",
"(",
"sizes",
"[",
"0",
"]",
"<=",
"0",
"or",
"sizes",
"[",
"1",
"]",
"<=",
"0",
")",
":",
"sizes_str",
"=",
"[",
"\"int %d\"",
"%",
"(",
"sizes",
"[",
"0",
"]",
",",
")",
"if",
"is_single_integer",
"(",
"sizes",
"[",
"0",
"]",
")",
"else",
"\"float %.8f\"",
"%",
"(",
"sizes",
"[",
"0",
"]",
",",
")",
",",
"\"int %d\"",
"%",
"(",
"sizes",
"[",
"1",
"]",
",",
")",
"if",
"is_single_integer",
"(",
"sizes",
"[",
"1",
"]",
")",
"else",
"\"float %.8f\"",
"%",
"(",
"sizes",
"[",
"1",
"]",
",",
")",
",",
"]",
"sizes_str",
"=",
"\"(%s, %s)\"",
"%",
"(",
"sizes_str",
"[",
"0",
"]",
",",
"sizes_str",
"[",
"1",
"]",
")",
"raise",
"Exception",
"(",
"\"Cannot resize to the target sizes %s. At least one value is zero or lower than zero.\"",
"%",
"(",
"sizes_str",
",",
")",
")",
"# change after the validation to make the above error messages match the original input",
"if",
"is_single_number",
"(",
"sizes",
")",
":",
"sizes",
"=",
"(",
"sizes",
",",
"sizes",
")",
"else",
":",
"do_assert",
"(",
"len",
"(",
"sizes",
")",
"==",
"2",
",",
"\"Expected tuple with exactly two entries, got %d entries.\"",
"%",
"(",
"len",
"(",
"sizes",
")",
",",
")",
")",
"do_assert",
"(",
"all",
"(",
"[",
"is_single_number",
"(",
"val",
")",
"for",
"val",
"in",
"sizes",
"]",
")",
",",
"\"Expected tuple with two ints or floats, got types %s.\"",
"%",
"(",
"str",
"(",
"[",
"type",
"(",
"val",
")",
"for",
"val",
"in",
"sizes",
"]",
")",
",",
")",
")",
"# if input is a list, call this function N times for N images",
"# but check beforehand if all images have the same shape, then just convert to a single array and de-convert",
"# afterwards",
"if",
"isinstance",
"(",
"images",
",",
"list",
")",
":",
"nb_shapes",
"=",
"len",
"(",
"set",
"(",
"[",
"image",
".",
"shape",
"for",
"image",
"in",
"images",
"]",
")",
")",
"if",
"nb_shapes",
"==",
"1",
":",
"return",
"list",
"(",
"imresize_many_images",
"(",
"np",
".",
"array",
"(",
"images",
")",
",",
"sizes",
"=",
"sizes",
",",
"interpolation",
"=",
"interpolation",
")",
")",
"else",
":",
"return",
"[",
"imresize_many_images",
"(",
"image",
"[",
"np",
".",
"newaxis",
",",
"...",
"]",
",",
"sizes",
"=",
"sizes",
",",
"interpolation",
"=",
"interpolation",
")",
"[",
"0",
",",
"...",
"]",
"for",
"image",
"in",
"images",
"]",
"shape",
"=",
"images",
".",
"shape",
"do_assert",
"(",
"images",
".",
"ndim",
"in",
"[",
"3",
",",
"4",
"]",
",",
"\"Expected array of shape (N, H, W, [C]), got shape %s\"",
"%",
"(",
"str",
"(",
"shape",
")",
",",
")",
")",
"nb_images",
"=",
"shape",
"[",
"0",
"]",
"im_height",
",",
"im_width",
"=",
"shape",
"[",
"1",
"]",
",",
"shape",
"[",
"2",
"]",
"nb_channels",
"=",
"shape",
"[",
"3",
"]",
"if",
"images",
".",
"ndim",
">",
"3",
"else",
"None",
"height",
",",
"width",
"=",
"sizes",
"[",
"0",
"]",
",",
"sizes",
"[",
"1",
"]",
"height",
"=",
"int",
"(",
"np",
".",
"round",
"(",
"im_height",
"*",
"height",
")",
")",
"if",
"is_single_float",
"(",
"height",
")",
"else",
"height",
"width",
"=",
"int",
"(",
"np",
".",
"round",
"(",
"im_width",
"*",
"width",
")",
")",
"if",
"is_single_float",
"(",
"width",
")",
"else",
"width",
"if",
"height",
"==",
"im_height",
"and",
"width",
"==",
"im_width",
":",
"return",
"np",
".",
"copy",
"(",
"images",
")",
"ip",
"=",
"interpolation",
"do_assert",
"(",
"ip",
"is",
"None",
"or",
"ip",
"in",
"IMRESIZE_VALID_INTERPOLATIONS",
")",
"if",
"ip",
"is",
"None",
":",
"if",
"height",
">",
"im_height",
"or",
"width",
">",
"im_width",
":",
"ip",
"=",
"cv2",
".",
"INTER_AREA",
"else",
":",
"ip",
"=",
"cv2",
".",
"INTER_LINEAR",
"elif",
"ip",
"in",
"[",
"\"nearest\"",
",",
"cv2",
".",
"INTER_NEAREST",
"]",
":",
"ip",
"=",
"cv2",
".",
"INTER_NEAREST",
"elif",
"ip",
"in",
"[",
"\"linear\"",
",",
"cv2",
".",
"INTER_LINEAR",
"]",
":",
"ip",
"=",
"cv2",
".",
"INTER_LINEAR",
"elif",
"ip",
"in",
"[",
"\"area\"",
",",
"cv2",
".",
"INTER_AREA",
"]",
":",
"ip",
"=",
"cv2",
".",
"INTER_AREA",
"else",
":",
"# if ip in [\"cubic\", cv2.INTER_CUBIC]:",
"ip",
"=",
"cv2",
".",
"INTER_CUBIC",
"# TODO find more beautiful way to avoid circular imports",
"from",
".",
"import",
"dtypes",
"as",
"iadt",
"if",
"ip",
"==",
"cv2",
".",
"INTER_NEAREST",
":",
"iadt",
".",
"gate_dtypes",
"(",
"images",
",",
"allowed",
"=",
"[",
"\"bool\"",
",",
"\"uint8\"",
",",
"\"uint16\"",
",",
"\"int8\"",
",",
"\"int16\"",
",",
"\"int32\"",
",",
"\"float16\"",
",",
"\"float32\"",
",",
"\"float64\"",
"]",
",",
"disallowed",
"=",
"[",
"\"uint32\"",
",",
"\"uint64\"",
",",
"\"uint128\"",
",",
"\"uint256\"",
",",
"\"int64\"",
",",
"\"int128\"",
",",
"\"int256\"",
",",
"\"float96\"",
",",
"\"float128\"",
",",
"\"float256\"",
"]",
",",
"augmenter",
"=",
"None",
")",
"else",
":",
"iadt",
".",
"gate_dtypes",
"(",
"images",
",",
"allowed",
"=",
"[",
"\"bool\"",
",",
"\"uint8\"",
",",
"\"uint16\"",
",",
"\"int8\"",
",",
"\"int16\"",
",",
"\"float16\"",
",",
"\"float32\"",
",",
"\"float64\"",
"]",
",",
"disallowed",
"=",
"[",
"\"uint32\"",
",",
"\"uint64\"",
",",
"\"uint128\"",
",",
"\"uint256\"",
",",
"\"int32\"",
",",
"\"int64\"",
",",
"\"int128\"",
",",
"\"int256\"",
",",
"\"float96\"",
",",
"\"float128\"",
",",
"\"float256\"",
"]",
",",
"augmenter",
"=",
"None",
")",
"result_shape",
"=",
"(",
"nb_images",
",",
"height",
",",
"width",
")",
"if",
"nb_channels",
"is",
"not",
"None",
":",
"result_shape",
"=",
"result_shape",
"+",
"(",
"nb_channels",
",",
")",
"result",
"=",
"np",
".",
"zeros",
"(",
"result_shape",
",",
"dtype",
"=",
"images",
".",
"dtype",
")",
"for",
"i",
",",
"image",
"in",
"enumerate",
"(",
"images",
")",
":",
"input_dtype",
"=",
"image",
".",
"dtype",
"if",
"image",
".",
"dtype",
".",
"type",
"==",
"np",
".",
"bool_",
":",
"image",
"=",
"image",
".",
"astype",
"(",
"np",
".",
"uint8",
")",
"*",
"255",
"elif",
"image",
".",
"dtype",
".",
"type",
"==",
"np",
".",
"int8",
"and",
"ip",
"!=",
"cv2",
".",
"INTER_NEAREST",
":",
"image",
"=",
"image",
".",
"astype",
"(",
"np",
".",
"int16",
")",
"elif",
"image",
".",
"dtype",
".",
"type",
"==",
"np",
".",
"float16",
":",
"image",
"=",
"image",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"result_img",
"=",
"cv2",
".",
"resize",
"(",
"image",
",",
"(",
"width",
",",
"height",
")",
",",
"interpolation",
"=",
"ip",
")",
"assert",
"result_img",
".",
"dtype",
"==",
"image",
".",
"dtype",
"# cv2 removes the channel axis if input was (H, W, 1)",
"# we re-add it (but only if input was not (H, W))",
"if",
"len",
"(",
"result_img",
".",
"shape",
")",
"==",
"2",
"and",
"nb_channels",
"is",
"not",
"None",
"and",
"nb_channels",
"==",
"1",
":",
"result_img",
"=",
"result_img",
"[",
":",
",",
":",
",",
"np",
".",
"newaxis",
"]",
"if",
"input_dtype",
".",
"type",
"==",
"np",
".",
"bool_",
":",
"result_img",
"=",
"result_img",
">",
"127",
"elif",
"input_dtype",
".",
"type",
"==",
"np",
".",
"int8",
"and",
"ip",
"!=",
"cv2",
".",
"INTER_NEAREST",
":",
"# TODO somehow better avoid circular imports here",
"from",
".",
"import",
"dtypes",
"as",
"iadt",
"result_img",
"=",
"iadt",
".",
"restore_dtypes_",
"(",
"result_img",
",",
"np",
".",
"int8",
")",
"elif",
"input_dtype",
".",
"type",
"==",
"np",
".",
"float16",
":",
"# TODO see above",
"from",
".",
"import",
"dtypes",
"as",
"iadt",
"result_img",
"=",
"iadt",
".",
"restore_dtypes_",
"(",
"result_img",
",",
"np",
".",
"float16",
")",
"result",
"[",
"i",
"]",
"=",
"result_img",
"return",
"result"
] | Resize many images to a specified size.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: no (1)
* ``uint64``: no (2)
* ``int8``: yes; tested (3)
* ``int16``: yes; tested
* ``int32``: limited; tested (4)
* ``int64``: no (2)
* ``float16``: yes; tested (5)
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: no (1)
* ``bool``: yes; tested (6)
- (1) rejected by ``cv2.imresize``
- (2) results too inaccurate
- (3) mapped internally to ``int16`` when interpolation!="nearest"
- (4) only supported for interpolation="nearest", other interpolations lead to cv2 error
- (5) mapped internally to ``float32``
- (6) mapped internally to ``uint8``
Parameters
----------
images : (N,H,W,[C]) ndarray or list of (H,W,[C]) ndarray
Array of the images to resize.
Usually recommended to be of dtype uint8.
sizes : float or iterable of int or iterable of float
The new size of the images, given either as a fraction (a single float) or as
a ``(height, width)`` tuple of two integers or as a ``(height fraction, width fraction)``
tuple of two floats.
interpolation : None or str or int, optional
The interpolation to use during resize.
If int, then expected to be one of:
* ``cv2.INTER_NEAREST`` (nearest neighbour interpolation)
* ``cv2.INTER_LINEAR`` (linear interpolation)
* ``cv2.INTER_AREA`` (area interpolation)
* ``cv2.INTER_CUBIC`` (cubic interpolation)
If string, then expected to be one of:
* ``nearest`` (identical to ``cv2.INTER_NEAREST``)
* ``linear`` (identical to ``cv2.INTER_LINEAR``)
* ``area`` (identical to ``cv2.INTER_AREA``)
* ``cubic`` (identical to ``cv2.INTER_CUBIC``)
If None, the interpolation will be chosen automatically. For size
increases, area interpolation will be picked and for size decreases,
linear interpolation will be picked.
Returns
-------
result : (N,H',W',[C]) ndarray
Array of the resized images.
Examples
--------
>>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), 2.0)
Converts 2 RGB images of height and width 16 to images of height and width 16*2 = 32.
>>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (16, 32))
Converts 2 RGB images of height and width 16 to images of height 16 and width 32.
>>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (2.0, 4.0))
Converts 2 RGB images of height and width 16 to images of height 32 and width 64. | [
"Resize",
"many",
"images",
"to",
"a",
"specified",
"size",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L1056-L1254 | valid |
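A runnable counterpart to the doctest lines in the row above, again assuming the recorded imgaug version is installed:
import numpy as np
from imgaug.imgaug import imresize_many_images
images = np.zeros((2, 16, 16, 3), dtype=np.uint8)
# a single float scales both height and width -> (2, 32, 32, 3)
print(imresize_many_images(images, 2.0).shape)
# an explicit (height, width) tuple in pixels -> (2, 16, 32, 3)
print(imresize_many_images(images, (16, 32), interpolation="cubic").shape)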
aleju/imgaug | imgaug/imgaug.py | imresize_single_image | def imresize_single_image(image, sizes, interpolation=None):
"""
Resizes a single image.
dtype support::
See :func:`imgaug.imgaug.imresize_many_images`.
Parameters
----------
image : (H,W,C) ndarray or (H,W) ndarray
Array of the image to resize.
Usually recommended to be of dtype uint8.
sizes : float or iterable of int or iterable of float
See :func:`imgaug.imgaug.imresize_many_images`.
interpolation : None or str or int, optional
See :func:`imgaug.imgaug.imresize_many_images`.
Returns
-------
out : (H',W',C) ndarray or (H',W') ndarray
The resized image.
"""
grayscale = False
if image.ndim == 2:
grayscale = True
image = image[:, :, np.newaxis]
do_assert(len(image.shape) == 3, image.shape)
rs = imresize_many_images(image[np.newaxis, :, :, :], sizes, interpolation=interpolation)
if grayscale:
return np.squeeze(rs[0, :, :, 0])
else:
return rs[0, ...] | python | def imresize_single_image(image, sizes, interpolation=None):
"""
Resizes a single image.
dtype support::
See :func:`imgaug.imgaug.imresize_many_images`.
Parameters
----------
image : (H,W,C) ndarray or (H,W) ndarray
Array of the image to resize.
Usually recommended to be of dtype uint8.
sizes : float or iterable of int or iterable of float
See :func:`imgaug.imgaug.imresize_many_images`.
interpolation : None or str or int, optional
See :func:`imgaug.imgaug.imresize_many_images`.
Returns
-------
out : (H',W',C) ndarray or (H',W') ndarray
The resized image.
"""
grayscale = False
if image.ndim == 2:
grayscale = True
image = image[:, :, np.newaxis]
do_assert(len(image.shape) == 3, image.shape)
rs = imresize_many_images(image[np.newaxis, :, :, :], sizes, interpolation=interpolation)
if grayscale:
return np.squeeze(rs[0, :, :, 0])
else:
return rs[0, ...] | [
"def",
"imresize_single_image",
"(",
"image",
",",
"sizes",
",",
"interpolation",
"=",
"None",
")",
":",
"grayscale",
"=",
"False",
"if",
"image",
".",
"ndim",
"==",
"2",
":",
"grayscale",
"=",
"True",
"image",
"=",
"image",
"[",
":",
",",
":",
",",
"np",
".",
"newaxis",
"]",
"do_assert",
"(",
"len",
"(",
"image",
".",
"shape",
")",
"==",
"3",
",",
"image",
".",
"shape",
")",
"rs",
"=",
"imresize_many_images",
"(",
"image",
"[",
"np",
".",
"newaxis",
",",
":",
",",
":",
",",
":",
"]",
",",
"sizes",
",",
"interpolation",
"=",
"interpolation",
")",
"if",
"grayscale",
":",
"return",
"np",
".",
"squeeze",
"(",
"rs",
"[",
"0",
",",
":",
",",
":",
",",
"0",
"]",
")",
"else",
":",
"return",
"rs",
"[",
"0",
",",
"...",
"]"
] | Resizes a single image.
dtype support::
See :func:`imgaug.imgaug.imresize_many_images`.
Parameters
----------
image : (H,W,C) ndarray or (H,W) ndarray
Array of the image to resize.
Usually recommended to be of dtype uint8.
sizes : float or iterable of int or iterable of float
See :func:`imgaug.imgaug.imresize_many_images`.
interpolation : None or str or int, optional
See :func:`imgaug.imgaug.imresize_many_images`.
Returns
-------
out : (H',W',C) ndarray or (H',W') ndarray
The resized image. | [
"Resizes",
"a",
"single",
"image",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L1257-L1293 | valid |
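A short sketch for the single-image wrapper documented above, under the same installation assumption; the shapes are illustrative only:
import numpy as np
from imgaug.imgaug import imresize_single_image
image = np.zeros((16, 24, 3), dtype=np.uint8)
# (height, width) target size; a 2D grayscale input would come back as 2D again
resized = imresize_single_image(image, (32, 48), interpolation="linear")
print(resized.shape)  # (32, 48, 3)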
aleju/imgaug | imgaug/imgaug.py | pad | def pad(arr, top=0, right=0, bottom=0, left=0, mode="constant", cval=0):
"""
Pad an image-like array on its top/right/bottom/left side.
This function is a wrapper around :func:`numpy.pad`.
dtype support::
* ``uint8``: yes; fully tested (1)
* ``uint16``: yes; fully tested (1)
* ``uint32``: yes; fully tested (2) (3)
* ``uint64``: yes; fully tested (2) (3)
* ``int8``: yes; fully tested (1)
* ``int16``: yes; fully tested (1)
* ``int32``: yes; fully tested (1)
* ``int64``: yes; fully tested (2) (3)
* ``float16``: yes; fully tested (2) (3)
* ``float32``: yes; fully tested (1)
* ``float64``: yes; fully tested (1)
* ``float128``: yes; fully tested (2) (3)
* ``bool``: yes; tested (2) (3)
- (1) Uses ``cv2`` if `mode` is one of: ``"constant"``, ``"edge"``, ``"reflect"``, ``"symmetric"``.
Otherwise uses ``numpy``.
- (2) Uses ``numpy``.
- (3) Rejected by ``cv2``.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pad.
top : int, optional
Amount of pixels to add at the top side of the image. Must be 0 or greater.
right : int, optional
Amount of pixels to add at the right side of the image. Must be 0 or greater.
bottom : int, optional
Amount of pixels to add at the bottom side of the image. Must be 0 or greater.
left : int, optional
Amount of pixels to add at the left side of the image. Must be 0 or greater.
mode : str, optional
Padding mode to use. See :func:`numpy.pad` for details.
In case of mode ``constant``, the parameter `cval` will be used as the ``constant_values``
parameter to :func:`numpy.pad`.
In case of mode ``linear_ramp``, the parameter `cval` will be used as the ``end_values``
parameter to :func:`numpy.pad`.
cval : number, optional
Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
The cval is expected to match the input array's dtype and value range.
Returns
-------
arr_pad : (H',W') ndarray or (H',W',C) ndarray
Padded array with height ``H'=H+top+bottom`` and width ``W'=W+left+right``.
"""
do_assert(arr.ndim in [2, 3])
do_assert(top >= 0)
do_assert(right >= 0)
do_assert(bottom >= 0)
do_assert(left >= 0)
if top > 0 or right > 0 or bottom > 0 or left > 0:
mapping_mode_np_to_cv2 = {
"constant": cv2.BORDER_CONSTANT,
"edge": cv2.BORDER_REPLICATE,
"linear_ramp": None,
"maximum": None,
"mean": None,
"median": None,
"minimum": None,
"reflect": cv2.BORDER_REFLECT_101,
"symmetric": cv2.BORDER_REFLECT,
"wrap": None,
cv2.BORDER_CONSTANT: cv2.BORDER_CONSTANT,
cv2.BORDER_REPLICATE: cv2.BORDER_REPLICATE,
cv2.BORDER_REFLECT_101: cv2.BORDER_REFLECT_101,
cv2.BORDER_REFLECT: cv2.BORDER_REFLECT
}
bad_mode_cv2 = mapping_mode_np_to_cv2.get(mode, None) is None
# these datatypes all simply generate a "TypeError: src data type = X is not supported" error
bad_datatype_cv2 = arr.dtype.name in ["uint32", "uint64", "int64", "float16", "float128", "bool"]
if not bad_datatype_cv2 and not bad_mode_cv2:
cval = float(cval) if arr.dtype.kind == "f" else int(cval) # results in TypeError otherwise for np inputs
if arr.ndim == 2 or arr.shape[2] <= 4:
# without this, only the first channel is padded with the cval, all following channels with 0
if arr.ndim == 3:
cval = tuple([cval] * arr.shape[2])
arr_pad = cv2.copyMakeBorder(arr, top=top, bottom=bottom, left=left, right=right,
borderType=mapping_mode_np_to_cv2[mode], value=cval)
if arr.ndim == 3 and arr_pad.ndim == 2:
arr_pad = arr_pad[..., np.newaxis]
else:
result = []
channel_start_idx = 0
while channel_start_idx < arr.shape[2]:
arr_c = arr[..., channel_start_idx:channel_start_idx+4]
cval_c = tuple([cval] * arr_c.shape[2])
arr_pad_c = cv2.copyMakeBorder(arr_c, top=top, bottom=bottom, left=left, right=right,
borderType=mapping_mode_np_to_cv2[mode], value=cval_c)
arr_pad_c = np.atleast_3d(arr_pad_c)
result.append(arr_pad_c)
channel_start_idx += 4
arr_pad = np.concatenate(result, axis=2)
else:
paddings_np = [(top, bottom), (left, right)] # paddings for 2d case
if arr.ndim == 3:
paddings_np.append((0, 0)) # add paddings for 3d case
if mode == "constant":
arr_pad = np.pad(arr, paddings_np, mode=mode, constant_values=cval)
elif mode == "linear_ramp":
arr_pad = np.pad(arr, paddings_np, mode=mode, end_values=cval)
else:
arr_pad = np.pad(arr, paddings_np, mode=mode)
return arr_pad
return np.copy(arr) | python | def pad(arr, top=0, right=0, bottom=0, left=0, mode="constant", cval=0):
"""
Pad an image-like array on its top/right/bottom/left side.
This function is a wrapper around :func:`numpy.pad`.
dtype support::
* ``uint8``: yes; fully tested (1)
* ``uint16``: yes; fully tested (1)
* ``uint32``: yes; fully tested (2) (3)
* ``uint64``: yes; fully tested (2) (3)
* ``int8``: yes; fully tested (1)
* ``int16``: yes; fully tested (1)
* ``int32``: yes; fully tested (1)
* ``int64``: yes; fully tested (2) (3)
* ``float16``: yes; fully tested (2) (3)
* ``float32``: yes; fully tested (1)
* ``float64``: yes; fully tested (1)
* ``float128``: yes; fully tested (2) (3)
* ``bool``: yes; tested (2) (3)
- (1) Uses ``cv2`` if `mode` is one of: ``"constant"``, ``"edge"``, ``"reflect"``, ``"symmetric"``.
Otherwise uses ``numpy``.
- (2) Uses ``numpy``.
- (3) Rejected by ``cv2``.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pad.
top : int, optional
Amount of pixels to add at the top side of the image. Must be 0 or greater.
right : int, optional
Amount of pixels to add at the right side of the image. Must be 0 or greater.
bottom : int, optional
Amount of pixels to add at the bottom side of the image. Must be 0 or greater.
left : int, optional
Amount of pixels to add at the left side of the image. Must be 0 or greater.
mode : str, optional
Padding mode to use. See :func:`numpy.pad` for details.
In case of mode ``constant``, the parameter `cval` will be used as the ``constant_values``
parameter to :func:`numpy.pad`.
In case of mode ``linear_ramp``, the parameter `cval` will be used as the ``end_values``
parameter to :func:`numpy.pad`.
cval : number, optional
Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
The cval is expected to match the input array's dtype and value range.
Returns
-------
arr_pad : (H',W') ndarray or (H',W',C) ndarray
Padded array with height ``H'=H+top+bottom`` and width ``W'=W+left+right``.
"""
do_assert(arr.ndim in [2, 3])
do_assert(top >= 0)
do_assert(right >= 0)
do_assert(bottom >= 0)
do_assert(left >= 0)
if top > 0 or right > 0 or bottom > 0 or left > 0:
mapping_mode_np_to_cv2 = {
"constant": cv2.BORDER_CONSTANT,
"edge": cv2.BORDER_REPLICATE,
"linear_ramp": None,
"maximum": None,
"mean": None,
"median": None,
"minimum": None,
"reflect": cv2.BORDER_REFLECT_101,
"symmetric": cv2.BORDER_REFLECT,
"wrap": None,
cv2.BORDER_CONSTANT: cv2.BORDER_CONSTANT,
cv2.BORDER_REPLICATE: cv2.BORDER_REPLICATE,
cv2.BORDER_REFLECT_101: cv2.BORDER_REFLECT_101,
cv2.BORDER_REFLECT: cv2.BORDER_REFLECT
}
bad_mode_cv2 = mapping_mode_np_to_cv2.get(mode, None) is None
# these datatypes all simply generate a "TypeError: src data type = X is not supported" error
bad_datatype_cv2 = arr.dtype.name in ["uint32", "uint64", "int64", "float16", "float128", "bool"]
if not bad_datatype_cv2 and not bad_mode_cv2:
cval = float(cval) if arr.dtype.kind == "f" else int(cval) # results in TypeError otherwise for np inputs
if arr.ndim == 2 or arr.shape[2] <= 4:
# without this, only the first channel is padded with the cval, all following channels with 0
if arr.ndim == 3:
cval = tuple([cval] * arr.shape[2])
arr_pad = cv2.copyMakeBorder(arr, top=top, bottom=bottom, left=left, right=right,
borderType=mapping_mode_np_to_cv2[mode], value=cval)
if arr.ndim == 3 and arr_pad.ndim == 2:
arr_pad = arr_pad[..., np.newaxis]
else:
result = []
channel_start_idx = 0
while channel_start_idx < arr.shape[2]:
arr_c = arr[..., channel_start_idx:channel_start_idx+4]
cval_c = tuple([cval] * arr_c.shape[2])
arr_pad_c = cv2.copyMakeBorder(arr_c, top=top, bottom=bottom, left=left, right=right,
borderType=mapping_mode_np_to_cv2[mode], value=cval_c)
arr_pad_c = np.atleast_3d(arr_pad_c)
result.append(arr_pad_c)
channel_start_idx += 4
arr_pad = np.concatenate(result, axis=2)
else:
paddings_np = [(top, bottom), (left, right)] # paddings for 2d case
if arr.ndim == 3:
paddings_np.append((0, 0)) # add paddings for 3d case
if mode == "constant":
arr_pad = np.pad(arr, paddings_np, mode=mode, constant_values=cval)
elif mode == "linear_ramp":
arr_pad = np.pad(arr, paddings_np, mode=mode, end_values=cval)
else:
arr_pad = np.pad(arr, paddings_np, mode=mode)
return arr_pad
return np.copy(arr) | [
"def",
"pad",
"(",
"arr",
",",
"top",
"=",
"0",
",",
"right",
"=",
"0",
",",
"bottom",
"=",
"0",
",",
"left",
"=",
"0",
",",
"mode",
"=",
"\"constant\"",
",",
"cval",
"=",
"0",
")",
":",
"do_assert",
"(",
"arr",
".",
"ndim",
"in",
"[",
"2",
",",
"3",
"]",
")",
"do_assert",
"(",
"top",
">=",
"0",
")",
"do_assert",
"(",
"right",
">=",
"0",
")",
"do_assert",
"(",
"bottom",
">=",
"0",
")",
"do_assert",
"(",
"left",
">=",
"0",
")",
"if",
"top",
">",
"0",
"or",
"right",
">",
"0",
"or",
"bottom",
">",
"0",
"or",
"left",
">",
"0",
":",
"mapping_mode_np_to_cv2",
"=",
"{",
"\"constant\"",
":",
"cv2",
".",
"BORDER_CONSTANT",
",",
"\"edge\"",
":",
"cv2",
".",
"BORDER_REPLICATE",
",",
"\"linear_ramp\"",
":",
"None",
",",
"\"maximum\"",
":",
"None",
",",
"\"mean\"",
":",
"None",
",",
"\"median\"",
":",
"None",
",",
"\"minimum\"",
":",
"None",
",",
"\"reflect\"",
":",
"cv2",
".",
"BORDER_REFLECT_101",
",",
"\"symmetric\"",
":",
"cv2",
".",
"BORDER_REFLECT",
",",
"\"wrap\"",
":",
"None",
",",
"cv2",
".",
"BORDER_CONSTANT",
":",
"cv2",
".",
"BORDER_CONSTANT",
",",
"cv2",
".",
"BORDER_REPLICATE",
":",
"cv2",
".",
"BORDER_REPLICATE",
",",
"cv2",
".",
"BORDER_REFLECT_101",
":",
"cv2",
".",
"BORDER_REFLECT_101",
",",
"cv2",
".",
"BORDER_REFLECT",
":",
"cv2",
".",
"BORDER_REFLECT",
"}",
"bad_mode_cv2",
"=",
"mapping_mode_np_to_cv2",
".",
"get",
"(",
"mode",
",",
"None",
")",
"is",
"None",
"# these datatypes all simply generate a \"TypeError: src data type = X is not supported\" error",
"bad_datatype_cv2",
"=",
"arr",
".",
"dtype",
".",
"name",
"in",
"[",
"\"uint32\"",
",",
"\"uint64\"",
",",
"\"int64\"",
",",
"\"float16\"",
",",
"\"float128\"",
",",
"\"bool\"",
"]",
"if",
"not",
"bad_datatype_cv2",
"and",
"not",
"bad_mode_cv2",
":",
"cval",
"=",
"float",
"(",
"cval",
")",
"if",
"arr",
".",
"dtype",
".",
"kind",
"==",
"\"f\"",
"else",
"int",
"(",
"cval",
")",
"# results in TypeError otherwise for np inputs",
"if",
"arr",
".",
"ndim",
"==",
"2",
"or",
"arr",
".",
"shape",
"[",
"2",
"]",
"<=",
"4",
":",
"# without this, only the first channel is padded with the cval, all following channels with 0",
"if",
"arr",
".",
"ndim",
"==",
"3",
":",
"cval",
"=",
"tuple",
"(",
"[",
"cval",
"]",
"*",
"arr",
".",
"shape",
"[",
"2",
"]",
")",
"arr_pad",
"=",
"cv2",
".",
"copyMakeBorder",
"(",
"arr",
",",
"top",
"=",
"top",
",",
"bottom",
"=",
"bottom",
",",
"left",
"=",
"left",
",",
"right",
"=",
"right",
",",
"borderType",
"=",
"mapping_mode_np_to_cv2",
"[",
"mode",
"]",
",",
"value",
"=",
"cval",
")",
"if",
"arr",
".",
"ndim",
"==",
"3",
"and",
"arr_pad",
".",
"ndim",
"==",
"2",
":",
"arr_pad",
"=",
"arr_pad",
"[",
"...",
",",
"np",
".",
"newaxis",
"]",
"else",
":",
"result",
"=",
"[",
"]",
"channel_start_idx",
"=",
"0",
"while",
"channel_start_idx",
"<",
"arr",
".",
"shape",
"[",
"2",
"]",
":",
"arr_c",
"=",
"arr",
"[",
"...",
",",
"channel_start_idx",
":",
"channel_start_idx",
"+",
"4",
"]",
"cval_c",
"=",
"tuple",
"(",
"[",
"cval",
"]",
"*",
"arr_c",
".",
"shape",
"[",
"2",
"]",
")",
"arr_pad_c",
"=",
"cv2",
".",
"copyMakeBorder",
"(",
"arr_c",
",",
"top",
"=",
"top",
",",
"bottom",
"=",
"bottom",
",",
"left",
"=",
"left",
",",
"right",
"=",
"right",
",",
"borderType",
"=",
"mapping_mode_np_to_cv2",
"[",
"mode",
"]",
",",
"value",
"=",
"cval_c",
")",
"arr_pad_c",
"=",
"np",
".",
"atleast_3d",
"(",
"arr_pad_c",
")",
"result",
".",
"append",
"(",
"arr_pad_c",
")",
"channel_start_idx",
"+=",
"4",
"arr_pad",
"=",
"np",
".",
"concatenate",
"(",
"result",
",",
"axis",
"=",
"2",
")",
"else",
":",
"paddings_np",
"=",
"[",
"(",
"top",
",",
"bottom",
")",
",",
"(",
"left",
",",
"right",
")",
"]",
"# paddings for 2d case",
"if",
"arr",
".",
"ndim",
"==",
"3",
":",
"paddings_np",
".",
"append",
"(",
"(",
"0",
",",
"0",
")",
")",
"# add paddings for 3d case",
"if",
"mode",
"==",
"\"constant\"",
":",
"arr_pad",
"=",
"np",
".",
"pad",
"(",
"arr",
",",
"paddings_np",
",",
"mode",
"=",
"mode",
",",
"constant_values",
"=",
"cval",
")",
"elif",
"mode",
"==",
"\"linear_ramp\"",
":",
"arr_pad",
"=",
"np",
".",
"pad",
"(",
"arr",
",",
"paddings_np",
",",
"mode",
"=",
"mode",
",",
"end_values",
"=",
"cval",
")",
"else",
":",
"arr_pad",
"=",
"np",
".",
"pad",
"(",
"arr",
",",
"paddings_np",
",",
"mode",
"=",
"mode",
")",
"return",
"arr_pad",
"return",
"np",
".",
"copy",
"(",
"arr",
")"
] | Pad an image-like array on its top/right/bottom/left side.
This function is a wrapper around :func:`numpy.pad`.
dtype support::
* ``uint8``: yes; fully tested (1)
* ``uint16``: yes; fully tested (1)
* ``uint32``: yes; fully tested (2) (3)
* ``uint64``: yes; fully tested (2) (3)
* ``int8``: yes; fully tested (1)
* ``int16``: yes; fully tested (1)
* ``int32``: yes; fully tested (1)
* ``int64``: yes; fully tested (2) (3)
* ``float16``: yes; fully tested (2) (3)
* ``float32``: yes; fully tested (1)
* ``float64``: yes; fully tested (1)
* ``float128``: yes; fully tested (2) (3)
* ``bool``: yes; tested (2) (3)
- (1) Uses ``cv2`` if `mode` is one of: ``"constant"``, ``"edge"``, ``"reflect"``, ``"symmetric"``.
Otherwise uses ``numpy``.
- (2) Uses ``numpy``.
- (3) Rejected by ``cv2``.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pad.
top : int, optional
Amount of pixels to add at the top side of the image. Must be 0 or greater.
right : int, optional
Amount of pixels to add at the right side of the image. Must be 0 or greater.
bottom : int, optional
Amount of pixels to add at the bottom side of the image. Must be 0 or greater.
left : int, optional
Amount of pixels to add at the left side of the image. Must be 0 or greater.
mode : str, optional
Padding mode to use. See :func:`numpy.pad` for details.
In case of mode ``constant``, the parameter `cval` will be used as the ``constant_values``
parameter to :func:`numpy.pad`.
In case of mode ``linear_ramp``, the parameter `cval` will be used as the ``end_values``
parameter to :func:`numpy.pad`.
cval : number, optional
Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
The cval is expected to match the input array's dtype and value range.
Returns
-------
arr_pad : (H',W') ndarray or (H',W',C) ndarray
Padded array with height ``H'=H+top+bottom`` and width ``W'=W+left+right``. | [
"Pad",
"an",
"image",
"-",
"like",
"array",
"on",
"its",
"top",
"/",
"right",
"/",
"bottom",
"/",
"left",
"side",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L1297-L1422 | valid |
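A minimal sketch of `pad` under the same installation assumption; the padding amounts and cval are arbitrary illustration values:
import numpy as np
from imgaug.imgaug import pad
image = np.zeros((4, 6, 3), dtype=np.uint8)
# constant padding with value 255: height becomes 4+1+2=7, width becomes 6+4+3=13
padded = pad(image, top=1, right=3, bottom=2, left=4, mode="constant", cval=255)
print(padded.shape)  # (7, 13, 3)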
aleju/imgaug | imgaug/imgaug.py | compute_paddings_for_aspect_ratio | def compute_paddings_for_aspect_ratio(arr, aspect_ratio):
"""
Compute the amount of pixels by which an array has to be padded to fulfill an aspect ratio.
The aspect ratio is given as width/height.
Depending on which dimension is smaller (height or width), only the corresponding
sides (left/right or top/bottom) will be padded. In each case, both of the sides will
be padded equally.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array for which to compute pad amounts.
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
as much width as height.
Returns
-------
result : tuple of int
Required padding amounts to reach the target aspect ratio, given as a tuple
of the form ``(top, right, bottom, left)``.
"""
do_assert(arr.ndim in [2, 3])
do_assert(aspect_ratio > 0)
height, width = arr.shape[0:2]
do_assert(height > 0)
aspect_ratio_current = width / height
pad_top = 0
pad_right = 0
pad_bottom = 0
pad_left = 0
if aspect_ratio_current < aspect_ratio:
# vertical image, height > width
diff = (aspect_ratio * height) - width
pad_right = int(np.ceil(diff / 2))
pad_left = int(np.floor(diff / 2))
elif aspect_ratio_current > aspect_ratio:
# horizontal image, width > height
diff = ((1/aspect_ratio) * width) - height
pad_top = int(np.floor(diff / 2))
pad_bottom = int(np.ceil(diff / 2))
return pad_top, pad_right, pad_bottom, pad_left | python | def compute_paddings_for_aspect_ratio(arr, aspect_ratio):
"""
Compute the amount of pixels by which an array has to be padded to fulfill an aspect ratio.
The aspect ratio is given as width/height.
Depending on which dimension is smaller (height or width), only the corresponding
sides (left/right or top/bottom) will be padded. In each case, both of the sides will
be padded equally.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array for which to compute pad amounts.
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
as much width as height.
Returns
-------
result : tuple of int
Required padding amounts to reach the target aspect ratio, given as a tuple
of the form ``(top, right, bottom, left)``.
"""
do_assert(arr.ndim in [2, 3])
do_assert(aspect_ratio > 0)
height, width = arr.shape[0:2]
do_assert(height > 0)
aspect_ratio_current = width / height
pad_top = 0
pad_right = 0
pad_bottom = 0
pad_left = 0
if aspect_ratio_current < aspect_ratio:
# vertical image, height > width
diff = (aspect_ratio * height) - width
pad_right = int(np.ceil(diff / 2))
pad_left = int(np.floor(diff / 2))
elif aspect_ratio_current > aspect_ratio:
# horizontal image, width > height
diff = ((1/aspect_ratio) * width) - height
pad_top = int(np.floor(diff / 2))
pad_bottom = int(np.ceil(diff / 2))
return pad_top, pad_right, pad_bottom, pad_left | [
"def",
"compute_paddings_for_aspect_ratio",
"(",
"arr",
",",
"aspect_ratio",
")",
":",
"do_assert",
"(",
"arr",
".",
"ndim",
"in",
"[",
"2",
",",
"3",
"]",
")",
"do_assert",
"(",
"aspect_ratio",
">",
"0",
")",
"height",
",",
"width",
"=",
"arr",
".",
"shape",
"[",
"0",
":",
"2",
"]",
"do_assert",
"(",
"height",
">",
"0",
")",
"aspect_ratio_current",
"=",
"width",
"/",
"height",
"pad_top",
"=",
"0",
"pad_right",
"=",
"0",
"pad_bottom",
"=",
"0",
"pad_left",
"=",
"0",
"if",
"aspect_ratio_current",
"<",
"aspect_ratio",
":",
"# vertical image, height > width",
"diff",
"=",
"(",
"aspect_ratio",
"*",
"height",
")",
"-",
"width",
"pad_right",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"diff",
"/",
"2",
")",
")",
"pad_left",
"=",
"int",
"(",
"np",
".",
"floor",
"(",
"diff",
"/",
"2",
")",
")",
"elif",
"aspect_ratio_current",
">",
"aspect_ratio",
":",
"# horizontal image, width > height",
"diff",
"=",
"(",
"(",
"1",
"/",
"aspect_ratio",
")",
"*",
"width",
")",
"-",
"height",
"pad_top",
"=",
"int",
"(",
"np",
".",
"floor",
"(",
"diff",
"/",
"2",
")",
")",
"pad_bottom",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"diff",
"/",
"2",
")",
")",
"return",
"pad_top",
",",
"pad_right",
",",
"pad_bottom",
",",
"pad_left"
] | Compute the amount of pixels by which an array has to be padded to fulfill an aspect ratio.
The aspect ratio is given as width/height.
Depending on which dimension is smaller (height or width), only the corresponding
sides (left/right or top/bottom) will be padded. In each case, both of the sides will
be padded equally.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array for which to compute pad amounts.
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
as much width as height.
Returns
-------
result : tuple of int
Required padding amounts to reach the target aspect ratio, given as a tuple
of the form ``(top, right, bottom, left)``. | [
"Compute",
"the",
"amount",
"of",
"pixels",
"by",
"which",
"an",
"array",
"has",
"to",
"be",
"padded",
"to",
"fulfill",
"an",
"aspect",
"ratio",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L1426-L1473 | valid |
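To illustrate the record above: for a 100x50 image and a target aspect ratio of 1.0 (square), the width is the smaller side, so only left/right receive padding. A minimal sketch, assuming the helper is available as `ia.compute_paddings_for_aspect_ratio`:

import numpy as np
import imgaug as ia

arr = np.zeros((100, 50, 3), dtype=np.uint8)   # height 100, width 50 -> current ratio 0.5
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0)
print(top, right, bottom, left)                # 0 25 0 25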
aleju/imgaug | imgaug/imgaug.py | pad_to_aspect_ratio | def pad_to_aspect_ratio(arr, aspect_ratio, mode="constant", cval=0, return_pad_amounts=False):
"""
Pad an image-like array on its sides so that it matches a target aspect ratio.
Depending on which dimension is smaller (height or width), only the corresponding
sides (left/right or top/bottom) will be padded. In each case, both of the sides will
be padded equally.
dtype support::
See :func:`imgaug.imgaug.pad`.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pad.
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
as much width as height.
mode : str, optional
Padding mode to use. See :func:`numpy.pad` for details.
cval : number, optional
Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
return_pad_amounts : bool, optional
If False, then only the padded image will be returned. If True, a tuple with two
entries will be returned, where the first entry is the padded image and the second
entry are the amounts by which each image side was padded. These amounts are again a
tuple of the form (top, right, bottom, left), with each value being an integer.
Returns
-------
arr_padded : (H',W') ndarray or (H',W',C) ndarray
Padded image as (H',W') or (H',W',C) ndarray, fulfilling the given aspect_ratio.
tuple of int
Amounts by which the image was padded on each side, given as a tuple ``(top, right, bottom, left)``.
This tuple is only returned if `return_pad_amounts` was set to True.
Otherwise only ``arr_padded`` is returned.
"""
pad_top, pad_right, pad_bottom, pad_left = compute_paddings_for_aspect_ratio(arr, aspect_ratio)
arr_padded = pad(
arr,
top=pad_top,
right=pad_right,
bottom=pad_bottom,
left=pad_left,
mode=mode,
cval=cval
)
if return_pad_amounts:
return arr_padded, (pad_top, pad_right, pad_bottom, pad_left)
else:
return arr_padded | python | def pad_to_aspect_ratio(arr, aspect_ratio, mode="constant", cval=0, return_pad_amounts=False):
"""
Pad an image-like array on its sides so that it matches a target aspect ratio.
Depending on which dimension is smaller (height or width), only the corresponding
sides (left/right or top/bottom) will be padded. In each case, both of the sides will
be padded equally.
dtype support::
See :func:`imgaug.imgaug.pad`.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pad.
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
as much width as height.
mode : str, optional
Padding mode to use. See :func:`numpy.pad` for details.
cval : number, optional
Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
return_pad_amounts : bool, optional
If False, then only the padded image will be returned. If True, a tuple with two
entries will be returned, where the first entry is the padded image and the second
entry are the amounts by which each image side was padded. These amounts are again a
tuple of the form (top, right, bottom, left), with each value being an integer.
Returns
-------
arr_padded : (H',W') ndarray or (H',W',C) ndarray
Padded image as (H',W') or (H',W',C) ndarray, fulfilling the given aspect_ratio.
tuple of int
Amounts by which the image was padded on each side, given as a tuple ``(top, right, bottom, left)``.
This tuple is only returned if `return_pad_amounts` was set to True.
Otherwise only ``arr_padded`` is returned.
"""
pad_top, pad_right, pad_bottom, pad_left = compute_paddings_for_aspect_ratio(arr, aspect_ratio)
arr_padded = pad(
arr,
top=pad_top,
right=pad_right,
bottom=pad_bottom,
left=pad_left,
mode=mode,
cval=cval
)
if return_pad_amounts:
return arr_padded, (pad_top, pad_right, pad_bottom, pad_left)
else:
return arr_padded | [
"def",
"pad_to_aspect_ratio",
"(",
"arr",
",",
"aspect_ratio",
",",
"mode",
"=",
"\"constant\"",
",",
"cval",
"=",
"0",
",",
"return_pad_amounts",
"=",
"False",
")",
":",
"pad_top",
",",
"pad_right",
",",
"pad_bottom",
",",
"pad_left",
"=",
"compute_paddings_for_aspect_ratio",
"(",
"arr",
",",
"aspect_ratio",
")",
"arr_padded",
"=",
"pad",
"(",
"arr",
",",
"top",
"=",
"pad_top",
",",
"right",
"=",
"pad_right",
",",
"bottom",
"=",
"pad_bottom",
",",
"left",
"=",
"pad_left",
",",
"mode",
"=",
"mode",
",",
"cval",
"=",
"cval",
")",
"if",
"return_pad_amounts",
":",
"return",
"arr_padded",
",",
"(",
"pad_top",
",",
"pad_right",
",",
"pad_bottom",
",",
"pad_left",
")",
"else",
":",
"return",
"arr_padded"
] | Pad an image-like array on its sides so that it matches a target aspect ratio.
Depending on which dimension is smaller (height or width), only the corresponding
sides (left/right or top/bottom) will be padded. In each case, both of the sides will
be padded equally.
dtype support::
See :func:`imgaug.imgaug.pad`.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pad.
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
as much width as height.
mode : str, optional
Padding mode to use. See :func:`numpy.pad` for details.
cval : number, optional
Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
return_pad_amounts : bool, optional
If False, then only the padded image will be returned. If True, a tuple with two
entries will be returned, where the first entry is the padded image and the second
entry are the amounts by which each image side was padded. These amounts are again a
tuple of the form (top, right, bottom, left), with each value being an integer.
Returns
-------
arr_padded : (H',W') ndarray or (H',W',C) ndarray
Padded image as (H',W') or (H',W',C) ndarray, fulfilling the given aspect_ratio.
tuple of int
Amounts by which the image was padded on each side, given as a tuple ``(top, right, bottom, left)``.
This tuple is only returned if `return_pad_amounts` was set to True.
Otherwise only ``arr_padded`` is returned. | [
"Pad",
"an",
"image",
"-",
"like",
"array",
"on",
"its",
"sides",
"so",
"that",
"it",
"matches",
"a",
"target",
"aspect",
"ratio",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L1476-L1534 | valid |
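A short usage sketch for the function documented above (assuming it is exposed as `ia.pad_to_aspect_ratio`; the numbers are illustrative). With `return_pad_amounts=True` both the padded image and the per-side amounts come back:

import numpy as np
import imgaug as ia

image = np.zeros((100, 50, 3), dtype=np.uint8)
padded, pads = ia.pad_to_aspect_ratio(image, 2.0, mode="constant", cval=0,
                                      return_pad_amounts=True)
print(padded.shape)   # (100, 200, 3) -> width/height == 2.0
print(pads)           # (0, 75, 0, 75): only left/right were padded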
aleju/imgaug | imgaug/imgaug.py | pool | def pool(arr, block_size, func, cval=0, preserve_dtype=True):
"""
Resize an array by pooling values within blocks.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: yes; tested (2)
* ``uint64``: no (1)
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: yes; tested (2)
* ``int64``: no (1)
* ``float16``: yes; tested
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: yes; tested (2)
* ``bool``: yes; tested
- (1) results too inaccurate (at least when using np.average as func)
- (2) Note that scikit-image documentation says that the wrapped pooling function converts
inputs to float64. Actual tests showed no indication of that happening (at least when
using preserve_dtype=True).
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pool. Ideally of datatype ``numpy.float64``.
block_size : int or tuple of int
Spatial size of each group of values to pool, aka kernel size.
If a single integer, then a symmetric block of that size along height and width will be used.
If a tuple of two values, it is assumed to be the block size along height and width of the image-like,
with pooling happening per channel.
If a tuple of three values, it is assumed to be the block size along height, width and channels.
func : callable
Function to apply to a given block in order to convert it to a single number,
e.g. :func:`numpy.average`, :func:`numpy.min`, :func:`numpy.max`.
cval : number, optional
Value to use in order to pad the array along its border if the array cannot be divided
by `block_size` without remainder.
preserve_dtype : bool, optional
Whether to convert the array back to the input datatype if it is changed away from
that in the pooling process.
Returns
-------
arr_reduced : (H',W') ndarray or (H',W',C') ndarray
Array after pooling.
"""
# TODO find better way to avoid circular import
from . import dtypes as iadt
iadt.gate_dtypes(arr,
allowed=["bool", "uint8", "uint16", "uint32", "int8", "int16", "int32",
"float16", "float32", "float64", "float128"],
disallowed=["uint64", "uint128", "uint256", "int64", "int128", "int256",
"float256"],
augmenter=None)
do_assert(arr.ndim in [2, 3])
is_valid_int = is_single_integer(block_size) and block_size >= 1
is_valid_tuple = is_iterable(block_size) and len(block_size) in [2, 3] \
and [is_single_integer(val) and val >= 1 for val in block_size]
do_assert(is_valid_int or is_valid_tuple)
if is_single_integer(block_size):
block_size = [block_size, block_size]
if len(block_size) < arr.ndim:
block_size = list(block_size) + [1]
input_dtype = arr.dtype
arr_reduced = skimage.measure.block_reduce(arr, tuple(block_size), func, cval=cval)
if preserve_dtype and arr_reduced.dtype.type != input_dtype:
arr_reduced = arr_reduced.astype(input_dtype)
return arr_reduced | python | def pool(arr, block_size, func, cval=0, preserve_dtype=True):
"""
Resize an array by pooling values within blocks.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: yes; tested (2)
* ``uint64``: no (1)
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: yes; tested (2)
* ``int64``: no (1)
* ``float16``: yes; tested
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: yes; tested (2)
* ``bool``: yes; tested
- (1) results too inaccurate (at least when using np.average as func)
- (2) Note that scikit-image documentation says that the wrapped pooling function converts
inputs to float64. Actual tests showed no indication of that happening (at least when
using preserve_dtype=True).
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pool. Ideally of datatype ``numpy.float64``.
block_size : int or tuple of int
Spatial size of each group of values to pool, aka kernel size.
If a single integer, then a symmetric block of that size along height and width will be used.
If a tuple of two values, it is assumed to be the block size along height and width of the image-like,
with pooling happening per channel.
If a tuple of three values, it is assumed to be the block size along height, width and channels.
func : callable
Function to apply to a given block in order to convert it to a single number,
e.g. :func:`numpy.average`, :func:`numpy.min`, :func:`numpy.max`.
cval : number, optional
Value to use in order to pad the array along its border if the array cannot be divided
by `block_size` without remainder.
preserve_dtype : bool, optional
Whether to convert the array back to the input datatype if it is changed away from
that in the pooling process.
Returns
-------
arr_reduced : (H',W') ndarray or (H',W',C') ndarray
Array after pooling.
"""
# TODO find better way to avoid circular import
from . import dtypes as iadt
iadt.gate_dtypes(arr,
allowed=["bool", "uint8", "uint16", "uint32", "int8", "int16", "int32",
"float16", "float32", "float64", "float128"],
disallowed=["uint64", "uint128", "uint256", "int64", "int128", "int256",
"float256"],
augmenter=None)
do_assert(arr.ndim in [2, 3])
is_valid_int = is_single_integer(block_size) and block_size >= 1
is_valid_tuple = is_iterable(block_size) and len(block_size) in [2, 3] \
and [is_single_integer(val) and val >= 1 for val in block_size]
do_assert(is_valid_int or is_valid_tuple)
if is_single_integer(block_size):
block_size = [block_size, block_size]
if len(block_size) < arr.ndim:
block_size = list(block_size) + [1]
input_dtype = arr.dtype
arr_reduced = skimage.measure.block_reduce(arr, tuple(block_size), func, cval=cval)
if preserve_dtype and arr_reduced.dtype.type != input_dtype:
arr_reduced = arr_reduced.astype(input_dtype)
return arr_reduced | [
"def",
"pool",
"(",
"arr",
",",
"block_size",
",",
"func",
",",
"cval",
"=",
"0",
",",
"preserve_dtype",
"=",
"True",
")",
":",
"# TODO find better way to avoid circular import",
"from",
".",
"import",
"dtypes",
"as",
"iadt",
"iadt",
".",
"gate_dtypes",
"(",
"arr",
",",
"allowed",
"=",
"[",
"\"bool\"",
",",
"\"uint8\"",
",",
"\"uint16\"",
",",
"\"uint32\"",
",",
"\"int8\"",
",",
"\"int16\"",
",",
"\"int32\"",
",",
"\"float16\"",
",",
"\"float32\"",
",",
"\"float64\"",
",",
"\"float128\"",
"]",
",",
"disallowed",
"=",
"[",
"\"uint64\"",
",",
"\"uint128\"",
",",
"\"uint256\"",
",",
"\"int64\"",
",",
"\"int128\"",
",",
"\"int256\"",
",",
"\"float256\"",
"]",
",",
"augmenter",
"=",
"None",
")",
"do_assert",
"(",
"arr",
".",
"ndim",
"in",
"[",
"2",
",",
"3",
"]",
")",
"is_valid_int",
"=",
"is_single_integer",
"(",
"block_size",
")",
"and",
"block_size",
">=",
"1",
"is_valid_tuple",
"=",
"is_iterable",
"(",
"block_size",
")",
"and",
"len",
"(",
"block_size",
")",
"in",
"[",
"2",
",",
"3",
"]",
"and",
"[",
"is_single_integer",
"(",
"val",
")",
"and",
"val",
">=",
"1",
"for",
"val",
"in",
"block_size",
"]",
"do_assert",
"(",
"is_valid_int",
"or",
"is_valid_tuple",
")",
"if",
"is_single_integer",
"(",
"block_size",
")",
":",
"block_size",
"=",
"[",
"block_size",
",",
"block_size",
"]",
"if",
"len",
"(",
"block_size",
")",
"<",
"arr",
".",
"ndim",
":",
"block_size",
"=",
"list",
"(",
"block_size",
")",
"+",
"[",
"1",
"]",
"input_dtype",
"=",
"arr",
".",
"dtype",
"arr_reduced",
"=",
"skimage",
".",
"measure",
".",
"block_reduce",
"(",
"arr",
",",
"tuple",
"(",
"block_size",
")",
",",
"func",
",",
"cval",
"=",
"cval",
")",
"if",
"preserve_dtype",
"and",
"arr_reduced",
".",
"dtype",
".",
"type",
"!=",
"input_dtype",
":",
"arr_reduced",
"=",
"arr_reduced",
".",
"astype",
"(",
"input_dtype",
")",
"return",
"arr_reduced"
] | Resize an array by pooling values within blocks.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: yes; tested (2)
* ``uint64``: no (1)
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: yes; tested (2)
* ``int64``: no (1)
* ``float16``: yes; tested
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: yes; tested (2)
* ``bool``: yes; tested
- (1) results too inaccurate (at least when using np.average as func)
- (2) Note that scikit-image documentation says that the wrapped pooling function converts
inputs to float64. Actual tests showed no indication of that happening (at least when
using preserve_dtype=True).
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pool. Ideally of datatype ``numpy.float64``.
block_size : int or tuple of int
Spatial size of each group of values to pool, aka kernel size.
If a single integer, then a symmetric block of that size along height and width will be used.
If a tuple of two values, it is assumed to be the block size along height and width of the image-like,
with pooling happening per channel.
If a tuple of three values, it is assumed to be the block size along height, width and channels.
func : callable
Function to apply to a given block in order to convert it to a single number,
e.g. :func:`numpy.average`, :func:`numpy.min`, :func:`numpy.max`.
cval : number, optional
Value to use in order to pad the array along its border if the array cannot be divided
by `block_size` without remainder.
preserve_dtype : bool, optional
Whether to convert the array back to the input datatype if it is changed away from
that in the pooling process.
Returns
-------
arr_reduced : (H',W') ndarray or (H',W',C') ndarray
Array after pooling. | [
"Resize",
"an",
"array",
"by",
"pooling",
"values",
"within",
"blocks",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L1537-L1616 | valid |
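The pooling helper above reduces each `block_size` block to a single value via `func`. A minimal sketch (assuming `ia.pool` is exported at package level), using `numpy.max` on a 4x4 array:

import numpy as np
import imgaug as ia

arr = np.arange(16, dtype=np.uint8).reshape(4, 4)
pooled = ia.pool(arr, 2, np.max)   # 2x2 blocks, each reduced to its maximum
print(pooled)
# [[ 5  7]
#  [13 15]]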
aleju/imgaug | imgaug/imgaug.py | avg_pool | def avg_pool(arr, block_size, cval=0, preserve_dtype=True):
"""
Resize an array using average pooling.
dtype support::
See :func:`imgaug.imgaug.pool`.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pool. See :func:`imgaug.pool` for details.
block_size : int or tuple of int
Size of each block of values to pool. See :func:`imgaug.pool` for details.
cval : number, optional
Padding value. See :func:`imgaug.pool` for details.
preserve_dtype : bool, optional
Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.
Returns
-------
arr_reduced : (H',W') ndarray or (H',W',C') ndarray
Array after average pooling.
"""
return pool(arr, block_size, np.average, cval=cval, preserve_dtype=preserve_dtype) | python | def avg_pool(arr, block_size, cval=0, preserve_dtype=True):
"""
Resize an array using average pooling.
dtype support::
See :func:`imgaug.imgaug.pool`.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pool. See :func:`imgaug.pool` for details.
block_size : int or tuple of int
Size of each block of values to pool. See :func:`imgaug.pool` for details.
cval : number, optional
Padding value. See :func:`imgaug.pool` for details.
preserve_dtype : bool, optional
Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.
Returns
-------
arr_reduced : (H',W') ndarray or (H',W',C') ndarray
Array after average pooling.
"""
return pool(arr, block_size, np.average, cval=cval, preserve_dtype=preserve_dtype) | [
"def",
"avg_pool",
"(",
"arr",
",",
"block_size",
",",
"cval",
"=",
"0",
",",
"preserve_dtype",
"=",
"True",
")",
":",
"return",
"pool",
"(",
"arr",
",",
"block_size",
",",
"np",
".",
"average",
",",
"cval",
"=",
"cval",
",",
"preserve_dtype",
"=",
"preserve_dtype",
")"
] | Resize an array using average pooling.
dtype support::
See :func:`imgaug.imgaug.pool`.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pool. See :func:`imgaug.pool` for details.
block_size : int or tuple of int
Size of each block of values to pool. See :func:`imgaug.pool` for details.
cval : number, optional
Padding value. See :func:`imgaug.pool` for details.
preserve_dtype : bool, optional
Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.
Returns
-------
arr_reduced : (H',W') ndarray or (H',W',C') ndarray
Array after average pooling. | [
"Resize",
"an",
"array",
"using",
"average",
"pooling",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L1619-L1647 | valid |
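For the average-pooling wrapper above, a rough sketch of downscaling an RGB image by a factor of two per axis (channels stay untouched because the block size is expanded to (2, 2, 1) internally; `ia.avg_pool` is assumed to be the package-level alias):

import numpy as np
import imgaug as ia

image = np.random.randint(0, 256, size=(64, 64, 3)).astype(np.uint8)
small = ia.avg_pool(image, 2)
print(small.shape, small.dtype)   # (32, 32, 3) uint8 (dtype preserved by default)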
aleju/imgaug | imgaug/imgaug.py | max_pool | def max_pool(arr, block_size, cval=0, preserve_dtype=True):
"""
Resize an array using max-pooling.
dtype support::
See :func:`imgaug.imgaug.pool`.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pool. See :func:`imgaug.pool` for details.
block_size : int or tuple of int
Size of each block of values to pool. See `imgaug.pool` for details.
cval : number, optional
Padding value. See :func:`imgaug.pool` for details.
preserve_dtype : bool, optional
Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.
Returns
-------
arr_reduced : (H',W') ndarray or (H',W',C') ndarray
Array after max-pooling.
"""
return pool(arr, block_size, np.max, cval=cval, preserve_dtype=preserve_dtype) | python | def max_pool(arr, block_size, cval=0, preserve_dtype=True):
"""
Resize an array using max-pooling.
dtype support::
See :func:`imgaug.imgaug.pool`.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pool. See :func:`imgaug.pool` for details.
block_size : int or tuple of int
Size of each block of values to pool. See `imgaug.pool` for details.
cval : number, optional
Padding value. See :func:`imgaug.pool` for details.
preserve_dtype : bool, optional
Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.
Returns
-------
arr_reduced : (H',W') ndarray or (H',W',C') ndarray
Array after max-pooling.
"""
return pool(arr, block_size, np.max, cval=cval, preserve_dtype=preserve_dtype) | [
"def",
"max_pool",
"(",
"arr",
",",
"block_size",
",",
"cval",
"=",
"0",
",",
"preserve_dtype",
"=",
"True",
")",
":",
"return",
"pool",
"(",
"arr",
",",
"block_size",
",",
"np",
".",
"max",
",",
"cval",
"=",
"cval",
",",
"preserve_dtype",
"=",
"preserve_dtype",
")"
] | Resize an array using max-pooling.
dtype support::
See :func:`imgaug.imgaug.pool`.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pool. See :func:`imgaug.pool` for details.
block_size : int or tuple of int
Size of each block of values to pool. See `imgaug.pool` for details.
cval : number, optional
Padding value. See :func:`imgaug.pool` for details.
preserve_dtype : bool, optional
Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.
Returns
-------
arr_reduced : (H',W') ndarray or (H',W',C') ndarray
Array after max-pooling. | [
"Resize",
"an",
"array",
"using",
"max",
"-",
"pooling",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L1650-L1678 | valid |
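Max-pooling is handy for shrinking boolean masks, because a block stays True if any of its pixels is True (the dtype table above lists bool as supported). A small sketch, assuming the `ia.max_pool` alias:

import numpy as np
import imgaug as ia

mask = np.zeros((6, 6), dtype=bool)
mask[2, 3] = True
mask_small = ia.max_pool(mask, 2)   # 3x3 output; block (1, 1) is True
print(mask_small)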
aleju/imgaug | imgaug/imgaug.py | draw_grid | def draw_grid(images, rows=None, cols=None):
"""
Converts multiple input images into a single image showing them in a grid.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: yes; fully tested
* ``uint32``: yes; fully tested
* ``uint64``: yes; fully tested
* ``int8``: yes; fully tested
* ``int16``: yes; fully tested
* ``int32``: yes; fully tested
* ``int64``: yes; fully tested
* ``float16``: yes; fully tested
* ``float32``: yes; fully tested
* ``float64``: yes; fully tested
* ``float128``: yes; fully tested
* ``bool``: yes; fully tested
Parameters
----------
images : (N,H,W,3) ndarray or iterable of (H,W,3) array
The input images to convert to a grid.
rows : None or int, optional
The number of rows to show in the grid.
If None, it will be automatically derived.
cols : None or int, optional
The number of cols to show in the grid.
If None, it will be automatically derived.
Returns
-------
grid : (H',W',3) ndarray
Image of the generated grid.
"""
nb_images = len(images)
do_assert(nb_images > 0)
if is_np_array(images):
do_assert(images.ndim == 4)
else:
do_assert(is_iterable(images) and is_np_array(images[0]) and images[0].ndim == 3)
dts = [image.dtype.name for image in images]
nb_dtypes = len(set(dts))
do_assert(nb_dtypes == 1, ("All images provided to draw_grid() must have the same dtype, "
+ "found %d dtypes (%s)") % (nb_dtypes, ", ".join(dts)))
cell_height = max([image.shape[0] for image in images])
cell_width = max([image.shape[1] for image in images])
channels = set([image.shape[2] for image in images])
do_assert(
len(channels) == 1,
"All images are expected to have the same number of channels, "
+ "but got channel set %s with length %d instead." % (str(channels), len(channels))
)
nb_channels = list(channels)[0]
if rows is None and cols is None:
rows = cols = int(math.ceil(math.sqrt(nb_images)))
elif rows is not None:
cols = int(math.ceil(nb_images / rows))
elif cols is not None:
rows = int(math.ceil(nb_images / cols))
do_assert(rows * cols >= nb_images)
width = cell_width * cols
height = cell_height * rows
dt = images.dtype if is_np_array(images) else images[0].dtype
grid = np.zeros((height, width, nb_channels), dtype=dt)
cell_idx = 0
for row_idx in sm.xrange(rows):
for col_idx in sm.xrange(cols):
if cell_idx < nb_images:
image = images[cell_idx]
cell_y1 = cell_height * row_idx
cell_y2 = cell_y1 + image.shape[0]
cell_x1 = cell_width * col_idx
cell_x2 = cell_x1 + image.shape[1]
grid[cell_y1:cell_y2, cell_x1:cell_x2, :] = image
cell_idx += 1
return grid | python | def draw_grid(images, rows=None, cols=None):
"""
Converts multiple input images into a single image showing them in a grid.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: yes; fully tested
* ``uint32``: yes; fully tested
* ``uint64``: yes; fully tested
* ``int8``: yes; fully tested
* ``int16``: yes; fully tested
* ``int32``: yes; fully tested
* ``int64``: yes; fully tested
* ``float16``: yes; fully tested
* ``float32``: yes; fully tested
* ``float64``: yes; fully tested
* ``float128``: yes; fully tested
* ``bool``: yes; fully tested
Parameters
----------
images : (N,H,W,3) ndarray or iterable of (H,W,3) array
The input images to convert to a grid.
rows : None or int, optional
The number of rows to show in the grid.
If None, it will be automatically derived.
cols : None or int, optional
The number of cols to show in the grid.
If None, it will be automatically derived.
Returns
-------
grid : (H',W',3) ndarray
Image of the generated grid.
"""
nb_images = len(images)
do_assert(nb_images > 0)
if is_np_array(images):
do_assert(images.ndim == 4)
else:
do_assert(is_iterable(images) and is_np_array(images[0]) and images[0].ndim == 3)
dts = [image.dtype.name for image in images]
nb_dtypes = len(set(dts))
do_assert(nb_dtypes == 1, ("All images provided to draw_grid() must have the same dtype, "
+ "found %d dtypes (%s)") % (nb_dtypes, ", ".join(dts)))
cell_height = max([image.shape[0] for image in images])
cell_width = max([image.shape[1] for image in images])
channels = set([image.shape[2] for image in images])
do_assert(
len(channels) == 1,
"All images are expected to have the same number of channels, "
+ "but got channel set %s with length %d instead." % (str(channels), len(channels))
)
nb_channels = list(channels)[0]
if rows is None and cols is None:
rows = cols = int(math.ceil(math.sqrt(nb_images)))
elif rows is not None:
cols = int(math.ceil(nb_images / rows))
elif cols is not None:
rows = int(math.ceil(nb_images / cols))
do_assert(rows * cols >= nb_images)
width = cell_width * cols
height = cell_height * rows
dt = images.dtype if is_np_array(images) else images[0].dtype
grid = np.zeros((height, width, nb_channels), dtype=dt)
cell_idx = 0
for row_idx in sm.xrange(rows):
for col_idx in sm.xrange(cols):
if cell_idx < nb_images:
image = images[cell_idx]
cell_y1 = cell_height * row_idx
cell_y2 = cell_y1 + image.shape[0]
cell_x1 = cell_width * col_idx
cell_x2 = cell_x1 + image.shape[1]
grid[cell_y1:cell_y2, cell_x1:cell_x2, :] = image
cell_idx += 1
return grid | [
"def",
"draw_grid",
"(",
"images",
",",
"rows",
"=",
"None",
",",
"cols",
"=",
"None",
")",
":",
"nb_images",
"=",
"len",
"(",
"images",
")",
"do_assert",
"(",
"nb_images",
">",
"0",
")",
"if",
"is_np_array",
"(",
"images",
")",
":",
"do_assert",
"(",
"images",
".",
"ndim",
"==",
"4",
")",
"else",
":",
"do_assert",
"(",
"is_iterable",
"(",
"images",
")",
"and",
"is_np_array",
"(",
"images",
"[",
"0",
"]",
")",
"and",
"images",
"[",
"0",
"]",
".",
"ndim",
"==",
"3",
")",
"dts",
"=",
"[",
"image",
".",
"dtype",
".",
"name",
"for",
"image",
"in",
"images",
"]",
"nb_dtypes",
"=",
"len",
"(",
"set",
"(",
"dts",
")",
")",
"do_assert",
"(",
"nb_dtypes",
"==",
"1",
",",
"(",
"\"All images provided to draw_grid() must have the same dtype, \"",
"+",
"\"found %d dtypes (%s)\"",
")",
"%",
"(",
"nb_dtypes",
",",
"\", \"",
".",
"join",
"(",
"dts",
")",
")",
")",
"cell_height",
"=",
"max",
"(",
"[",
"image",
".",
"shape",
"[",
"0",
"]",
"for",
"image",
"in",
"images",
"]",
")",
"cell_width",
"=",
"max",
"(",
"[",
"image",
".",
"shape",
"[",
"1",
"]",
"for",
"image",
"in",
"images",
"]",
")",
"channels",
"=",
"set",
"(",
"[",
"image",
".",
"shape",
"[",
"2",
"]",
"for",
"image",
"in",
"images",
"]",
")",
"do_assert",
"(",
"len",
"(",
"channels",
")",
"==",
"1",
",",
"\"All images are expected to have the same number of channels, \"",
"+",
"\"but got channel set %s with length %d instead.\"",
"%",
"(",
"str",
"(",
"channels",
")",
",",
"len",
"(",
"channels",
")",
")",
")",
"nb_channels",
"=",
"list",
"(",
"channels",
")",
"[",
"0",
"]",
"if",
"rows",
"is",
"None",
"and",
"cols",
"is",
"None",
":",
"rows",
"=",
"cols",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"math",
".",
"sqrt",
"(",
"nb_images",
")",
")",
")",
"elif",
"rows",
"is",
"not",
"None",
":",
"cols",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"nb_images",
"/",
"rows",
")",
")",
"elif",
"cols",
"is",
"not",
"None",
":",
"rows",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"nb_images",
"/",
"cols",
")",
")",
"do_assert",
"(",
"rows",
"*",
"cols",
">=",
"nb_images",
")",
"width",
"=",
"cell_width",
"*",
"cols",
"height",
"=",
"cell_height",
"*",
"rows",
"dt",
"=",
"images",
".",
"dtype",
"if",
"is_np_array",
"(",
"images",
")",
"else",
"images",
"[",
"0",
"]",
".",
"dtype",
"grid",
"=",
"np",
".",
"zeros",
"(",
"(",
"height",
",",
"width",
",",
"nb_channels",
")",
",",
"dtype",
"=",
"dt",
")",
"cell_idx",
"=",
"0",
"for",
"row_idx",
"in",
"sm",
".",
"xrange",
"(",
"rows",
")",
":",
"for",
"col_idx",
"in",
"sm",
".",
"xrange",
"(",
"cols",
")",
":",
"if",
"cell_idx",
"<",
"nb_images",
":",
"image",
"=",
"images",
"[",
"cell_idx",
"]",
"cell_y1",
"=",
"cell_height",
"*",
"row_idx",
"cell_y2",
"=",
"cell_y1",
"+",
"image",
".",
"shape",
"[",
"0",
"]",
"cell_x1",
"=",
"cell_width",
"*",
"col_idx",
"cell_x2",
"=",
"cell_x1",
"+",
"image",
".",
"shape",
"[",
"1",
"]",
"grid",
"[",
"cell_y1",
":",
"cell_y2",
",",
"cell_x1",
":",
"cell_x2",
",",
":",
"]",
"=",
"image",
"cell_idx",
"+=",
"1",
"return",
"grid"
] | Converts multiple input images into a single image showing them in a grid.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: yes; fully tested
* ``uint32``: yes; fully tested
* ``uint64``: yes; fully tested
* ``int8``: yes; fully tested
* ``int16``: yes; fully tested
* ``int32``: yes; fully tested
* ``int64``: yes; fully tested
* ``float16``: yes; fully tested
* ``float32``: yes; fully tested
* ``float64``: yes; fully tested
* ``float128``: yes; fully tested
* ``bool``: yes; fully tested
Parameters
----------
images : (N,H,W,3) ndarray or iterable of (H,W,3) array
The input images to convert to a grid.
rows : None or int, optional
The number of rows to show in the grid.
If None, it will be automatically derived.
cols : None or int, optional
The number of cols to show in the grid.
If None, it will be automatically derived.
Returns
-------
grid : (H',W',3) ndarray
Image of the generated grid. | [
"Converts",
"multiple",
"input",
"images",
"into",
"a",
"single",
"image",
"showing",
"them",
"in",
"a",
"grid",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L1681-L1765 | valid |
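To illustrate the grid drawing documented above: four 32x32 images arranged into two columns give a single 64x64 output image (rows are derived automatically when only `cols` is given). A hedged sketch, assuming the `ia.draw_grid` alias:

import numpy as np
import imgaug as ia

images = [np.random.randint(0, 256, size=(32, 32, 3)).astype(np.uint8)
          for _ in range(4)]
grid = ia.draw_grid(images, cols=2)
print(grid.shape)   # (64, 64, 3)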
aleju/imgaug | imgaug/imgaug.py | show_grid | def show_grid(images, rows=None, cols=None):
"""
Converts the input images to a grid image and shows it in a new window.
dtype support::
minimum of (
:func:`imgaug.imgaug.draw_grid`,
:func:`imgaug.imgaug.imshow`
)
Parameters
----------
images : (N,H,W,3) ndarray or iterable of (H,W,3) array
See :func:`imgaug.draw_grid`.
rows : None or int, optional
See :func:`imgaug.draw_grid`.
cols : None or int, optional
See :func:`imgaug.draw_grid`.
"""
grid = draw_grid(images, rows=rows, cols=cols)
imshow(grid) | python | def show_grid(images, rows=None, cols=None):
"""
Converts the input images to a grid image and shows it in a new window.
dtype support::
minimum of (
:func:`imgaug.imgaug.draw_grid`,
:func:`imgaug.imgaug.imshow`
)
Parameters
----------
images : (N,H,W,3) ndarray or iterable of (H,W,3) array
See :func:`imgaug.draw_grid`.
rows : None or int, optional
See :func:`imgaug.draw_grid`.
cols : None or int, optional
See :func:`imgaug.draw_grid`.
"""
grid = draw_grid(images, rows=rows, cols=cols)
imshow(grid) | [
"def",
"show_grid",
"(",
"images",
",",
"rows",
"=",
"None",
",",
"cols",
"=",
"None",
")",
":",
"grid",
"=",
"draw_grid",
"(",
"images",
",",
"rows",
"=",
"rows",
",",
"cols",
"=",
"cols",
")",
"imshow",
"(",
"grid",
")"
] | Converts the input images to a grid image and shows it in a new window.
dtype support::
minimum of (
:func:`imgaug.imgaug.draw_grid`,
:func:`imgaug.imgaug.imshow`
)
Parameters
----------
images : (N,H,W,3) ndarray or iterable of (H,W,3) array
See :func:`imgaug.draw_grid`.
rows : None or int, optional
See :func:`imgaug.draw_grid`.
cols : None or int, optional
See :func:`imgaug.draw_grid`. | [
"Converts",
"the",
"input",
"images",
"to",
"a",
"grid",
"image",
"and",
"shows",
"it",
"in",
"a",
"new",
"window",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L1768-L1792 | valid |
aleju/imgaug | imgaug/imgaug.py | imshow | def imshow(image, backend=IMSHOW_BACKEND_DEFAULT):
"""
Shows an image in a window.
dtype support::
* ``uint8``: yes; not tested
* ``uint16``: ?
* ``uint32``: ?
* ``uint64``: ?
* ``int8``: ?
* ``int16``: ?
* ``int32``: ?
* ``int64``: ?
* ``float16``: ?
* ``float32``: ?
* ``float64``: ?
* ``float128``: ?
* ``bool``: ?
Parameters
----------
image : (H,W,3) ndarray
Image to show.
backend : {'matplotlib', 'cv2'}, optional
Library to use to show the image. May be either matplotlib or OpenCV ('cv2').
OpenCV tends to be faster, but apparently causes more technical issues.
"""
do_assert(backend in ["matplotlib", "cv2"], "Expected backend 'matplotlib' or 'cv2', got %s." % (backend,))
if backend == "cv2":
image_bgr = image
if image.ndim == 3 and image.shape[2] in [3, 4]:
image_bgr = image[..., 0:3][..., ::-1]
win_name = "imgaug-default-window"
cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
cv2.imshow(win_name, image_bgr)
cv2.waitKey(0)
cv2.destroyWindow(win_name)
else:
# import only when necessary (faster startup; optional dependency; less fragile -- see issue #225)
import matplotlib.pyplot as plt
dpi = 96
h, w = image.shape[0] / dpi, image.shape[1] / dpi
w = max(w, 6) # if the figure is too narrow, the footer may appear and make the fig suddenly wider (ugly)
fig, ax = plt.subplots(figsize=(w, h), dpi=dpi)
fig.canvas.set_window_title("imgaug.imshow(%s)" % (image.shape,))
ax.imshow(image, cmap="gray") # cmap is only active for grayscale images
plt.show() | python | def imshow(image, backend=IMSHOW_BACKEND_DEFAULT):
"""
Shows an image in a window.
dtype support::
* ``uint8``: yes; not tested
* ``uint16``: ?
* ``uint32``: ?
* ``uint64``: ?
* ``int8``: ?
* ``int16``: ?
* ``int32``: ?
* ``int64``: ?
* ``float16``: ?
* ``float32``: ?
* ``float64``: ?
* ``float128``: ?
* ``bool``: ?
Parameters
----------
image : (H,W,3) ndarray
Image to show.
backend : {'matplotlib', 'cv2'}, optional
Library to use to show the image. May be either matplotlib or OpenCV ('cv2').
OpenCV tends to be faster, but apparently causes more technical issues.
"""
do_assert(backend in ["matplotlib", "cv2"], "Expected backend 'matplotlib' or 'cv2', got %s." % (backend,))
if backend == "cv2":
image_bgr = image
if image.ndim == 3 and image.shape[2] in [3, 4]:
image_bgr = image[..., 0:3][..., ::-1]
win_name = "imgaug-default-window"
cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
cv2.imshow(win_name, image_bgr)
cv2.waitKey(0)
cv2.destroyWindow(win_name)
else:
# import only when necessary (faster startup; optional dependency; less fragile -- see issue #225)
import matplotlib.pyplot as plt
dpi = 96
h, w = image.shape[0] / dpi, image.shape[1] / dpi
w = max(w, 6) # if the figure is too narrow, the footer may appear and make the fig suddenly wider (ugly)
fig, ax = plt.subplots(figsize=(w, h), dpi=dpi)
fig.canvas.set_window_title("imgaug.imshow(%s)" % (image.shape,))
ax.imshow(image, cmap="gray") # cmap is only active for grayscale images
plt.show() | [
"def",
"imshow",
"(",
"image",
",",
"backend",
"=",
"IMSHOW_BACKEND_DEFAULT",
")",
":",
"do_assert",
"(",
"backend",
"in",
"[",
"\"matplotlib\"",
",",
"\"cv2\"",
"]",
",",
"\"Expected backend 'matplotlib' or 'cv2', got %s.\"",
"%",
"(",
"backend",
",",
")",
")",
"if",
"backend",
"==",
"\"cv2\"",
":",
"image_bgr",
"=",
"image",
"if",
"image",
".",
"ndim",
"==",
"3",
"and",
"image",
".",
"shape",
"[",
"2",
"]",
"in",
"[",
"3",
",",
"4",
"]",
":",
"image_bgr",
"=",
"image",
"[",
"...",
",",
"0",
":",
"3",
"]",
"[",
"...",
",",
":",
":",
"-",
"1",
"]",
"win_name",
"=",
"\"imgaug-default-window\"",
"cv2",
".",
"namedWindow",
"(",
"win_name",
",",
"cv2",
".",
"WINDOW_NORMAL",
")",
"cv2",
".",
"imshow",
"(",
"win_name",
",",
"image_bgr",
")",
"cv2",
".",
"waitKey",
"(",
"0",
")",
"cv2",
".",
"destroyWindow",
"(",
"win_name",
")",
"else",
":",
"# import only when necessary (faster startup; optional dependency; less fragile -- see issue #225)",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"dpi",
"=",
"96",
"h",
",",
"w",
"=",
"image",
".",
"shape",
"[",
"0",
"]",
"/",
"dpi",
",",
"image",
".",
"shape",
"[",
"1",
"]",
"/",
"dpi",
"w",
"=",
"max",
"(",
"w",
",",
"6",
")",
"# if the figure is too narrow, the footer may appear and make the fig suddenly wider (ugly)",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"figsize",
"=",
"(",
"w",
",",
"h",
")",
",",
"dpi",
"=",
"dpi",
")",
"fig",
".",
"canvas",
".",
"set_window_title",
"(",
"\"imgaug.imshow(%s)\"",
"%",
"(",
"image",
".",
"shape",
",",
")",
")",
"ax",
".",
"imshow",
"(",
"image",
",",
"cmap",
"=",
"\"gray\"",
")",
"# cmap is only activate for grayscale images",
"plt",
".",
"show",
"(",
")"
] | Shows an image in a window.
dtype support::
* ``uint8``: yes; not tested
* ``uint16``: ?
* ``uint32``: ?
* ``uint64``: ?
* ``int8``: ?
* ``int16``: ?
* ``int32``: ?
* ``int64``: ?
* ``float16``: ?
* ``float32``: ?
* ``float64``: ?
* ``float128``: ?
* ``bool``: ?
Parameters
----------
image : (H,W,3) ndarray
Image to show.
backend : {'matplotlib', 'cv2'}, optional
Library to use to show the image. May be either matplotlib or OpenCV ('cv2').
OpenCV tends to be faster, but apparently causes more technical issues. | [
"Shows",
"an",
"image",
"in",
"a",
"window",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L1795-L1847 | valid |
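A quick sketch for the viewer documented above (assuming the `ia.imshow` alias). Note that both backends open a blocking window, so this is only meant for interactive use:

import numpy as np
import imgaug as ia

image = np.zeros((128, 128, 3), dtype=np.uint8)
image[..., 0] = 255                    # plain red image
ia.imshow(image)                       # default backend (matplotlib)
# ia.imshow(image, backend="cv2")      # OpenCV window instead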
aleju/imgaug | imgaug/imgaug.py | warn_deprecated | def warn_deprecated(msg, stacklevel=2):
"""Generate a non-silent deprecation warning with stacktrace.
The used warning is ``imgaug.imgaug.DeprecationWarning``.
Parameters
----------
msg : str
The message of the warning.
stacklevel : int, optional
How many steps above this function to "jump" in the stacktrace for
the displayed file and line number of the error message.
Usually 2.
"""
import warnings
warnings.warn(msg,
category=DeprecationWarning,
stacklevel=stacklevel) | python | def warn_deprecated(msg, stacklevel=2):
"""Generate a non-silent deprecation warning with stacktrace.
The used warning is ``imgaug.imgaug.DeprecationWarning``.
Parameters
----------
msg : str
The message of the warning.
stacklevel : int, optional
How many steps above this function to "jump" in the stacktrace for
the displayed file and line number of the error message.
Usually 2.
"""
import warnings
warnings.warn(msg,
category=DeprecationWarning,
stacklevel=stacklevel) | [
"def",
"warn_deprecated",
"(",
"msg",
",",
"stacklevel",
"=",
"2",
")",
":",
"import",
"warnings",
"warnings",
".",
"warn",
"(",
"msg",
",",
"category",
"=",
"DeprecationWarning",
",",
"stacklevel",
"=",
"stacklevel",
")"
] | Generate a non-silent deprecation warning with stacktrace.
The used warning is ``imgaug.imgaug.DeprecationWarning``.
Parameters
----------
msg : str
The message of the warning.
stacklevel : int, optional
How many steps above this function to "jump" in the stacktrace for
the displayed file and line number of the error message.
Usually 2. | [
"Generate",
"a",
"non",
"-",
"silent",
"deprecation",
"warning",
"with",
"stacktrace",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L2046-L2065 | valid |
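As a sketch of how the deprecation helper above might be used inside library code. `old_helper` and `new_helper` are made-up names, and if the function is not re-exported at package level it would have to be imported from `imgaug.imgaug` instead:

import imgaug as ia

def old_helper():
    ia.warn_deprecated("old_helper() is deprecated; use new_helper() instead.")
    # ... keep the old behaviour for backwards compatibility ...

old_helper()   # emits a DeprecationWarning; stacklevel=2 attributes it to the call inside old_helper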
aleju/imgaug | imgaug/imgaug.py | HooksImages.is_activated | def is_activated(self, images, augmenter, parents, default):
"""
Returns whether an augmenter may be executed.
Returns
-------
bool
If True, the augmenter may be executed. If False, it may not be executed.
"""
if self.activator is None:
return default
else:
return self.activator(images, augmenter, parents, default) | python | def is_activated(self, images, augmenter, parents, default):
"""
Returns whether an augmenter may be executed.
Returns
-------
bool
If True, the augmenter may be executed. If False, it may not be executed.
"""
if self.activator is None:
return default
else:
return self.activator(images, augmenter, parents, default) | [
"def",
"is_activated",
"(",
"self",
",",
"images",
",",
"augmenter",
",",
"parents",
",",
"default",
")",
":",
"if",
"self",
".",
"activator",
"is",
"None",
":",
"return",
"default",
"else",
":",
"return",
"self",
".",
"activator",
"(",
"images",
",",
"augmenter",
",",
"parents",
",",
"default",
")"
] | Returns whether an augmenter may be executed.
Returns
-------
bool
If True, the augmenter may be executed. If False, it may not be executed. | [
"Returns",
"whether",
"an",
"augmenter",
"may",
"be",
"executed",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L1941-L1954 | valid |
aleju/imgaug | imgaug/imgaug.py | HooksImages.is_propagating | def is_propagating(self, images, augmenter, parents, default):
"""
Returns whether an augmenter may call its children to augment an
image. This is independent of the augmenter itself possibly changing
the image, without calling its children. (Most (all?) augmenters with
children currently don't perform any changes themselves.)
Returns
-------
bool
If True, the augmenter may propagate to its children. If False, it may not.
"""
if self.propagator is None:
return default
else:
return self.propagator(images, augmenter, parents, default) | python | def is_propagating(self, images, augmenter, parents, default):
"""
Returns whether an augmenter may call its children to augment an
image. This is independent of the augmenter itself possibly changing
the image, without calling its children. (Most (all?) augmenters with
children currently don't perform any changes themselves.)
Returns
-------
bool
If True, the augmenter may propagate to its children. If False, it may not.
"""
if self.propagator is None:
return default
else:
return self.propagator(images, augmenter, parents, default) | [
"def",
"is_propagating",
"(",
"self",
",",
"images",
",",
"augmenter",
",",
"parents",
",",
"default",
")",
":",
"if",
"self",
".",
"propagator",
"is",
"None",
":",
"return",
"default",
"else",
":",
"return",
"self",
".",
"propagator",
"(",
"images",
",",
"augmenter",
",",
"parents",
",",
"default",
")"
] | Returns whether an augmenter may call its children to augment an
image. This is independent of the augmenter itself possibly changing
the image, without calling its children. (Most (all?) augmenters with
children currently don't perform any changes themselves.)
Returns
-------
bool
If True, the augmenter may propagate to its children. If False, it may not. | [
"Returns",
"whether",
"an",
"augmenter",
"may",
"call",
"its",
"children",
"to",
"augment",
"an",
"image",
".",
"This",
"is",
"independent",
"of",
"the",
"augmenter",
"itself",
"possible",
"changing",
"the",
"image",
"without",
"calling",
"its",
"children",
".",
"(",
"Most",
"(",
"all?",
")",
"augmenters",
"with",
"children",
"currently",
"dont",
"perform",
"any",
"changes",
"themselves",
".",
")"
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L1956-L1972 | valid |
aleju/imgaug | imgaug/imgaug.py | HooksImages.preprocess | def preprocess(self, images, augmenter, parents):
"""
A function to be called before the augmentation of images starts (per augmenter).
Returns
-------
(N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
The input images, optionally modified.
"""
if self.preprocessor is None:
return images
else:
return self.preprocessor(images, augmenter, parents) | python | def preprocess(self, images, augmenter, parents):
"""
A function to be called before the augmentation of images starts (per augmenter).
Returns
-------
(N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
The input images, optionally modified.
"""
if self.preprocessor is None:
return images
else:
return self.preprocessor(images, augmenter, parents) | [
"def",
"preprocess",
"(",
"self",
",",
"images",
",",
"augmenter",
",",
"parents",
")",
":",
"if",
"self",
".",
"preprocessor",
"is",
"None",
":",
"return",
"images",
"else",
":",
"return",
"self",
".",
"preprocessor",
"(",
"images",
",",
"augmenter",
",",
"parents",
")"
] | A function to be called before the augmentation of images starts (per augmenter).
Returns
-------
(N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
The input images, optionally modified. | [
"A",
"function",
"to",
"be",
"called",
"before",
"the",
"augmentation",
"of",
"images",
"starts",
"(",
"per",
"augmenter",
")",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L1974-L1987 | valid |
aleju/imgaug | imgaug/imgaug.py | HooksImages.postprocess | def postprocess(self, images, augmenter, parents):
"""
A function to be called after the augmentation of images was
performed.
Returns
-------
(N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
The input images, optionally modified.
"""
if self.postprocessor is None:
return images
else:
return self.postprocessor(images, augmenter, parents) | python | def postprocess(self, images, augmenter, parents):
"""
A function to be called after the augmentation of images was
performed.
Returns
-------
(N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
The input images, optionally modified.
"""
if self.postprocessor is None:
return images
else:
return self.postprocessor(images, augmenter, parents) | [
"def",
"postprocess",
"(",
"self",
",",
"images",
",",
"augmenter",
",",
"parents",
")",
":",
"if",
"self",
".",
"postprocessor",
"is",
"None",
":",
"return",
"images",
"else",
":",
"return",
"self",
".",
"postprocessor",
"(",
"images",
",",
"augmenter",
",",
"parents",
")"
] | A function to be called after the augmentation of images was
performed.
Returns
-------
(N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
The input images, optionally modified. | [
"A",
"function",
"to",
"be",
"called",
"after",
"the",
"augmentation",
"of",
"images",
"was",
"performed",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L1989-L2003 | valid |
aleju/imgaug | imgaug/multicore.py | Pool.pool | def pool(self):
"""Return the multiprocessing.Pool instance or create it if not done yet.
Returns
-------
multiprocessing.Pool
The multiprocessing.Pool used internally by this imgaug.multicore.Pool.
"""
if self._pool is None:
processes = self.processes
if processes is not None and processes < 0:
try:
# cpu count includes the hyperthreads, e.g. 8 for 4 cores + hyperthreading
processes = multiprocessing.cpu_count() - abs(processes)
processes = max(processes, 1)
except (ImportError, NotImplementedError):
processes = None
self._pool = multiprocessing.Pool(processes,
initializer=_Pool_initialize_worker,
initargs=(self.augseq, self.seed),
maxtasksperchild=self.maxtasksperchild)
return self._pool | python | def pool(self):
"""Return the multiprocessing.Pool instance or create it if not done yet.
Returns
-------
multiprocessing.Pool
The multiprocessing.Pool used internally by this imgaug.multicore.Pool.
"""
if self._pool is None:
processes = self.processes
if processes is not None and processes < 0:
try:
# cpu count includes the hyperthreads, e.g. 8 for 4 cores + hyperthreading
processes = multiprocessing.cpu_count() - abs(processes)
processes = max(processes, 1)
except (ImportError, NotImplementedError):
processes = None
self._pool = multiprocessing.Pool(processes,
initializer=_Pool_initialize_worker,
initargs=(self.augseq, self.seed),
maxtasksperchild=self.maxtasksperchild)
return self._pool | [
"def",
"pool",
"(",
"self",
")",
":",
"if",
"self",
".",
"_pool",
"is",
"None",
":",
"processes",
"=",
"self",
".",
"processes",
"if",
"processes",
"is",
"not",
"None",
"and",
"processes",
"<",
"0",
":",
"try",
":",
"# cpu count includes the hyperthreads, e.g. 8 for 4 cores + hyperthreading",
"processes",
"=",
"multiprocessing",
".",
"cpu_count",
"(",
")",
"-",
"abs",
"(",
"processes",
")",
"processes",
"=",
"max",
"(",
"processes",
",",
"1",
")",
"except",
"(",
"ImportError",
",",
"NotImplementedError",
")",
":",
"processes",
"=",
"None",
"self",
".",
"_pool",
"=",
"multiprocessing",
".",
"Pool",
"(",
"processes",
",",
"initializer",
"=",
"_Pool_initialize_worker",
",",
"initargs",
"=",
"(",
"self",
".",
"augseq",
",",
"self",
".",
"seed",
")",
",",
"maxtasksperchild",
"=",
"self",
".",
"maxtasksperchild",
")",
"return",
"self",
".",
"_pool"
] | Return the multiprocessing.Pool instance or create it if not done yet.
Returns
-------
multiprocessing.Pool
The multiprocessing.Pool used internally by this imgaug.multicore.Pool. | [
"Return",
"the",
"multiprocessing",
".",
"Pool",
"instance",
"or",
"create",
"it",
"if",
"not",
"done",
"yet",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/multicore.py#L85-L108 | valid |
aleju/imgaug | imgaug/multicore.py | Pool.map_batches | def map_batches(self, batches, chunksize=None):
"""
Augment batches.
Parameters
----------
batches : list of imgaug.augmentables.batches.Batch
The batches to augment.
chunksize : None or int, optional
Rough indicator of how many tasks should be sent to each worker. Increasing this number can improve
performance.
Returns
-------
list of imgaug.augmentables.batches.Batch
Augmented batches.
"""
assert isinstance(batches, list), ("Expected to get a list as 'batches', got type %s. "
+ "Call imap_batches() if you use generators.") % (type(batches),)
return self.pool.map(_Pool_starworker, self._handle_batch_ids(batches), chunksize=chunksize) | python | def map_batches(self, batches, chunksize=None):
"""
Augment batches.
Parameters
----------
batches : list of imgaug.augmentables.batches.Batch
The batches to augment.
chunksize : None or int, optional
Rough indicator of how many tasks should be sent to each worker. Increasing this number can improve
performance.
Returns
-------
list of imgaug.augmentables.batches.Batch
Augmented batches.
"""
assert isinstance(batches, list), ("Expected to get a list as 'batches', got type %s. "
+ "Call imap_batches() if you use generators.") % (type(batches),)
return self.pool.map(_Pool_starworker, self._handle_batch_ids(batches), chunksize=chunksize) | [
"def",
"map_batches",
"(",
"self",
",",
"batches",
",",
"chunksize",
"=",
"None",
")",
":",
"assert",
"isinstance",
"(",
"batches",
",",
"list",
")",
",",
"(",
"\"Expected to get a list as 'batches', got type %s. \"",
"+",
"\"Call imap_batches() if you use generators.\"",
")",
"%",
"(",
"type",
"(",
"batches",
")",
",",
")",
"return",
"self",
".",
"pool",
".",
"map",
"(",
"_Pool_starworker",
",",
"self",
".",
"_handle_batch_ids",
"(",
"batches",
")",
",",
"chunksize",
"=",
"chunksize",
")"
] | Augment batches.
Parameters
----------
batches : list of imgaug.augmentables.batches.Batch
The batches to augment.
chunksize : None or int, optional
Rough indicator of how many tasks should be sent to each worker. Increasing this number can improve
performance.
Returns
-------
list of imgaug.augmentables.batches.Batch
Augmented batches. | [
"Augment",
"batches",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/multicore.py#L110-L131 | valid |
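Tying the two multicore records above together, a hedged end-to-end sketch: build a pool around an augmentation sequence and augment a list of batches across worker processes. It assumes the Batch class from imgaug.augmentables.batches referenced in the docstrings and that the pool exposes close()/join() like the wrapped multiprocessing.Pool; on Windows this would need to live under an `if __name__ == "__main__":` guard:

import numpy as np
from imgaug import augmenters as iaa, multicore
from imgaug.augmentables.batches import Batch

augseq = iaa.Fliplr(0.5)
batches = [Batch(images=[np.zeros((32, 32, 3), dtype=np.uint8)] * 4)
           for _ in range(8)]

pool = multicore.Pool(augseq, processes=-1, seed=1)   # all CPU cores except one
batches_aug = pool.map_batches(batches, chunksize=2)
pool.close()
pool.join()
print(len(batches_aug))   # 8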
aleju/imgaug | imgaug/multicore.py | Pool.map_batches_async | def map_batches_async(self, batches, chunksize=None, callback=None, error_callback=None):
"""
Augment batches asynchronously.
Parameters
----------
batches : list of imgaug.augmentables.batches.Batch
The batches to augment.
chunksize : None or int, optional
Rough indicator of how many tasks should be sent to each worker. Increasing this number can improve
performance.
callback : None or callable, optional
Function to call upon finish. See `multiprocessing.Pool`.
error_callback : None or callable, optional
Function to call upon errors. See `multiprocessing.Pool`.
Returns
-------
multiprocessing.MapResult
Asynchronous result. See `multiprocessing.Pool`.
"""
assert isinstance(batches, list), ("Expected to get a list as 'batches', got type %s. "
+ "Call imap_batches() if you use generators.") % (type(batches),)
return self.pool.map_async(_Pool_starworker, self._handle_batch_ids(batches),
chunksize=chunksize, callback=callback, error_callback=error_callback) | python | def map_batches_async(self, batches, chunksize=None, callback=None, error_callback=None):
"""
Augment batches asynchronously.
Parameters
----------
batches : list of imgaug.augmentables.batches.Batch
The batches to augment.
chunksize : None or int, optional
Rough indicator of how many tasks should be sent to each worker. Increasing this number can improve
performance.
callback : None or callable, optional
Function to call upon finish. See `multiprocessing.Pool`.
error_callback : None or callable, optional
Function to call upon errors. See `multiprocessing.Pool`.
Returns
-------
multiprocessing.MapResult
Asynchronous result. See `multiprocessing.Pool`.
"""
assert isinstance(batches, list), ("Expected to get a list as 'batches', got type %s. "
+ "Call imap_batches() if you use generators.") % (type(batches),)
return self.pool.map_async(_Pool_starworker, self._handle_batch_ids(batches),
chunksize=chunksize, callback=callback, error_callback=error_callback) | [
"def",
"map_batches_async",
"(",
"self",
",",
"batches",
",",
"chunksize",
"=",
"None",
",",
"callback",
"=",
"None",
",",
"error_callback",
"=",
"None",
")",
":",
"assert",
"isinstance",
"(",
"batches",
",",
"list",
")",
",",
"(",
"\"Expected to get a list as 'batches', got type %s. \"",
"+",
"\"Call imap_batches() if you use generators.\"",
")",
"%",
"(",
"type",
"(",
"batches",
")",
",",
")",
"return",
"self",
".",
"pool",
".",
"map_async",
"(",
"_Pool_starworker",
",",
"self",
".",
"_handle_batch_ids",
"(",
"batches",
")",
",",
"chunksize",
"=",
"chunksize",
",",
"callback",
"=",
"callback",
",",
"error_callback",
"=",
"error_callback",
")"
] | Augment batches asynchronously.
Parameters
----------
batches : list of imgaug.augmentables.batches.Batch
The batches to augment.
chunksize : None or int, optional
Rough indicator of how many tasks should be sent to each worker. Increasing this number can improve
performance.
callback : None or callable, optional
Function to call upon finish. See `multiprocessing.Pool`.
error_callback : None or callable, optional
Function to call upon errors. See `multiprocessing.Pool`.
Returns
-------
multiprocessing.MapResult
Asynchronous result. See `multiprocessing.Pool`. | [
"Augment",
"batches",
"asynchronously",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/multicore.py#L133-L161 | valid |
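For the asynchronous variant just documented, a sketch along the same lines; the Pool constructor is again assumed and on_done is a hypothetical callback:

import numpy as np
import imgaug.augmenters as iaa
from imgaug import multicore
from imgaug.augmentables.batches import Batch

batches = [Batch(images=np.random.randint(0, 255, (16, 64, 64, 3), dtype=np.uint8))
           for _ in range(8)]
augseq = iaa.Fliplr(0.5)

def on_done(batches_aug):
    # hypothetical callback, invoked once all batches have been augmented
    print("augmented %d batches in the background" % (len(batches_aug),))

pool = multicore.Pool(augseq, processes=2)  # assumed constructor signature
result = pool.map_batches_async(batches, chunksize=2, callback=on_done)
batches_aug = result.get()  # MapResult.get() blocks until the workers are done
pool.terminate()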
aleju/imgaug | imgaug/multicore.py | Pool.imap_batches | def imap_batches(self, batches, chunksize=1):
"""
Augment batches from a generator.
Parameters
----------
batches : generator of imgaug.augmentables.batches.Batch
The batches to augment, provided as a generator. Each call to the generator should yield exactly one
batch.
chunksize : None or int, optional
Rough indicator of how many tasks should be sent to each worker. Increasing this number can improve
performance.
Yields
------
imgaug.augmentables.batches.Batch
Augmented batch.
"""
assert ia.is_generator(batches), ("Expected to get a generator as 'batches', got type %s. "
+ "Call map_batches() if you use lists.") % (type(batches),)
# TODO change this to 'yield from' once switched to 3.3+
gen = self.pool.imap(_Pool_starworker, self._handle_batch_ids_gen(batches), chunksize=chunksize)
for batch in gen:
yield batch | python | def imap_batches(self, batches, chunksize=1):
"""
Augment batches from a generator.
Parameters
----------
batches : generator of imgaug.augmentables.batches.Batch
The batches to augment, provided as a generator. Each call to the generator should yield exactly one
batch.
chunksize : None or int, optional
Rough indicator of how many tasks should be sent to each worker. Increasing this number can improve
performance.
Yields
------
imgaug.augmentables.batches.Batch
Augmented batch.
"""
assert ia.is_generator(batches), ("Expected to get a generator as 'batches', got type %s. "
+ "Call map_batches() if you use lists.") % (type(batches),)
# TODO change this to 'yield from' once switched to 3.3+
gen = self.pool.imap(_Pool_starworker, self._handle_batch_ids_gen(batches), chunksize=chunksize)
for batch in gen:
yield batch | [
"def",
"imap_batches",
"(",
"self",
",",
"batches",
",",
"chunksize",
"=",
"1",
")",
":",
"assert",
"ia",
".",
"is_generator",
"(",
"batches",
")",
",",
"(",
"\"Expected to get a generator as 'batches', got type %s. \"",
"+",
"\"Call map_batches() if you use lists.\"",
")",
"%",
"(",
"type",
"(",
"batches",
")",
",",
")",
"# TODO change this to 'yield from' once switched to 3.3+",
"gen",
"=",
"self",
".",
"pool",
".",
"imap",
"(",
"_Pool_starworker",
",",
"self",
".",
"_handle_batch_ids_gen",
"(",
"batches",
")",
",",
"chunksize",
"=",
"chunksize",
")",
"for",
"batch",
"in",
"gen",
":",
"yield",
"batch"
] | Augment batches from a generator.
Parameters
----------
batches : generator of imgaug.augmentables.batches.Batch
The batches to augment, provided as a generator. Each call to the generator should yield exactly one
batch.
chunksize : None or int, optional
Rough indicator of how many tasks should be sent to each worker. Increasing this number can improve
performance.
Yields
------
imgaug.augmentables.batches.Batch
Augmented batch. | [
"Augment",
"batches",
"from",
"a",
"generator",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/multicore.py#L163-L188 | valid |
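imap_batches() is the generator-driven counterpart. A sketch under the same assumptions, where make_batches() and train_on_images() are hypothetical placeholders:

import numpy as np
import imgaug.augmenters as iaa
from imgaug import multicore
from imgaug.augmentables.batches import Batch

def make_batches(n=100):
    # hypothetical generator yielding exactly one Batch per iteration
    for _ in range(n):
        yield Batch(images=np.random.randint(0, 255, (16, 64, 64, 3), dtype=np.uint8))

def train_on_images(images_aug):
    pass  # hypothetical training step

augseq = iaa.Fliplr(0.5)
pool = multicore.Pool(augseq, processes=None)  # assumed constructor signature
for batch_aug in pool.imap_batches(make_batches(), chunksize=4):
    train_on_images(batch_aug.images_aug)
pool.terminate()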
aleju/imgaug | imgaug/multicore.py | Pool.imap_batches_unordered | def imap_batches_unordered(self, batches, chunksize=1):
"""
Augment batches from a generator in a way that does not guarantee to preserve order.
Parameters
----------
batches : generator of imgaug.augmentables.batches.Batch
The batches to augment, provided as a generator. Each call to the generator should yield exactly one
batch.
chunksize : None or int, optional
Rough indicator of how many tasks should be sent to each worker. Increasing this number can improve
performance.
Yields
------
imgaug.augmentables.batches.Batch
Augmented batch.
"""
assert ia.is_generator(batches), ("Expected to get a generator as 'batches', got type %s. "
+ "Call map_batches() if you use lists.") % (type(batches),)
# TODO change this to 'yield from' once switched to 3.3+
gen = self.pool.imap_unordered(_Pool_starworker, self._handle_batch_ids_gen(batches), chunksize=chunksize)
for batch in gen:
yield batch | python | def imap_batches_unordered(self, batches, chunksize=1):
"""
Augment batches from a generator in a way that does not guarantee to preserve order.
Parameters
----------
batches : generator of imgaug.augmentables.batches.Batch
The batches to augment, provided as a generator. Each call to the generator should yield exactly one
batch.
chunksize : None or int, optional
Rough indicator of how many tasks should be sent to each worker. Increasing this number can improve
performance.
Yields
------
imgaug.augmentables.batches.Batch
Augmented batch.
"""
assert ia.is_generator(batches), ("Expected to get a generator as 'batches', got type %s. "
+ "Call map_batches() if you use lists.") % (type(batches),)
# TODO change this to 'yield from' once switched to 3.3+
gen = self.pool.imap_unordered(_Pool_starworker, self._handle_batch_ids_gen(batches), chunksize=chunksize)
for batch in gen:
yield batch | [
"def",
"imap_batches_unordered",
"(",
"self",
",",
"batches",
",",
"chunksize",
"=",
"1",
")",
":",
"assert",
"ia",
".",
"is_generator",
"(",
"batches",
")",
",",
"(",
"\"Expected to get a generator as 'batches', got type %s. \"",
"+",
"\"Call map_batches() if you use lists.\"",
")",
"%",
"(",
"type",
"(",
"batches",
")",
",",
")",
"# TODO change this to 'yield from' once switched to 3.3+",
"gen",
"=",
"self",
".",
"pool",
".",
"imap_unordered",
"(",
"_Pool_starworker",
",",
"self",
".",
"_handle_batch_ids_gen",
"(",
"batches",
")",
",",
"chunksize",
"=",
"chunksize",
")",
"for",
"batch",
"in",
"gen",
":",
"yield",
"batch"
] | Augment batches from a generator in a way that does not guarantee to preserve order.
Parameters
----------
batches : generator of imgaug.augmentables.batches.Batch
The batches to augment, provided as a generator. Each call to the generator should yield exactly one
batch.
chunksize : None or int, optional
Rough indicator of how many tasks should be sent to each worker. Increasing this number can improve
performance.
Yields
------
imgaug.augmentables.batches.Batch
Augmented batch. | [
"Augment",
"batches",
"from",
"a",
"generator",
"in",
"a",
"way",
"that",
"does",
"not",
"guarantee",
"to",
"preserve",
"order",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/multicore.py#L190-L215 | valid |
aleju/imgaug | imgaug/multicore.py | Pool.terminate | def terminate(self):
"""Terminate the pool immediately."""
if self._pool is not None:
self._pool.terminate()
self._pool.join()
self._pool = None | python | def terminate(self):
"""Terminate the pool immediately."""
if self._pool is not None:
self._pool.terminate()
self._pool.join()
self._pool = None | [
"def",
"terminate",
"(",
"self",
")",
":",
"if",
"self",
".",
"_pool",
"is",
"not",
"None",
":",
"self",
".",
"_pool",
".",
"terminate",
"(",
")",
"self",
".",
"_pool",
".",
"join",
"(",
")",
"self",
".",
"_pool",
"=",
"None"
] | Terminate the pool immediately. | [
"Terminate",
"the",
"pool",
"immediately",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/multicore.py#L233-L238 | valid |
aleju/imgaug | imgaug/multicore.py | BatchLoader.terminate | def terminate(self):
"""Stop all workers."""
if not self.join_signal.is_set():
self.join_signal.set()
# give minimal time to put generated batches in queue and gracefully shut down
time.sleep(0.01)
if self.main_worker_thread.is_alive():
self.main_worker_thread.join()
if self.threaded:
for worker in self.workers:
if worker.is_alive():
worker.join()
else:
for worker in self.workers:
if worker.is_alive():
worker.terminate()
worker.join()
# wait until all workers are fully terminated
while not self.all_finished():
time.sleep(0.001)
# empty queue until at least one element can be added and place None as signal that BL finished
if self.queue.full():
self.queue.get()
self.queue.put(pickle.dumps(None, protocol=-1))
time.sleep(0.01)
# clean the queue, this reportedly prevents hanging threads
while True:
try:
self._queue_internal.get(timeout=0.005)
except QueueEmpty:
break
if not self._queue_internal._closed:
self._queue_internal.close()
if not self.queue._closed:
self.queue.close()
self._queue_internal.join_thread()
self.queue.join_thread()
time.sleep(0.025) | python | def terminate(self):
"""Stop all workers."""
if not self.join_signal.is_set():
self.join_signal.set()
# give minimal time to put generated batches in queue and gracefully shut down
time.sleep(0.01)
if self.main_worker_thread.is_alive():
self.main_worker_thread.join()
if self.threaded:
for worker in self.workers:
if worker.is_alive():
worker.join()
else:
for worker in self.workers:
if worker.is_alive():
worker.terminate()
worker.join()
# wait until all workers are fully terminated
while not self.all_finished():
time.sleep(0.001)
# empty queue until at least one element can be added and place None as signal that BL finished
if self.queue.full():
self.queue.get()
self.queue.put(pickle.dumps(None, protocol=-1))
time.sleep(0.01)
# clean the queue, this reportedly prevents hanging threads
while True:
try:
self._queue_internal.get(timeout=0.005)
except QueueEmpty:
break
if not self._queue_internal._closed:
self._queue_internal.close()
if not self.queue._closed:
self.queue.close()
self._queue_internal.join_thread()
self.queue.join_thread()
time.sleep(0.025) | [
"def",
"terminate",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"join_signal",
".",
"is_set",
"(",
")",
":",
"self",
".",
"join_signal",
".",
"set",
"(",
")",
"# give minimal time to put generated batches in queue and gracefully shut down",
"time",
".",
"sleep",
"(",
"0.01",
")",
"if",
"self",
".",
"main_worker_thread",
".",
"is_alive",
"(",
")",
":",
"self",
".",
"main_worker_thread",
".",
"join",
"(",
")",
"if",
"self",
".",
"threaded",
":",
"for",
"worker",
"in",
"self",
".",
"workers",
":",
"if",
"worker",
".",
"is_alive",
"(",
")",
":",
"worker",
".",
"join",
"(",
")",
"else",
":",
"for",
"worker",
"in",
"self",
".",
"workers",
":",
"if",
"worker",
".",
"is_alive",
"(",
")",
":",
"worker",
".",
"terminate",
"(",
")",
"worker",
".",
"join",
"(",
")",
"# wait until all workers are fully terminated",
"while",
"not",
"self",
".",
"all_finished",
"(",
")",
":",
"time",
".",
"sleep",
"(",
"0.001",
")",
"# empty queue until at least one element can be added and place None as signal that BL finished",
"if",
"self",
".",
"queue",
".",
"full",
"(",
")",
":",
"self",
".",
"queue",
".",
"get",
"(",
")",
"self",
".",
"queue",
".",
"put",
"(",
"pickle",
".",
"dumps",
"(",
"None",
",",
"protocol",
"=",
"-",
"1",
")",
")",
"time",
".",
"sleep",
"(",
"0.01",
")",
"# clean the queue, this reportedly prevents hanging threads",
"while",
"True",
":",
"try",
":",
"self",
".",
"_queue_internal",
".",
"get",
"(",
"timeout",
"=",
"0.005",
")",
"except",
"QueueEmpty",
":",
"break",
"if",
"not",
"self",
".",
"_queue_internal",
".",
"_closed",
":",
"self",
".",
"_queue_internal",
".",
"close",
"(",
")",
"if",
"not",
"self",
".",
"queue",
".",
"_closed",
":",
"self",
".",
"queue",
".",
"close",
"(",
")",
"self",
".",
"_queue_internal",
".",
"join_thread",
"(",
")",
"self",
".",
"queue",
".",
"join_thread",
"(",
")",
"time",
".",
"sleep",
"(",
"0.025",
")"
] | Stop all workers. | [
"Stop",
"all",
"workers",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/multicore.py#L442-L485 | valid |
aleju/imgaug | imgaug/multicore.py | BackgroundAugmenter.get_batch | def get_batch(self):
"""
Returns a batch from the queue of augmented batches.
If workers are still running and there are no batches in the queue,
it will automatically wait for the next batch.
Returns
-------
out : None or imgaug.Batch
One batch or None if all workers have finished.
"""
if self.all_finished():
return None
batch_str = self.queue_result.get()
batch = pickle.loads(batch_str)
if batch is not None:
return batch
else:
self.nb_workers_finished += 1
if self.nb_workers_finished >= self.nb_workers:
try:
self.queue_source.get(timeout=0.001) # remove the None from the source queue
except QueueEmpty:
pass
return None
else:
return self.get_batch() | python | def get_batch(self):
"""
Returns a batch from the queue of augmented batches.
If workers are still running and there are no batches in the queue,
it will automatically wait for the next batch.
Returns
-------
out : None or imgaug.Batch
One batch or None if all workers have finished.
"""
if self.all_finished():
return None
batch_str = self.queue_result.get()
batch = pickle.loads(batch_str)
if batch is not None:
return batch
else:
self.nb_workers_finished += 1
if self.nb_workers_finished >= self.nb_workers:
try:
self.queue_source.get(timeout=0.001) # remove the None from the source queue
except QueueEmpty:
pass
return None
else:
return self.get_batch() | [
"def",
"get_batch",
"(",
"self",
")",
":",
"if",
"self",
".",
"all_finished",
"(",
")",
":",
"return",
"None",
"batch_str",
"=",
"self",
".",
"queue_result",
".",
"get",
"(",
")",
"batch",
"=",
"pickle",
".",
"loads",
"(",
"batch_str",
")",
"if",
"batch",
"is",
"not",
"None",
":",
"return",
"batch",
"else",
":",
"self",
".",
"nb_workers_finished",
"+=",
"1",
"if",
"self",
".",
"nb_workers_finished",
">=",
"self",
".",
"nb_workers",
":",
"try",
":",
"self",
".",
"queue_source",
".",
"get",
"(",
"timeout",
"=",
"0.001",
")",
"# remove the None from the source queue",
"except",
"QueueEmpty",
":",
"pass",
"return",
"None",
"else",
":",
"return",
"self",
".",
"get_batch",
"(",
")"
] | Returns a batch from the queue of augmented batches.
If workers are still running and there are no batches in the queue,
it will automatically wait for the next batch.
Returns
-------
out : None or imgaug.Batch
One batch or None if all workers have finished. | [
"Returns",
"a",
"batch",
"from",
"the",
"queue",
"of",
"augmented",
"batches",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/multicore.py#L557-L586 | valid |
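get_batch() above is the consumer end of the BatchLoader/BackgroundAugmenter pair. A rough sketch of that loop follows; the two constructor calls are guessed from the class names and are not confirmed by these entries, so treat their signatures as assumptions:

import numpy as np
import imgaug.augmenters as iaa
from imgaug import multicore
from imgaug.augmentables.batches import Batch

def load_batches():
    # hypothetical generator yielding un-augmented Batch objects
    for _ in range(50):
        yield Batch(images=np.random.randint(0, 255, (16, 64, 64, 3), dtype=np.uint8))

augseq = iaa.Fliplr(0.5)
batch_loader = multicore.BatchLoader(load_batches)                  # assumed signature
bg_augmenter = multicore.BackgroundAugmenter(batch_loader, augseq)  # assumed signature

while True:
    batch = bg_augmenter.get_batch()
    if batch is None:  # all workers have finished
        break
    images_aug = batch.images_aug  # consume the augmented data here

batch_loader.terminate()
bg_augmenter.terminate()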
aleju/imgaug | imgaug/multicore.py | BackgroundAugmenter._augment_images_worker | def _augment_images_worker(self, augseq, queue_source, queue_result, seedval):
"""
Endlessly augment images from the source queue.
This is a worker function that endlessly queries the source queue (input batches),
augments batches in it and sends the result to the output queue.
"""
np.random.seed(seedval)
random.seed(seedval)
augseq.reseed(seedval)
ia.seed(seedval)
loader_finished = False
while not loader_finished:
# wait for a new batch in the source queue and load it
try:
batch_str = queue_source.get(timeout=0.1)
batch = pickle.loads(batch_str)
if batch is None:
loader_finished = True
# put it back in so that other workers know that the loading queue is finished
queue_source.put(pickle.dumps(None, protocol=-1))
else:
batch_aug = augseq.augment_batch(batch)
# send augmented batch to output queue
batch_str = pickle.dumps(batch_aug, protocol=-1)
queue_result.put(batch_str)
except QueueEmpty:
time.sleep(0.01)
queue_result.put(pickle.dumps(None, protocol=-1))
time.sleep(0.01) | python | def _augment_images_worker(self, augseq, queue_source, queue_result, seedval):
"""
Endlessly augment images from the source queue.
This is a worker function that endlessly queries the source queue (input batches),
augments batches in it and sends the result to the output queue.
"""
np.random.seed(seedval)
random.seed(seedval)
augseq.reseed(seedval)
ia.seed(seedval)
loader_finished = False
while not loader_finished:
# wait for a new batch in the source queue and load it
try:
batch_str = queue_source.get(timeout=0.1)
batch = pickle.loads(batch_str)
if batch is None:
loader_finished = True
# put it back in so that other workers know that the loading queue is finished
queue_source.put(pickle.dumps(None, protocol=-1))
else:
batch_aug = augseq.augment_batch(batch)
# send augmented batch to output queue
batch_str = pickle.dumps(batch_aug, protocol=-1)
queue_result.put(batch_str)
except QueueEmpty:
time.sleep(0.01)
queue_result.put(pickle.dumps(None, protocol=-1))
time.sleep(0.01) | [
"def",
"_augment_images_worker",
"(",
"self",
",",
"augseq",
",",
"queue_source",
",",
"queue_result",
",",
"seedval",
")",
":",
"np",
".",
"random",
".",
"seed",
"(",
"seedval",
")",
"random",
".",
"seed",
"(",
"seedval",
")",
"augseq",
".",
"reseed",
"(",
"seedval",
")",
"ia",
".",
"seed",
"(",
"seedval",
")",
"loader_finished",
"=",
"False",
"while",
"not",
"loader_finished",
":",
"# wait for a new batch in the source queue and load it",
"try",
":",
"batch_str",
"=",
"queue_source",
".",
"get",
"(",
"timeout",
"=",
"0.1",
")",
"batch",
"=",
"pickle",
".",
"loads",
"(",
"batch_str",
")",
"if",
"batch",
"is",
"None",
":",
"loader_finished",
"=",
"True",
"# put it back in so that other workers know that the loading queue is finished",
"queue_source",
".",
"put",
"(",
"pickle",
".",
"dumps",
"(",
"None",
",",
"protocol",
"=",
"-",
"1",
")",
")",
"else",
":",
"batch_aug",
"=",
"augseq",
".",
"augment_batch",
"(",
"batch",
")",
"# send augmented batch to output queue",
"batch_str",
"=",
"pickle",
".",
"dumps",
"(",
"batch_aug",
",",
"protocol",
"=",
"-",
"1",
")",
"queue_result",
".",
"put",
"(",
"batch_str",
")",
"except",
"QueueEmpty",
":",
"time",
".",
"sleep",
"(",
"0.01",
")",
"queue_result",
".",
"put",
"(",
"pickle",
".",
"dumps",
"(",
"None",
",",
"protocol",
"=",
"-",
"1",
")",
")",
"time",
".",
"sleep",
"(",
"0.01",
")"
] | Endlessly augment images from the source queue.
This is a worker function that endlessly queries the source queue (input batches),
augments batches in it and sends the result to the output queue. | [
"Endlessly",
"augment",
"images",
"from",
"the",
"source",
"queue",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/multicore.py#L588-L622 | valid |
aleju/imgaug | imgaug/multicore.py | BackgroundAugmenter.terminate | def terminate(self):
"""
Terminates all background processes immediately.
This will also free their RAM.
"""
for worker in self.workers:
if worker.is_alive():
worker.terminate()
self.nb_workers_finished = len(self.workers)
if not self.queue_result._closed:
self.queue_result.close()
time.sleep(0.01) | python | def terminate(self):
"""
Terminates all background processes immediately.
This will also free their RAM.
"""
for worker in self.workers:
if worker.is_alive():
worker.terminate()
self.nb_workers_finished = len(self.workers)
if not self.queue_result._closed:
self.queue_result.close()
time.sleep(0.01) | [
"def",
"terminate",
"(",
"self",
")",
":",
"for",
"worker",
"in",
"self",
".",
"workers",
":",
"if",
"worker",
".",
"is_alive",
"(",
")",
":",
"worker",
".",
"terminate",
"(",
")",
"self",
".",
"nb_workers_finished",
"=",
"len",
"(",
"self",
".",
"workers",
")",
"if",
"not",
"self",
".",
"queue_result",
".",
"_closed",
":",
"self",
".",
"queue_result",
".",
"close",
"(",
")",
"time",
".",
"sleep",
"(",
"0.01",
")"
] | Terminates all background processes immediately.
This will also free their RAM. | [
"Terminates",
"all",
"background",
"processes",
"immediately",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/multicore.py#L624-L638 | valid |
aleju/imgaug | imgaug/augmentables/batches.py | UnnormalizedBatch.to_normalized_batch | def to_normalized_batch(self):
"""Convert this unnormalized batch to an instance of Batch.
As this method is intended to be called before augmentation, it
assumes that none of the ``*_aug`` attributes is yet set.
It will produce an AssertionError otherwise.
The newly created Batch's ``*_unaug`` attributes will match the ones
in this batch, just in normalized form.
Returns
-------
imgaug.augmentables.batches.Batch
The batch, with ``*_unaug`` attributes being normalized.
"""
assert all([
attr is None for attr_name, attr in self.__dict__.items()
if attr_name.endswith("_aug")]), \
"Expected UnnormalizedBatch to not contain any augmented data " \
"before normalization, but at least one '*_aug' attribute was " \
"already set."
images_unaug = nlib.normalize_images(self.images_unaug)
shapes = None
if images_unaug is not None:
shapes = [image.shape for image in images_unaug]
return Batch(
images=images_unaug,
heatmaps=nlib.normalize_heatmaps(
self.heatmaps_unaug, shapes),
segmentation_maps=nlib.normalize_segmentation_maps(
self.segmentation_maps_unaug, shapes),
keypoints=nlib.normalize_keypoints(
self.keypoints_unaug, shapes),
bounding_boxes=nlib.normalize_bounding_boxes(
self.bounding_boxes_unaug, shapes),
polygons=nlib.normalize_polygons(
self.polygons_unaug, shapes),
line_strings=nlib.normalize_line_strings(
self.line_strings_unaug, shapes),
data=self.data
) | python | def to_normalized_batch(self):
"""Convert this unnormalized batch to an instance of Batch.
As this method is intended to be called before augmentation, it
assumes that none of the ``*_aug`` attributes is yet set.
It will produce an AssertionError otherwise.
The newly created Batch's ``*_unaug`` attributes will match the ones
in this batch, just in normalized form.
Returns
-------
imgaug.augmentables.batches.Batch
The batch, with ``*_unaug`` attributes being normalized.
"""
assert all([
attr is None for attr_name, attr in self.__dict__.items()
if attr_name.endswith("_aug")]), \
"Expected UnnormalizedBatch to not contain any augmented data " \
"before normalization, but at least one '*_aug' attribute was " \
"already set."
images_unaug = nlib.normalize_images(self.images_unaug)
shapes = None
if images_unaug is not None:
shapes = [image.shape for image in images_unaug]
return Batch(
images=images_unaug,
heatmaps=nlib.normalize_heatmaps(
self.heatmaps_unaug, shapes),
segmentation_maps=nlib.normalize_segmentation_maps(
self.segmentation_maps_unaug, shapes),
keypoints=nlib.normalize_keypoints(
self.keypoints_unaug, shapes),
bounding_boxes=nlib.normalize_bounding_boxes(
self.bounding_boxes_unaug, shapes),
polygons=nlib.normalize_polygons(
self.polygons_unaug, shapes),
line_strings=nlib.normalize_line_strings(
self.line_strings_unaug, shapes),
data=self.data
) | [
"def",
"to_normalized_batch",
"(",
"self",
")",
":",
"assert",
"all",
"(",
"[",
"attr",
"is",
"None",
"for",
"attr_name",
",",
"attr",
"in",
"self",
".",
"__dict__",
".",
"items",
"(",
")",
"if",
"attr_name",
".",
"endswith",
"(",
"\"_aug\"",
")",
"]",
")",
",",
"\"Expected UnnormalizedBatch to not contain any augmented data \"",
"\"before normalization, but at least one '*_aug' attribute was \"",
"\"already set.\"",
"images_unaug",
"=",
"nlib",
".",
"normalize_images",
"(",
"self",
".",
"images_unaug",
")",
"shapes",
"=",
"None",
"if",
"images_unaug",
"is",
"not",
"None",
":",
"shapes",
"=",
"[",
"image",
".",
"shape",
"for",
"image",
"in",
"images_unaug",
"]",
"return",
"Batch",
"(",
"images",
"=",
"images_unaug",
",",
"heatmaps",
"=",
"nlib",
".",
"normalize_heatmaps",
"(",
"self",
".",
"heatmaps_unaug",
",",
"shapes",
")",
",",
"segmentation_maps",
"=",
"nlib",
".",
"normalize_segmentation_maps",
"(",
"self",
".",
"segmentation_maps_unaug",
",",
"shapes",
")",
",",
"keypoints",
"=",
"nlib",
".",
"normalize_keypoints",
"(",
"self",
".",
"keypoints_unaug",
",",
"shapes",
")",
",",
"bounding_boxes",
"=",
"nlib",
".",
"normalize_bounding_boxes",
"(",
"self",
".",
"bounding_boxes_unaug",
",",
"shapes",
")",
",",
"polygons",
"=",
"nlib",
".",
"normalize_polygons",
"(",
"self",
".",
"polygons_unaug",
",",
"shapes",
")",
",",
"line_strings",
"=",
"nlib",
".",
"normalize_line_strings",
"(",
"self",
".",
"line_strings_unaug",
",",
"shapes",
")",
",",
"data",
"=",
"self",
".",
"data",
")"
] | Convert this unnormalized batch to an instance of Batch.
As this method is intended to be called before augmentation, it
assumes that none of the ``*_aug`` attributes is yet set.
It will produce an AssertionError otherwise.
The newly created Batch's ``*_unaug`` attributes will match the ones
in this batch, just in normalized form.
Returns
-------
imgaug.augmentables.batches.Batch
The batch, with ``*_unaug`` attributes being normalized. | [
"Convert",
"this",
"unnormalized",
"batch",
"to",
"an",
"instance",
"of",
"Batch",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/batches.py#L180-L223 | valid |
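A short sketch of how to_normalized_batch() might be used; it assumes that UnnormalizedBatch accepts coordinates as plain (x, y) tuples, which is the point of the unnormalized variant, and the data is a placeholder:

import numpy as np
from imgaug.augmentables.batches import UnnormalizedBatch

image = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)
# keypoints given as one list of (x, y) tuples per image
batch = UnnormalizedBatch(images=[image], keypoints=[[(10, 20), (30, 40)]])

normalized = batch.to_normalized_batch()
# the *_unaug attributes are now in normalized form, e.g. keypoints as KeypointsOnImage objects
print(type(normalized))
print(normalized.keypoints_unaug)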