Dataset schema:

| field | type |
|---|---|
| repo | string (679 distinct values) |
| path | string (6 to 122 chars) |
| func_name | string (2 to 76 chars) |
| original_string | string (87 to 70.9k chars) |
| language | string (1 value: python) |
| code | string (87 to 70.9k chars) |
| code_tokens | sequence (20 to 6.91k tokens) |
| docstring | string (1 to 21.7k chars) |
| docstring_tokens | sequence (1 to 1.6k tokens) |
| sha | string (679 distinct values) |
| url | string (92 to 213 chars) |
| partition | string (1 value: valid) |

Each record below is shown as a metadata header (repo, path, func_name, language), the function source, and a footer with the commit sha, source URL, and partition.

repo: tensorlayer/tensorlayer | path: tensorlayer/prepro.py | func_name: obj_box_zoom | language: python

def obj_box_zoom(
        im, classes=None, coords=None, zoom_range=(0.9, 1.1), row_index=0, col_index=1, channel_index=2,
        fill_mode='nearest', cval=0., order=1, is_rescale=False, is_center=False, is_random=False,
        thresh_wh=0.02, thresh_wh2=12.
):
    """Zoom in and out of a single image, randomly or non-randomly, and compute the new bounding box coordinates.
    Objects outside the cropped image will be removed.

    Parameters
    -----------
    im : numpy.array
        An image with dimension of [row, col, channel] (default).
    classes : list of int or None
        Class IDs.
    coords : list of list of 4 int/float or None
        Coordinates [[x, y, w, h], [x, y, w, h], ...].
    zoom_range, row_index, col_index, channel_index, is_random, fill_mode, cval and order : see ``tl.prepro.zoom``.
    is_rescale : boolean
        Set to True if the input coordinates are rescaled to [0, 1]. Default is False.
    is_center : boolean
        Set to True if the x and y of coordinates are the centroid (i.e. darknet format). Default is False.
    thresh_wh : float
        Threshold; remove the box if the ratio of its width (or height) to the image size is less than the threshold.
    thresh_wh2 : float
        Threshold; remove the box if the ratio of its width to height, or vice versa, is higher than the threshold.

    Returns
    -------
    numpy.array
        A processed image
    list of int
        A list of classes
    list of list of 4 numbers
        A list of new bounding boxes.

    """
    if classes is None:
        classes = []
    if coords is None:
        coords = []

    if len(zoom_range) != 2:
        raise Exception('zoom_range should be a tuple or list of two floats. Received arg: %s' % str(zoom_range))
    if is_random:
        if zoom_range[0] == 1 and zoom_range[1] == 1:
            zx, zy = 1, 1
            tl.logging.info(" random_zoom : not zoom in/out")
        else:
            zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
    else:
        zx, zy = zoom_range
    # tl.logging.info(zx, zy)
    zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]])

    h, w = im.shape[row_index], im.shape[col_index]
    transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
    im_new = affine_transform(im, transform_matrix, channel_index, fill_mode, cval, order)

    # modified from obj_box_crop
    def _get_coord(coord):
        """Input pixel-unit [x, y, w, h] format; make sure [x, y] are the up-left coordinates
        before computing the new coordinates.
        Boxes outside the cropped image will be removed.

        """
        if is_center:
            coord = obj_box_coord_centroid_to_upleft(coord)

        # ======= pixel unit format and upleft, w, h ==========
        x = (coord[0] - im.shape[1] / 2) / zy + im.shape[1] / 2  # only change this
        y = (coord[1] - im.shape[0] / 2) / zx + im.shape[0] / 2  # only change this
        w = coord[2] / zy  # only change this
        h = coord[3] / zx  # only change this

        if x < 0:
            if x + w <= 0:
                return None
            w = w + x
            x = 0
        elif x > im_new.shape[1]:  # object outside the cropped image
            return None

        if y < 0:
            if y + h <= 0:
                return None
            h = h + y
            y = 0
        elif y > im_new.shape[0]:  # object outside the cropped image
            return None

        if (x is not None) and (x + w > im_new.shape[1]):  # box outside the cropped image
            w = im_new.shape[1] - x

        if (y is not None) and (y + h > im_new.shape[0]):  # box outside the cropped image
            h = im_new.shape[0] - y

        if (w / (h + 1.) > thresh_wh2) or (h / (w + 1.) > thresh_wh2):  # object shape strange: too narrow
            # tl.logging.info('xx', w, h)
            return None

        if (w / (im_new.shape[1] * 1.) < thresh_wh) or (h / (im_new.shape[0] * 1.) <
                                                        thresh_wh):  # object shape strange: too small
            # tl.logging.info('yy', w, im_new.shape[1], h, im_new.shape[0])
            return None

        coord = [x, y, w, h]

        # convert back if input format is center.
        if is_center:
            coord = obj_box_coord_upleft_to_centroid(coord)

        return coord

    coords_new = list()
    classes_new = list()
    for i, _ in enumerate(coords):
        coord = coords[i]

        if len(coord) != 4:
            raise AssertionError("coordinate should be 4 values : [x, y, w, h]")

        if is_rescale:
            # for scaled coord, upscale before processing and scale back in the end.
            coord = obj_box_coord_scale_to_pixelunit(coord, im.shape)
            coord = _get_coord(coord)
            if coord is not None:
                coord = obj_box_coord_rescale(coord, im_new.shape)
                coords_new.append(coord)
                classes_new.append(classes[i])
        else:
            coord = _get_coord(coord)
            if coord is not None:
                coords_new.append(coord)
                classes_new.append(classes[i])
    return im_new, classes_new, coords_new
return im_new, classes_new, coords_new | python | def obj_box_zoom(
im, classes=None, coords=None, zoom_range=(0.9,
1.1), row_index=0, col_index=1, channel_index=2, fill_mode='nearest',
cval=0., order=1, is_rescale=False, is_center=False, is_random=False, thresh_wh=0.02, thresh_wh2=12.
):
"""Zoom in and out of a single image, randomly or non-randomly, and compute the new bounding box coordinates.
Objects outside the cropped image will be removed.
Parameters
-----------
im : numpy.array
An image with dimension of [row, col, channel] (default).
classes : list of int or None
Class IDs.
coords : list of list of 4 int/float or None
Coordinates [[x, y, w, h], [x, y, w, h], ...].
zoom_range row_index col_index channel_index is_random fill_mode cval and order : see ``tl.prepro.zoom``.
is_rescale : boolean
Set to True, if the input coordinates are rescaled to [0, 1]. Default is False.
is_center : boolean
Set to True, if the x and y of coordinates are the centroid. (i.e. darknet format). Default is False.
thresh_wh : float
Threshold, remove the box if its ratio of width(height) to image size less than the threshold.
thresh_wh2 : float
Threshold, remove the box if its ratio of width to height or vice verse higher than the threshold.
Returns
-------
numpy.array
A processed image
list of int
A list of classes
list of list of 4 numbers
A list of new bounding boxes.
"""
if classes is None:
classes = []
if coords is None:
coords = []
if len(zoom_range) != 2:
raise Exception('zoom_range should be a tuple or list of two floats. ' 'Received arg: ', zoom_range)
if is_random:
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
tl.logging.info(" random_zoom : not zoom in/out")
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
else:
zx, zy = zoom_range
# tl.logging.info(zx, zy)
zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]])
h, w = im.shape[row_index], im.shape[col_index]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
im_new = affine_transform(im, transform_matrix, channel_index, fill_mode, cval, order)
# modified from obj_box_crop
def _get_coord(coord):
"""Input pixel-unit [x, y, w, h] format, then make sure [x, y] it is the up-left coordinates,
before getting the new coordinates.
Boxes outsides the cropped image will be removed.
"""
if is_center:
coord = obj_box_coord_centroid_to_upleft(coord)
# ======= pixel unit format and upleft, w, h ==========
x = (coord[0] - im.shape[1] / 2) / zy + im.shape[1] / 2 # only change this
y = (coord[1] - im.shape[0] / 2) / zx + im.shape[0] / 2 # only change this
w = coord[2] / zy # only change this
h = coord[3] / zx # only change thisS
if x < 0:
if x + w <= 0:
return None
w = w + x
x = 0
elif x > im_new.shape[1]: # object outside the cropped image
return None
if y < 0:
if y + h <= 0:
return None
h = h + y
y = 0
elif y > im_new.shape[0]: # object outside the cropped image
return None
if (x is not None) and (x + w > im_new.shape[1]): # box outside the cropped image
w = im_new.shape[1] - x
if (y is not None) and (y + h > im_new.shape[0]): # box outside the cropped image
h = im_new.shape[0] - y
if (w / (h + 1.) > thresh_wh2) or (h / (w + 1.) > thresh_wh2): # object shape strange: too narrow
# tl.logging.info('xx', w, h)
return None
if (w / (im_new.shape[1] * 1.) < thresh_wh) or (h / (im_new.shape[0] * 1.) <
thresh_wh): # object shape strange: too narrow
# tl.logging.info('yy', w, im_new.shape[1], h, im_new.shape[0])
return None
coord = [x, y, w, h]
# convert back if input format is center.
if is_center:
coord = obj_box_coord_upleft_to_centroid(coord)
return coord
coords_new = list()
classes_new = list()
for i, _ in enumerate(coords):
coord = coords[i]
if len(coord) != 4:
raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
if is_rescale:
# for scaled coord, upscaled before process and scale back in the end.
coord = obj_box_coord_scale_to_pixelunit(coord, im.shape)
coord = _get_coord(coord)
if coord is not None:
coord = obj_box_coord_rescale(coord, im_new.shape)
coords_new.append(coord)
classes_new.append(classes[i])
else:
coord = _get_coord(coord)
if coord is not None:
coords_new.append(coord)
classes_new.append(classes[i])
return im_new, classes_new, coords_new | [
"def",
"obj_box_zoom",
"(",
"im",
",",
"classes",
"=",
"None",
",",
"coords",
"=",
"None",
",",
"zoom_range",
"=",
"(",
"0.9",
",",
"1.1",
")",
",",
"row_index",
"=",
"0",
",",
"col_index",
"=",
"1",
",",
"channel_index",
"=",
"2",
",",
"fill_mode",
"=",
"'nearest'",
",",
"cval",
"=",
"0.",
",",
"order",
"=",
"1",
",",
"is_rescale",
"=",
"False",
",",
"is_center",
"=",
"False",
",",
"is_random",
"=",
"False",
",",
"thresh_wh",
"=",
"0.02",
",",
"thresh_wh2",
"=",
"12.",
")",
":",
"if",
"classes",
"is",
"None",
":",
"classes",
"=",
"[",
"]",
"if",
"coords",
"is",
"None",
":",
"coords",
"=",
"[",
"]",
"if",
"len",
"(",
"zoom_range",
")",
"!=",
"2",
":",
"raise",
"Exception",
"(",
"'zoom_range should be a tuple or list of two floats. '",
"'Received arg: '",
",",
"zoom_range",
")",
"if",
"is_random",
":",
"if",
"zoom_range",
"[",
"0",
"]",
"==",
"1",
"and",
"zoom_range",
"[",
"1",
"]",
"==",
"1",
":",
"zx",
",",
"zy",
"=",
"1",
",",
"1",
"tl",
".",
"logging",
".",
"info",
"(",
"\" random_zoom : not zoom in/out\"",
")",
"else",
":",
"zx",
",",
"zy",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
"zoom_range",
"[",
"0",
"]",
",",
"zoom_range",
"[",
"1",
"]",
",",
"2",
")",
"else",
":",
"zx",
",",
"zy",
"=",
"zoom_range",
"# tl.logging.info(zx, zy)",
"zoom_matrix",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"zx",
",",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"zy",
",",
"0",
"]",
",",
"[",
"0",
",",
"0",
",",
"1",
"]",
"]",
")",
"h",
",",
"w",
"=",
"im",
".",
"shape",
"[",
"row_index",
"]",
",",
"im",
".",
"shape",
"[",
"col_index",
"]",
"transform_matrix",
"=",
"transform_matrix_offset_center",
"(",
"zoom_matrix",
",",
"h",
",",
"w",
")",
"im_new",
"=",
"affine_transform",
"(",
"im",
",",
"transform_matrix",
",",
"channel_index",
",",
"fill_mode",
",",
"cval",
",",
"order",
")",
"# modified from obj_box_crop",
"def",
"_get_coord",
"(",
"coord",
")",
":",
"\"\"\"Input pixel-unit [x, y, w, h] format, then make sure [x, y] it is the up-left coordinates,\n before getting the new coordinates.\n Boxes outsides the cropped image will be removed.\n\n \"\"\"",
"if",
"is_center",
":",
"coord",
"=",
"obj_box_coord_centroid_to_upleft",
"(",
"coord",
")",
"# ======= pixel unit format and upleft, w, h ==========",
"x",
"=",
"(",
"coord",
"[",
"0",
"]",
"-",
"im",
".",
"shape",
"[",
"1",
"]",
"/",
"2",
")",
"/",
"zy",
"+",
"im",
".",
"shape",
"[",
"1",
"]",
"/",
"2",
"# only change this",
"y",
"=",
"(",
"coord",
"[",
"1",
"]",
"-",
"im",
".",
"shape",
"[",
"0",
"]",
"/",
"2",
")",
"/",
"zx",
"+",
"im",
".",
"shape",
"[",
"0",
"]",
"/",
"2",
"# only change this",
"w",
"=",
"coord",
"[",
"2",
"]",
"/",
"zy",
"# only change this",
"h",
"=",
"coord",
"[",
"3",
"]",
"/",
"zx",
"# only change thisS",
"if",
"x",
"<",
"0",
":",
"if",
"x",
"+",
"w",
"<=",
"0",
":",
"return",
"None",
"w",
"=",
"w",
"+",
"x",
"x",
"=",
"0",
"elif",
"x",
">",
"im_new",
".",
"shape",
"[",
"1",
"]",
":",
"# object outside the cropped image",
"return",
"None",
"if",
"y",
"<",
"0",
":",
"if",
"y",
"+",
"h",
"<=",
"0",
":",
"return",
"None",
"h",
"=",
"h",
"+",
"y",
"y",
"=",
"0",
"elif",
"y",
">",
"im_new",
".",
"shape",
"[",
"0",
"]",
":",
"# object outside the cropped image",
"return",
"None",
"if",
"(",
"x",
"is",
"not",
"None",
")",
"and",
"(",
"x",
"+",
"w",
">",
"im_new",
".",
"shape",
"[",
"1",
"]",
")",
":",
"# box outside the cropped image",
"w",
"=",
"im_new",
".",
"shape",
"[",
"1",
"]",
"-",
"x",
"if",
"(",
"y",
"is",
"not",
"None",
")",
"and",
"(",
"y",
"+",
"h",
">",
"im_new",
".",
"shape",
"[",
"0",
"]",
")",
":",
"# box outside the cropped image",
"h",
"=",
"im_new",
".",
"shape",
"[",
"0",
"]",
"-",
"y",
"if",
"(",
"w",
"/",
"(",
"h",
"+",
"1.",
")",
">",
"thresh_wh2",
")",
"or",
"(",
"h",
"/",
"(",
"w",
"+",
"1.",
")",
">",
"thresh_wh2",
")",
":",
"# object shape strange: too narrow",
"# tl.logging.info('xx', w, h)",
"return",
"None",
"if",
"(",
"w",
"/",
"(",
"im_new",
".",
"shape",
"[",
"1",
"]",
"*",
"1.",
")",
"<",
"thresh_wh",
")",
"or",
"(",
"h",
"/",
"(",
"im_new",
".",
"shape",
"[",
"0",
"]",
"*",
"1.",
")",
"<",
"thresh_wh",
")",
":",
"# object shape strange: too narrow",
"# tl.logging.info('yy', w, im_new.shape[1], h, im_new.shape[0])",
"return",
"None",
"coord",
"=",
"[",
"x",
",",
"y",
",",
"w",
",",
"h",
"]",
"# convert back if input format is center.",
"if",
"is_center",
":",
"coord",
"=",
"obj_box_coord_upleft_to_centroid",
"(",
"coord",
")",
"return",
"coord",
"coords_new",
"=",
"list",
"(",
")",
"classes_new",
"=",
"list",
"(",
")",
"for",
"i",
",",
"_",
"in",
"enumerate",
"(",
"coords",
")",
":",
"coord",
"=",
"coords",
"[",
"i",
"]",
"if",
"len",
"(",
"coord",
")",
"!=",
"4",
":",
"raise",
"AssertionError",
"(",
"\"coordinate should be 4 values : [x, y, w, h]\"",
")",
"if",
"is_rescale",
":",
"# for scaled coord, upscaled before process and scale back in the end.",
"coord",
"=",
"obj_box_coord_scale_to_pixelunit",
"(",
"coord",
",",
"im",
".",
"shape",
")",
"coord",
"=",
"_get_coord",
"(",
"coord",
")",
"if",
"coord",
"is",
"not",
"None",
":",
"coord",
"=",
"obj_box_coord_rescale",
"(",
"coord",
",",
"im_new",
".",
"shape",
")",
"coords_new",
".",
"append",
"(",
"coord",
")",
"classes_new",
".",
"append",
"(",
"classes",
"[",
"i",
"]",
")",
"else",
":",
"coord",
"=",
"_get_coord",
"(",
"coord",
")",
"if",
"coord",
"is",
"not",
"None",
":",
"coords_new",
".",
"append",
"(",
"coord",
")",
"classes_new",
".",
"append",
"(",
"classes",
"[",
"i",
"]",
")",
"return",
"im_new",
",",
"classes_new",
",",
"coords_new"
] | Zoom in and out of a single image, randomly or non-randomly, and compute the new bounding box coordinates.
Objects outside the cropped image will be removed.
Parameters
-----------
im : numpy.array
An image with dimension of [row, col, channel] (default).
classes : list of int or None
Class IDs.
coords : list of list of 4 int/float or None
Coordinates [[x, y, w, h], [x, y, w, h], ...].
zoom_range row_index col_index channel_index is_random fill_mode cval and order : see ``tl.prepro.zoom``.
is_rescale : boolean
Set to True, if the input coordinates are rescaled to [0, 1]. Default is False.
is_center : boolean
Set to True, if the x and y of coordinates are the centroid. (i.e. darknet format). Default is False.
thresh_wh : float
Threshold, remove the box if its ratio of width(height) to image size less than the threshold.
thresh_wh2 : float
Threshold, remove the box if its ratio of width to height or vice verse higher than the threshold.
Returns
-------
numpy.array
A processed image
list of int
A list of classes
list of list of 4 numbers
A list of new bounding boxes. | [
"Zoom",
"in",
"and",
"out",
"of",
"a",
"single",
"image",
"randomly",
"or",
"non",
"-",
"randomly",
"and",
"compute",
"the",
"new",
"bounding",
"box",
"coordinates",
".",
"Objects",
"outside",
"the",
"cropped",
"image",
"will",
"be",
"removed",
"."
sha: aa9e52e36c7058a7e6fd81d36563ca6850b21956 | url: https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/prepro.py#L3147-L3281 | partition: valid
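
A minimal sketch of the box-coordinate mapping that `_get_coord` applies above, in plain Python with fixed zoom factors instead of random ones; the image size, box, and zoom values here are hypothetical:

```python
im_h, im_w = 300, 400                  # hypothetical image: 300 rows x 400 cols
zx, zy = 1.25, 1.25                    # fixed zoom factors (row axis zx, col axis zy)
x, y, w, h = 100., 60., 80., 50.       # one pixel-unit box [x, y, w, h], up-left format

# Same arithmetic as _get_coord: shift towards the image centre and scale by 1/z,
# mirroring how the affine transform maps output pixels back to input pixels.
new_x = (x - im_w / 2) / zy + im_w / 2   # 120.0
new_y = (y - im_h / 2) / zx + im_h / 2   # 78.0
new_w = w / zy                           # 64.0
new_h = h / zx                           # 40.0
print([new_x, new_y, new_w, new_h])      # [120.0, 78.0, 64.0, 40.0]
```
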
repo: tensorlayer/tensorlayer | path: tensorlayer/prepro.py | func_name: pad_sequences | language: python

def pad_sequences(sequences, maxlen=None, dtype='int32', padding='post', truncating='pre', value=0.):
    """Pads each sequence to the same length: the length of the longest sequence.
    If maxlen is provided, any sequence longer than maxlen is truncated to maxlen.
    Truncation happens off either the beginning (default) or the end of the sequence.
    Supports pre-padding and post-padding (default).

    Parameters
    ----------
    sequences : list of list of int
        All sequences where each row is a sequence.
    maxlen : int
        Maximum length.
    dtype : numpy.dtype or str
        Data type to cast the resulting sequence.
    padding : str
        Either 'pre' or 'post', pad either before or after each sequence.
    truncating : str
        Either 'pre' or 'post', remove values from sequences larger than maxlen either
        in the beginning or in the end of the sequence.
    value : float
        Value used to pad the sequences.

    Returns
    ----------
    x : list of list of int
        With dimensions (number_of_sequences, maxlen); the padded array is returned as a nested list.

    Examples
    ----------
    >>> sequences = [[1,1,1,1,1],[2,2,2],[3,3]]
    >>> sequences = pad_sequences(sequences, maxlen=None, dtype='int32',
    ...                           padding='post', truncating='pre', value=0.)
    [[1, 1, 1, 1, 1], [2, 2, 2, 0, 0], [3, 3, 0, 0, 0]]

    """
    lengths = [len(s) for s in sequences]

    nb_samples = len(sequences)
    if maxlen is None:
        maxlen = np.max(lengths)

    # take the sample shape from the first non empty sequence
    # checking for consistency in the main loop below.
    sample_shape = tuple()
    for s in sequences:
        if len(s) > 0:
            sample_shape = np.asarray(s).shape[1:]
            break

    x = (np.ones((nb_samples, maxlen) + sample_shape) * value).astype(dtype)
    for idx, s in enumerate(sequences):
        if len(s) == 0:
            continue  # empty list was found
        if truncating == 'pre':
            trunc = s[-maxlen:]
        elif truncating == 'post':
            trunc = s[:maxlen]
        else:
            raise ValueError('Truncating type "%s" not understood' % truncating)

        # check `trunc` has expected shape
        trunc = np.asarray(trunc, dtype=dtype)
        if trunc.shape[1:] != sample_shape:
            raise ValueError(
                'Shape of sample %s of sequence at position %s is different from expected shape %s' %
                (trunc.shape[1:], idx, sample_shape)
            )

        if padding == 'post':
            x[idx, :len(trunc)] = trunc
        elif padding == 'pre':
            x[idx, -len(trunc):] = trunc
        else:
            raise ValueError('Padding type "%s" not understood' % padding)
    return x.tolist()
sha: aa9e52e36c7058a7e6fd81d36563ca6850b21956 | url: https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/prepro.py#L3284-L3362 | partition: valid
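
As a quick sketch of how `padding` and `truncating` interact (assuming `pad_sequences` above is in scope):

```python
seqs = [[1, 2, 3, 4, 5], [6, 7]]

# maxlen=3: the long sequence is truncated from the front (truncating='pre'),
# the short one is padded at the end (padding='post').
print(pad_sequences(seqs, maxlen=3))
# [[3, 4, 5], [6, 7, 0]]

# Pad in front instead.
print(pad_sequences(seqs, maxlen=3, padding='pre'))
# [[3, 4, 5], [0, 6, 7]]
```
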
repo: tensorlayer/tensorlayer | path: tensorlayer/prepro.py | func_name: remove_pad_sequences | language: python

def remove_pad_sequences(sequences, pad_id=0):
    """Remove padding.

    Parameters
    -----------
    sequences : list of list of int
        All sequences where each row is a sequence.
    pad_id : int
        The pad ID.

    Returns
    ----------
    list of list of int
        The processed sequences.

    Examples
    ----------
    >>> sequences = [[2,3,4,0,0], [5,1,2,3,4,0,0,0], [4,5,0,2,4,0,0,0]]
    >>> print(remove_pad_sequences(sequences, pad_id=0))
    [[2, 3, 4], [5, 1, 2, 3, 4], [4, 5, 0, 2, 4]]

    """
    sequences_out = copy.deepcopy(sequences)

    for i, _ in enumerate(sequences):
        # scan from the end of the sequence for the last non-pad token
        for j in range(1, len(sequences[i])):
            if sequences[i][-j] != pad_id:
                # keep everything up to and including the last non-pad token;
                # a slice of [0:-j + 1] would be empty when j == 1 (no trailing pad),
                # so compute the end index explicitly.
                sequences_out[i] = sequences_out[i][0:len(sequences_out[i]) - j + 1]
                break

    return sequences_out
sha: aa9e52e36c7058a7e6fd81d36563ca6850b21956 | url: https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/prepro.py#L3365-L3399 | partition: valid
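
A short check of the two cases handled by the inner loop (assuming `remove_pad_sequences` above is in scope): trailing pads are stripped, and a sequence whose last token is not a pad is kept intact:

```python
print(remove_pad_sequences([[2, 3, 4, 0, 0]], pad_id=0))   # [[2, 3, 4]]
print(remove_pad_sequences([[4, 5, 0, 2, 4]], pad_id=0))   # [[4, 5, 0, 2, 4]]
```
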
repo: tensorlayer/tensorlayer | path: tensorlayer/prepro.py | func_name: process_sequences | language: python

def process_sequences(sequences, end_id=0, pad_val=0, is_shorten=True, remain_end_id=False):
    """Set all tokens (ids) after the END token to the padding value, and then (optionally) shorten each
    sequence to the maximum sequence length in this batch.

    Parameters
    -----------
    sequences : list of list of int
        All sequences where each row is a sequence.
    end_id : int
        The special token for END.
    pad_val : int
        Replace the `end_id` and the IDs after `end_id` with this value.
    is_shorten : boolean
        Shorten the sequences. Default is True.
    remain_end_id : boolean
        Keep an `end_id` in the end. Default is False.

    Returns
    ----------
    list of list of int
        The processed sequences.

    Examples
    ---------
    >>> sentences_ids = [[4, 3, 5, 3, 2, 2, 2, 2],  <-- end_id is 2
    ...                  [5, 3, 9, 4, 9, 2, 2, 3]]  <-- end_id is 2
    >>> sentences_ids = process_sequences(sentences_ids, end_id=vocab.end_id, pad_val=0, is_shorten=True)
    [[4, 3, 5, 3, 0], [5, 3, 9, 4, 9]]

    """
    max_length = 0
    for _, seq in enumerate(sequences):
        is_end = False
        for i_w, n in enumerate(seq):
            if n == end_id and is_end is False:  # 1st time to see end_id
                is_end = True
                if max_length < i_w:
                    max_length = i_w
                if remain_end_id is False:
                    seq[i_w] = pad_val  # set end_id to pad_val
            elif is_end is True:
                seq[i_w] = pad_val

    if remain_end_id is True:
        max_length += 1

    if is_shorten:
        for i, seq in enumerate(sequences):
            sequences[i] = seq[:max_length]

    return sequences
sha: aa9e52e36c7058a7e6fd81d36563ca6850b21956 | url: https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/prepro.py#L3402-L3449 | partition: valid
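
A sketch of the `remain_end_id=True` variant (assuming `process_sequences` above is in scope); the first END token is kept, everything after it becomes padding, and the batch is shortened to one position past the latest END:

```python
ids = [[4, 3, 5, 3, 2, 2, 2, 2],   # end_id is 2
       [5, 3, 9, 4, 9, 2, 2, 3]]
print(process_sequences(ids, end_id=2, pad_val=0, is_shorten=True, remain_end_id=True))
# [[4, 3, 5, 3, 2, 0], [5, 3, 9, 4, 9, 2]]
```
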
repo: tensorlayer/tensorlayer | path: tensorlayer/prepro.py | func_name: sequences_add_start_id | language: python

def sequences_add_start_id(sequences, start_id=0, remove_last=False):
    """Add a special start token (id) at the beginning of each sequence.

    Parameters
    ------------
    sequences : list of list of int
        All sequences where each row is a sequence.
    start_id : int
        The start ID.
    remove_last : boolean
        Remove the last value of each sequence. Usually used to remove the end ID.

    Returns
    ----------
    list of list of int
        The processed sequences.

    Examples
    ---------
    >>> sentences_ids = [[4,3,5,3,2,2,2,2], [5,3,9,4,9,2,2,3]]
    >>> sentences_ids = sequences_add_start_id(sentences_ids, start_id=2)
    [[2, 4, 3, 5, 3, 2, 2, 2, 2], [2, 5, 3, 9, 4, 9, 2, 2, 3]]
    >>> sentences_ids = sequences_add_start_id(sentences_ids, start_id=2, remove_last=True)
    [[2, 4, 3, 5, 3, 2, 2, 2], [2, 5, 3, 9, 4, 9, 2, 2]]

    For Seq2seq

    >>> input = [a, b, c]
    >>> target = [x, y, z]
    >>> decode_seq = [start_id, a, b] <-- sequences_add_start_id(input, start_id, True)

    """
    sequences_out = [[] for _ in range(len(sequences))]  # [[]] * len(sequences)

    for i, _ in enumerate(sequences):
        if remove_last:
            sequences_out[i] = [start_id] + sequences[i][:-1]
        else:
            sequences_out[i] = [start_id] + sequences[i]

    return sequences_out
sha: aa9e52e36c7058a7e6fd81d36563ca6850b21956 | url: https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/prepro.py#L3452-L3490 | partition: valid
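
A small seq2seq sketch of the `remove_last=True` case (assuming `sequences_add_start_id` above is in scope); `1` and `2` here are hypothetical start and end ids:

```python
target = [[7, 8, 9, 2]]                                   # 2 = end id
decoder_input = sequences_add_start_id(target, start_id=1, remove_last=True)
print(decoder_input)                                      # [[1, 7, 8, 9]]
```
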
repo: tensorlayer/tensorlayer | path: tensorlayer/prepro.py | func_name: sequences_add_end_id | language: python

def sequences_add_end_id(sequences, end_id=888):
    """Add a special end token (id) at the end of each sequence.

    Parameters
    -----------
    sequences : list of list of int
        All sequences where each row is a sequence.
    end_id : int
        The end ID.

    Returns
    ----------
    list of list of int
        The processed sequences.

    Examples
    ---------
    >>> sequences = [[1,2,3],[4,5,6,7]]
    >>> print(sequences_add_end_id(sequences, end_id=999))
    [[1, 2, 3, 999], [4, 5, 6, 7, 999]]

    """
    sequences_out = [[] for _ in range(len(sequences))]  # [[]] * len(sequences)

    for i, _ in enumerate(sequences):
        sequences_out[i] = sequences[i] + [end_id]

    return sequences_out
sha: aa9e52e36c7058a7e6fd81d36563ca6850b21956 | url: https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/prepro.py#L3493-L3518 | partition: valid
repo: tensorlayer/tensorlayer | path: tensorlayer/prepro.py | func_name: sequences_add_end_id_after_pad | language: python

def sequences_add_end_id_after_pad(sequences, end_id=888, pad_id=0):
    """Add a special end token (id) at the position of the first pad in each sequence.

    Parameters
    -----------
    sequences : list of list of int
        All sequences where each row is a sequence.
    end_id : int
        The end ID.
    pad_id : int
        The pad ID.

    Returns
    ----------
    list of list of int
        The processed sequences.

    Examples
    ---------
    >>> sequences = [[1,2,0,0], [1,2,3,0], [1,2,3,4]]
    >>> print(sequences_add_end_id_after_pad(sequences, end_id=99, pad_id=0))
    [[1, 2, 99, 0], [1, 2, 3, 99], [1, 2, 3, 4]]

    """
    # sequences_out = [[] for _ in range(len(sequences))]  # [[]] * len(sequences)
    sequences_out = copy.deepcopy(sequences)

    # replace the first pad in each sequence with the end id;
    # sequences without any pad (already full length) are left unchanged.
    for i, v in enumerate(sequences):
        for j, _v2 in enumerate(v):
            if sequences[i][j] == pad_id:
                sequences_out[i][j] = end_id
                break

    return sequences_out
sha: aa9e52e36c7058a7e6fd81d36563ca6850b21956 | url: https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/prepro.py#L3521-L3567 | partition: valid
repo: tensorlayer/tensorlayer | path: tensorlayer/prepro.py | func_name: sequences_get_mask | language: python

def sequences_get_mask(sequences, pad_val=0):
    """Return mask for sequences.

    Parameters
    -----------
    sequences : list of list of int
        All sequences where each row is a sequence.
    pad_val : int
        The pad value.

    Returns
    ----------
    list of list of int
        The mask.

    Examples
    ---------
    >>> sentences_ids = [[4, 0, 5, 3, 0, 0],
    ...                  [5, 3, 9, 4, 9, 0]]
    >>> mask = sequences_get_mask(sentences_ids, pad_val=0)
    [[1 1 1 1 0 0]
     [1 1 1 1 1 0]]

    """
    mask = np.ones_like(sequences)
    for i, seq in enumerate(sequences):
        for i_w in reversed(range(len(seq))):
            if seq[i_w] == pad_val:
                mask[i, i_w] = 0
            else:
                break  # <-- exit the for loop, process next sequence
    return mask
sha: aa9e52e36c7058a7e6fd81d36563ca6850b21956 | url: https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/prepro.py#L3570-L3601 | partition: valid
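
The mask pairs naturally with `pad_sequences` above; a minimal sketch (assuming both functions are in scope):

```python
padded = pad_sequences([[4, 5, 6], [7, 8]], value=0)   # [[4, 5, 6], [7, 8, 0]]
mask = sequences_get_mask(padded, pad_val=0)
print(mask)
# [[1 1 1]
#  [1 1 0]]
```
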
repo: tensorlayer/tensorlayer | path: tensorlayer/prepro.py | func_name: keypoint_random_crop | language: python

def keypoint_random_crop(image, annos, mask=None, size=(368, 368)):
    """Randomly crop an image and the corresponding keypoints without influencing the scales,
    which are given by ``keypoint_random_resize_shortestedge``.

    Parameters
    -----------
    image : 3 channel image
        The given image for augmentation.
    annos : list of list of floats
        The keypoints annotation of people.
    mask : single channel image or None
        The mask if available.
    size : tuple of int
        The size of the returned image.

    Returns
    ----------
    preprocessed image, annotation, mask

    """
    _target_height = size[0]
    _target_width = size[1]
    target_size = (_target_width, _target_height)

    if len(np.shape(image)) == 2:
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)

    height, width, _ = np.shape(image)

    for _ in range(50):
        x = random.randrange(0, width - target_size[0]) if width > target_size[0] else 0
        y = random.randrange(0, height - target_size[1]) if height > target_size[1] else 0

        # check whether any face is inside the box to generate a reasonably-balanced datasets
        for joint in annos:
            if x <= joint[0][0] < x + target_size[0] and y <= joint[0][1] < y + target_size[1]:
                break

    def pose_crop(image, annos, mask, x, y, w, h):  # TODO : speed up with affine transform
        # adjust image
        target_size = (w, h)

        img = image
        resized = img[y:y + target_size[1], x:x + target_size[0], :]
        resized_mask = mask[y:y + target_size[1], x:x + target_size[0]]

        # adjust meta data
        adjust_joint_list = []
        for joint in annos:
            adjust_joint = []
            for point in joint:
                if point[0] < -10 or point[1] < -10:
                    adjust_joint.append((-1000, -1000))
                    continue
                new_x, new_y = point[0] - x, point[1] - y
                # should not crop outside the image
                if new_x > w - 1 or new_y > h - 1:
                    adjust_joint.append((-1000, -1000))
                    continue
                adjust_joint.append((new_x, new_y))
            adjust_joint_list.append(adjust_joint)

        return resized, adjust_joint_list, resized_mask

    return pose_crop(image, annos, mask, x, y, target_size[0], target_size[1])
sha: aa9e52e36c7058a7e6fd81d36563ca6850b21956 | url: https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/prepro.py#L3604-L3666 | partition: valid
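
A minimal usage sketch with dummy data (assuming `keypoint_random_crop` above is in scope, with `cv2`, `numpy`, and `random` available); the image, mask, and keypoints below are hypothetical:

```python
import numpy as np

image = np.zeros((500, 600, 3), dtype=np.uint8)   # rows x cols x channels
mask = np.zeros((500, 600), dtype=np.uint8)
annos = [[(300., 250.), (320., 260.)]]            # one person with two keypoints

img, new_annos, new_mask = keypoint_random_crop(image, annos, mask=mask, size=(368, 368))
print(img.shape, new_mask.shape)                  # (368, 368, 3) (368, 368)
```
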
tensorlayer/tensorlayer | tensorlayer/prepro.py | keypoint_resize_random_crop | def keypoint_resize_random_crop(image, annos, mask=None, size=(368, 368)):
"""Reszie the image to make either its width or height equals to the given sizes.
Then randomly crop image without influence scales.
Resize the image match with the minimum size before cropping, this API will change the zoom scale of object.
Parameters
-----------
image : 3 channel image
The given image for augmentation.
annos : list of list of floats
The keypoints annotation of people.
mask : single channel image or None
The mask if available.
size : tuple of int
The size (height, width) of returned image.
Returns
----------
preprocessed image, annos, mask
"""
if len(np.shape(image)) == 2:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
def resize_image(image, annos, mask, target_width, target_height):
"""Reszie image
Parameters
-----------
image : 3 channel image
The given image.
annos : list of list of floats
Keypoints of people
mask : single channel image or None
The mask if available.
target_width : int
Expected width of returned image.
target_height : int
Expected height of returned image.
Returns
----------
preprocessed input image, annos, mask
"""
y, x, _ = np.shape(image)
ratio_y = target_height / y
ratio_x = target_width / x
new_joints = []
# update meta
for people in annos:
new_keypoints = []
for keypoints in people:
if keypoints[0] < 0 or keypoints[1] < 0:
new_keypoints.append((-1000, -1000))
continue
pts = (int(keypoints[0] * ratio_x + 0.5), int(keypoints[1] * ratio_y + 0.5))
if pts[0] > target_width - 1 or pts[1] > target_height - 1:
new_keypoints.append((-1000, -1000))
continue
new_keypoints.append(pts)
new_joints.append(new_keypoints)
annos = new_joints
new_image = cv2.resize(image, (target_width, target_height), interpolation=cv2.INTER_AREA)
if mask is not None:
new_mask = cv2.resize(mask, (target_width, target_height), interpolation=cv2.INTER_AREA)
return new_image, annos, new_mask
else:
return new_image, annos, None
_target_height = size[0]
_target_width = size[1]
if len(np.shape(image)) == 2:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
height, width, _ = np.shape(image)
# print("the size of original img is:", height, width)
if height <= width:
ratio = _target_height / height
new_width = int(ratio * width)
if height == width:
new_width = _target_height
image, annos, mask = resize_image(image, annos, mask, new_width, _target_height)
# for i in annos:
# if len(i) is not 19:
# print('Joints of person is not 19 ERROR FROM RESIZE')
if new_width > _target_width:
crop_range_x = np.random.randint(0, new_width - _target_width)
else:
crop_range_x = 0
image = image[:, crop_range_x:crop_range_x + _target_width, :]
if mask is not None:
mask = mask[:, crop_range_x:crop_range_x + _target_width]
# joint_list= []
new_joints = []
#annos-pepople-joints (must be 19 or [])
for people in annos:
# print("number of keypoints is", np.shape(people))
new_keypoints = []
for keypoints in people:
if keypoints[0] < -10 or keypoints[1] < -10:
new_keypoints.append((-1000, -1000))
continue
top = crop_range_x + _target_width - 1
if keypoints[0] >= crop_range_x and keypoints[0] <= top:
# pts = (keypoints[0]-crop_range_x, keypoints[1])
pts = (int(keypoints[0] - crop_range_x), int(keypoints[1]))
else:
pts = (-1000, -1000)
new_keypoints.append(pts)
new_joints.append(new_keypoints)
# if len(new_keypoints) != 19:
# print('1:The Length of joints list should be 0 or 19 but actually:', len(new_keypoints))
annos = new_joints
if height > width:
ratio = _target_width / width
new_height = int(ratio * height)
image, annos, mask = resize_image(image, annos, mask, _target_width, new_height)
# for i in annos:
# if len(i) is not 19:
# print('Joints of person is not 19 ERROR')
if new_height > _target_height:
crop_range_y = np.random.randint(0, new_height - _target_height)
else:
crop_range_y = 0
image = image[crop_range_y:crop_range_y + _target_width, :, :]
if mask is not None:
mask = mask[crop_range_y:crop_range_y + _target_width, :]
new_joints = []
for people in annos: # TODO : speed up with affine transform
new_keypoints = []
for keypoints in people:
# case: original points are not usable
if keypoints[0] < 0 or keypoints[1] < 0:
new_keypoints.append((-1000, -1000))
continue
# y axis coordinate change
bot = crop_range_y + _target_height - 1
if keypoints[1] >= crop_range_y and keypoints[1] <= bot:
# pts = (keypoints[0], keypoints[1]-crop_range_y)
pts = (int(keypoints[0]), int(keypoints[1] - crop_range_y))
# if pts[0]>367 or pts[1]>367:
# print('Error2')
else:
pts = (-1000, -1000)
new_keypoints.append(pts)
new_joints.append(new_keypoints)
# if len(new_keypoints) != 19:
# print('2:The Length of joints list should be 0 or 19 but actually:', len(new_keypoints))
annos = new_joints
# mask = cv2.resize(mask, (46, 46), interpolation=cv2.INTER_AREA)
if mask is not None:
return image, annos, mask
else:
return image, annos, None | python | def keypoint_resize_random_crop(image, annos, mask=None, size=(368, 368)):
"""Reszie the image to make either its width or height equals to the given sizes.
Then randomly crop image without influence scales.
Resize the image match with the minimum size before cropping, this API will change the zoom scale of object.
Parameters
-----------
image : 3 channel image
The given image for augmentation.
annos : list of list of floats
The keypoints annotation of people.
mask : single channel image or None
The mask if available.
size : tuple of int
The size (height, width) of returned image.
Returns
----------
preprocessed image, annos, mask
"""
if len(np.shape(image)) == 2:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
def resize_image(image, annos, mask, target_width, target_height):
"""Reszie image
Parameters
-----------
image : 3 channel image
The given image.
annos : list of list of floats
Keypoints of people
mask : single channel image or None
The mask if available.
target_width : int
Expected width of returned image.
target_height : int
Expected height of returned image.
Returns
----------
preprocessed input image, annos, mask
"""
y, x, _ = np.shape(image)
ratio_y = target_height / y
ratio_x = target_width / x
new_joints = []
# update meta
for people in annos:
new_keypoints = []
for keypoints in people:
if keypoints[0] < 0 or keypoints[1] < 0:
new_keypoints.append((-1000, -1000))
continue
pts = (int(keypoints[0] * ratio_x + 0.5), int(keypoints[1] * ratio_y + 0.5))
if pts[0] > target_width - 1 or pts[1] > target_height - 1:
new_keypoints.append((-1000, -1000))
continue
new_keypoints.append(pts)
new_joints.append(new_keypoints)
annos = new_joints
new_image = cv2.resize(image, (target_width, target_height), interpolation=cv2.INTER_AREA)
if mask is not None:
new_mask = cv2.resize(mask, (target_width, target_height), interpolation=cv2.INTER_AREA)
return new_image, annos, new_mask
else:
return new_image, annos, None
_target_height = size[0]
_target_width = size[1]
if len(np.shape(image)) == 2:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
height, width, _ = np.shape(image)
# print("the size of original img is:", height, width)
if height <= width:
ratio = _target_height / height
new_width = int(ratio * width)
if height == width:
new_width = _target_height
image, annos, mask = resize_image(image, annos, mask, new_width, _target_height)
# for i in annos:
# if len(i) is not 19:
# print('Joints of person is not 19 ERROR FROM RESIZE')
if new_width > _target_width:
crop_range_x = np.random.randint(0, new_width - _target_width)
else:
crop_range_x = 0
image = image[:, crop_range_x:crop_range_x + _target_width, :]
if mask is not None:
mask = mask[:, crop_range_x:crop_range_x + _target_width]
# joint_list= []
new_joints = []
# annos-people-joints (must be 19 or [])
for people in annos:
# print("number of keypoints is", np.shape(people))
new_keypoints = []
for keypoints in people:
if keypoints[0] < -10 or keypoints[1] < -10:
new_keypoints.append((-1000, -1000))
continue
top = crop_range_x + _target_width - 1
if keypoints[0] >= crop_range_x and keypoints[0] <= top:
# pts = (keypoints[0]-crop_range_x, keypoints[1])
pts = (int(keypoints[0] - crop_range_x), int(keypoints[1]))
else:
pts = (-1000, -1000)
new_keypoints.append(pts)
new_joints.append(new_keypoints)
# if len(new_keypoints) != 19:
# print('1:The Length of joints list should be 0 or 19 but actually:', len(new_keypoints))
annos = new_joints
if height > width:
ratio = _target_width / width
new_height = int(ratio * height)
image, annos, mask = resize_image(image, annos, mask, _target_width, new_height)
# for i in annos:
# if len(i) is not 19:
# print('Joints of person is not 19 ERROR')
if new_height > _target_height:
crop_range_y = np.random.randint(0, new_height - _target_height)
else:
crop_range_y = 0
image = image[crop_range_y:crop_range_y + _target_height, :, :]
if mask is not None:
mask = mask[crop_range_y:crop_range_y + _target_height, :]
new_joints = []
for people in annos: # TODO : speed up with affine transform
new_keypoints = []
for keypoints in people:
# case: original points are not usable
if keypoints[0] < 0 or keypoints[1] < 0:
new_keypoints.append((-1000, -1000))
continue
# y axis coordinate change
bot = crop_range_y + _target_height - 1
if keypoints[1] >= crop_range_y and keypoints[1] <= bot:
# pts = (keypoints[0], keypoints[1]-crop_range_y)
pts = (int(keypoints[0]), int(keypoints[1] - crop_range_y))
# if pts[0]>367 or pts[1]>367:
# print('Error2')
else:
pts = (-1000, -1000)
new_keypoints.append(pts)
new_joints.append(new_keypoints)
# if len(new_keypoints) != 19:
# print('2:The Length of joints list should be 0 or 19 but actually:', len(new_keypoints))
annos = new_joints
# mask = cv2.resize(mask, (46, 46), interpolation=cv2.INTER_AREA)
if mask is not None:
return image, annos, mask
else:
return image, annos, None | [
"def",
"keypoint_resize_random_crop",
"(",
"image",
",",
"annos",
",",
"mask",
"=",
"None",
",",
"size",
"=",
"(",
"368",
",",
"368",
")",
")",
":",
"if",
"len",
"(",
"np",
".",
"shape",
"(",
"image",
")",
")",
"==",
"2",
":",
"image",
"=",
"cv2",
".",
"cvtColor",
"(",
"image",
",",
"cv2",
".",
"COLOR_GRAY2RGB",
")",
"def",
"resize_image",
"(",
"image",
",",
"annos",
",",
"mask",
",",
"target_width",
",",
"target_height",
")",
":",
"\"\"\"Reszie image\n\n Parameters\n -----------\n image : 3 channel image\n The given image.\n annos : list of list of floats\n Keypoints of people\n mask : single channel image or None\n The mask if available.\n target_width : int\n Expected width of returned image.\n target_height : int\n Expected height of returned image.\n\n Returns\n ----------\n preprocessed input image, annos, mask\n\n \"\"\"",
"y",
",",
"x",
",",
"_",
"=",
"np",
".",
"shape",
"(",
"image",
")",
"ratio_y",
"=",
"target_height",
"/",
"y",
"ratio_x",
"=",
"target_width",
"/",
"x",
"new_joints",
"=",
"[",
"]",
"# update meta",
"for",
"people",
"in",
"annos",
":",
"new_keypoints",
"=",
"[",
"]",
"for",
"keypoints",
"in",
"people",
":",
"if",
"keypoints",
"[",
"0",
"]",
"<",
"0",
"or",
"keypoints",
"[",
"1",
"]",
"<",
"0",
":",
"new_keypoints",
".",
"append",
"(",
"(",
"-",
"1000",
",",
"-",
"1000",
")",
")",
"continue",
"pts",
"=",
"(",
"int",
"(",
"keypoints",
"[",
"0",
"]",
"*",
"ratio_x",
"+",
"0.5",
")",
",",
"int",
"(",
"keypoints",
"[",
"1",
"]",
"*",
"ratio_y",
"+",
"0.5",
")",
")",
"if",
"pts",
"[",
"0",
"]",
">",
"target_width",
"-",
"1",
"or",
"pts",
"[",
"1",
"]",
">",
"target_height",
"-",
"1",
":",
"new_keypoints",
".",
"append",
"(",
"(",
"-",
"1000",
",",
"-",
"1000",
")",
")",
"continue",
"new_keypoints",
".",
"append",
"(",
"pts",
")",
"new_joints",
".",
"append",
"(",
"new_keypoints",
")",
"annos",
"=",
"new_joints",
"new_image",
"=",
"cv2",
".",
"resize",
"(",
"image",
",",
"(",
"target_width",
",",
"target_height",
")",
",",
"interpolation",
"=",
"cv2",
".",
"INTER_AREA",
")",
"if",
"mask",
"is",
"not",
"None",
":",
"new_mask",
"=",
"cv2",
".",
"resize",
"(",
"mask",
",",
"(",
"target_width",
",",
"target_height",
")",
",",
"interpolation",
"=",
"cv2",
".",
"INTER_AREA",
")",
"return",
"new_image",
",",
"annos",
",",
"new_mask",
"else",
":",
"return",
"new_image",
",",
"annos",
",",
"None",
"_target_height",
"=",
"size",
"[",
"0",
"]",
"_target_width",
"=",
"size",
"[",
"1",
"]",
"if",
"len",
"(",
"np",
".",
"shape",
"(",
"image",
")",
")",
"==",
"2",
":",
"image",
"=",
"cv2",
".",
"cvtColor",
"(",
"image",
",",
"cv2",
".",
"COLOR_GRAY2RGB",
")",
"height",
",",
"width",
",",
"_",
"=",
"np",
".",
"shape",
"(",
"image",
")",
"# print(\"the size of original img is:\", height, width)",
"if",
"height",
"<=",
"width",
":",
"ratio",
"=",
"_target_height",
"/",
"height",
"new_width",
"=",
"int",
"(",
"ratio",
"*",
"width",
")",
"if",
"height",
"==",
"width",
":",
"new_width",
"=",
"_target_height",
"image",
",",
"annos",
",",
"mask",
"=",
"resize_image",
"(",
"image",
",",
"annos",
",",
"mask",
",",
"new_width",
",",
"_target_height",
")",
"# for i in annos:",
"# if len(i) is not 19:",
"# print('Joints of person is not 19 ERROR FROM RESIZE')",
"if",
"new_width",
">",
"_target_width",
":",
"crop_range_x",
"=",
"np",
".",
"random",
".",
"randint",
"(",
"0",
",",
"new_width",
"-",
"_target_width",
")",
"else",
":",
"crop_range_x",
"=",
"0",
"image",
"=",
"image",
"[",
":",
",",
"crop_range_x",
":",
"crop_range_x",
"+",
"_target_width",
",",
":",
"]",
"if",
"mask",
"is",
"not",
"None",
":",
"mask",
"=",
"mask",
"[",
":",
",",
"crop_range_x",
":",
"crop_range_x",
"+",
"_target_width",
"]",
"# joint_list= []",
"new_joints",
"=",
"[",
"]",
"#annos-pepople-joints (must be 19 or [])",
"for",
"people",
"in",
"annos",
":",
"# print(\"number of keypoints is\", np.shape(people))",
"new_keypoints",
"=",
"[",
"]",
"for",
"keypoints",
"in",
"people",
":",
"if",
"keypoints",
"[",
"0",
"]",
"<",
"-",
"10",
"or",
"keypoints",
"[",
"1",
"]",
"<",
"-",
"10",
":",
"new_keypoints",
".",
"append",
"(",
"(",
"-",
"1000",
",",
"-",
"1000",
")",
")",
"continue",
"top",
"=",
"crop_range_x",
"+",
"_target_width",
"-",
"1",
"if",
"keypoints",
"[",
"0",
"]",
">=",
"crop_range_x",
"and",
"keypoints",
"[",
"0",
"]",
"<=",
"top",
":",
"# pts = (keypoints[0]-crop_range_x, keypoints[1])",
"pts",
"=",
"(",
"int",
"(",
"keypoints",
"[",
"0",
"]",
"-",
"crop_range_x",
")",
",",
"int",
"(",
"keypoints",
"[",
"1",
"]",
")",
")",
"else",
":",
"pts",
"=",
"(",
"-",
"1000",
",",
"-",
"1000",
")",
"new_keypoints",
".",
"append",
"(",
"pts",
")",
"new_joints",
".",
"append",
"(",
"new_keypoints",
")",
"# if len(new_keypoints) != 19:",
"# print('1:The Length of joints list should be 0 or 19 but actually:', len(new_keypoints))",
"annos",
"=",
"new_joints",
"if",
"height",
">",
"width",
":",
"ratio",
"=",
"_target_width",
"/",
"width",
"new_height",
"=",
"int",
"(",
"ratio",
"*",
"height",
")",
"image",
",",
"annos",
",",
"mask",
"=",
"resize_image",
"(",
"image",
",",
"annos",
",",
"mask",
",",
"_target_width",
",",
"new_height",
")",
"# for i in annos:",
"# if len(i) is not 19:",
"# print('Joints of person is not 19 ERROR')",
"if",
"new_height",
">",
"_target_height",
":",
"crop_range_y",
"=",
"np",
".",
"random",
".",
"randint",
"(",
"0",
",",
"new_height",
"-",
"_target_height",
")",
"else",
":",
"crop_range_y",
"=",
"0",
"image",
"=",
"image",
"[",
"crop_range_y",
":",
"crop_range_y",
"+",
"_target_width",
",",
":",
",",
":",
"]",
"if",
"mask",
"is",
"not",
"None",
":",
"mask",
"=",
"mask",
"[",
"crop_range_y",
":",
"crop_range_y",
"+",
"_target_width",
",",
":",
"]",
"new_joints",
"=",
"[",
"]",
"for",
"people",
"in",
"annos",
":",
"# TODO : speed up with affine transform",
"new_keypoints",
"=",
"[",
"]",
"for",
"keypoints",
"in",
"people",
":",
"# case orginal points are not usable",
"if",
"keypoints",
"[",
"0",
"]",
"<",
"0",
"or",
"keypoints",
"[",
"1",
"]",
"<",
"0",
":",
"new_keypoints",
".",
"append",
"(",
"(",
"-",
"1000",
",",
"-",
"1000",
")",
")",
"continue",
"# y axis coordinate change",
"bot",
"=",
"crop_range_y",
"+",
"_target_height",
"-",
"1",
"if",
"keypoints",
"[",
"1",
"]",
">=",
"crop_range_y",
"and",
"keypoints",
"[",
"1",
"]",
"<=",
"bot",
":",
"# pts = (keypoints[0], keypoints[1]-crop_range_y)",
"pts",
"=",
"(",
"int",
"(",
"keypoints",
"[",
"0",
"]",
")",
",",
"int",
"(",
"keypoints",
"[",
"1",
"]",
"-",
"crop_range_y",
")",
")",
"# if pts[0]>367 or pts[1]>367:",
"# print('Error2')",
"else",
":",
"pts",
"=",
"(",
"-",
"1000",
",",
"-",
"1000",
")",
"new_keypoints",
".",
"append",
"(",
"pts",
")",
"new_joints",
".",
"append",
"(",
"new_keypoints",
")",
"# if len(new_keypoints) != 19:",
"# print('2:The Length of joints list should be 0 or 19 but actually:', len(new_keypoints))",
"annos",
"=",
"new_joints",
"# mask = cv2.resize(mask, (46, 46), interpolation=cv2.INTER_AREA)",
"if",
"mask",
"is",
"not",
"None",
":",
"return",
"image",
",",
"annos",
",",
"mask",
"else",
":",
"return",
"image",
",",
"annos",
",",
"None"
] | Resize the image to make either its width or height equal to the given size.
Then randomly crop the image without affecting the scale.
Since the image is resized to match the minimum size before cropping, this API will change the zoom scale of the object.
Parameters
-----------
image : 3 channel image
The given image for augmentation.
annos : list of list of floats
The keypoints annotation of people.
mask : single channel image or None
The mask if available.
size : tuple of int
The size (height, width) of returned image.
Returns
----------
preprocessed image, annos, mask | [
"Reszie",
"the",
"image",
"to",
"make",
"either",
"its",
"width",
"or",
"height",
"equals",
"to",
"the",
"given",
"sizes",
".",
"Then",
"randomly",
"crop",
"image",
"without",
"influence",
"scales",
".",
"Resize",
"the",
"image",
"match",
"with",
"the",
"minimum",
"size",
"before",
"cropping",
"this",
"API",
"will",
"change",
"the",
"zoom",
"scale",
"of",
"object",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/prepro.py#L3669-L3841 | valid |
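A minimal usage sketch for the keypoint_resize_random_crop record above. It assumes TensorLayer is importable as tl; the image path and the keypoint list are hypothetical placeholders, and unlabeled joints use the (-1000, -1000) sentinel convention from this file:

import cv2
import tensorlayer as tl

image = cv2.imread('person.jpg')  # hypothetical path; any HxWx3 image works
annos = [[(120, 80), (125, 95)] + [(-1000, -1000)] * 17]  # one person, 19 joints, most unlabeled
img, new_annos, new_mask = tl.prepro.keypoint_resize_random_crop(image, annos, mask=None, size=(368, 368))
print(img.shape)  # (368, 368, 3): the shorter side is resized to 368, then a random 368x368 crop is taken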
tensorlayer/tensorlayer | tensorlayer/prepro.py | keypoint_random_rotate | def keypoint_random_rotate(image, annos, mask=None, rg=15.):
"""Rotate an image and corresponding keypoints.
Parameters
-----------
image : 3 channel image
The given image for augmentation.
annos : list of list of floats
The keypoints annotation of people.
mask : single channel image or None
The mask if available.
rg : int or float
Degree to rotate, usually 0 ~ 180.
Returns
----------
preprocessed image, annos, mask
"""
def _rotate_coord(shape, newxy, point, angle):
angle = -1 * angle / 180.0 * math.pi
ox, oy = shape
px, py = point
ox /= 2
oy /= 2
qx = math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
qy = math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
new_x, new_y = newxy
qx += ox - new_x
qy += oy - new_y
return int(qx + 0.5), int(qy + 0.5)
def _largest_rotated_rect(w, h, angle):
"""
Get largest rectangle after rotation.
http://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders
"""
angle = angle / 180.0 * math.pi
if w <= 0 or h <= 0:
return 0, 0
width_is_longer = w >= h
side_long, side_short = (w, h) if width_is_longer else (h, w)
# since the solutions for angle, -angle and 180-angle are all the same,
# it suffices to look at the first quadrant and the absolute values of sin, cos:
sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle))
if side_short <= 2. * sin_a * cos_a * side_long:
# half constrained case: two crop corners touch the longer side,
# the other two corners are on the mid-line parallel to the longer side
x = 0.5 * side_short
wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a)
else:
# fully constrained case: crop touches all 4 sides
cos_2a = cos_a * cos_a - sin_a * sin_a
wr, hr = (w * cos_a - h * sin_a) / cos_2a, (h * cos_a - w * sin_a) / cos_2a
return int(np.round(wr)), int(np.round(hr))
img_shape = np.shape(image)
height = img_shape[0]
width = img_shape[1]
deg = np.random.uniform(-rg, rg)
img = image
center = (img.shape[1] * 0.5, img.shape[0] * 0.5) # x, y
rot_m = cv2.getRotationMatrix2D((int(center[0]), int(center[1])), deg, 1)
ret = cv2.warpAffine(img, rot_m, img.shape[1::-1], flags=cv2.INTER_AREA, borderMode=cv2.BORDER_CONSTANT)
if img.ndim == 3 and ret.ndim == 2:
ret = ret[:, :, np.newaxis]
neww, newh = _largest_rotated_rect(ret.shape[1], ret.shape[0], deg)
neww = min(neww, ret.shape[1])
newh = min(newh, ret.shape[0])
newx = int(center[0] - neww * 0.5)
newy = int(center[1] - newh * 0.5)
# print(ret.shape, deg, newx, newy, neww, newh)
img = ret[newy:newy + newh, newx:newx + neww]
# adjust meta data
adjust_joint_list = []
for joint in annos: # TODO : speed up with affine transform
adjust_joint = []
for point in joint:
if point[0] < -100 or point[1] < -100:
adjust_joint.append((-1000, -1000))
continue
x, y = _rotate_coord((width, height), (newx, newy), point, deg)
if x > neww - 1 or y > newh - 1:
adjust_joint.append((-1000, -1000))
continue
if x < 0 or y < 0:
adjust_joint.append((-1000, -1000))
continue
adjust_joint.append((x, y))
adjust_joint_list.append(adjust_joint)
joint_list = adjust_joint_list
if mask is not None:
msk = mask
center = (msk.shape[1] * 0.5, msk.shape[0] * 0.5) # x, y
rot_m = cv2.getRotationMatrix2D((int(center[0]), int(center[1])), deg, 1)
ret = cv2.warpAffine(msk, rot_m, msk.shape[1::-1], flags=cv2.INTER_AREA, borderMode=cv2.BORDER_CONSTANT)
if msk.ndim == 3 and ret.ndim == 2:
ret = ret[:, :, np.newaxis]
neww, newh = _largest_rotated_rect(ret.shape[1], ret.shape[0], deg)
neww = min(neww, ret.shape[1])
newh = min(newh, ret.shape[0])
newx = int(center[0] - neww * 0.5)
newy = int(center[1] - newh * 0.5)
# print(ret.shape, deg, newx, newy, neww, newh)
msk = ret[newy:newy + newh, newx:newx + neww]
return img, joint_list, msk
else:
return img, joint_list, None | python | def keypoint_random_rotate(image, annos, mask=None, rg=15.):
"""Rotate an image and corresponding keypoints.
Parameters
-----------
image : 3 channel image
The given image for augmentation.
annos : list of list of floats
The keypoints annotation of people.
mask : single channel image or None
The mask if available.
rg : int or float
Degree to rotate, usually 0 ~ 180.
Returns
----------
preprocessed image, annos, mask
"""
def _rotate_coord(shape, newxy, point, angle):
angle = -1 * angle / 180.0 * math.pi
ox, oy = shape
px, py = point
ox /= 2
oy /= 2
qx = math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
qy = math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
new_x, new_y = newxy
qx += ox - new_x
qy += oy - new_y
return int(qx + 0.5), int(qy + 0.5)
def _largest_rotated_rect(w, h, angle):
"""
Get largest rectangle after rotation.
http://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders
"""
angle = angle / 180.0 * math.pi
if w <= 0 or h <= 0:
return 0, 0
width_is_longer = w >= h
side_long, side_short = (w, h) if width_is_longer else (h, w)
# since the solutions for angle, -angle and 180-angle are all the same,
# it suffices to look at the first quadrant and the absolute values of sin, cos:
sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle))
if side_short <= 2. * sin_a * cos_a * side_long:
# half constrained case: two crop corners touch the longer side,
# the other two corners are on the mid-line parallel to the longer side
x = 0.5 * side_short
wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a)
else:
# fully constrained case: crop touches all 4 sides
cos_2a = cos_a * cos_a - sin_a * sin_a
wr, hr = (w * cos_a - h * sin_a) / cos_2a, (h * cos_a - w * sin_a) / cos_2a
return int(np.round(wr)), int(np.round(hr))
img_shape = np.shape(image)
height = img_shape[0]
width = img_shape[1]
deg = np.random.uniform(-rg, rg)
img = image
center = (img.shape[1] * 0.5, img.shape[0] * 0.5) # x, y
rot_m = cv2.getRotationMatrix2D((int(center[0]), int(center[1])), deg, 1)
ret = cv2.warpAffine(img, rot_m, img.shape[1::-1], flags=cv2.INTER_AREA, borderMode=cv2.BORDER_CONSTANT)
if img.ndim == 3 and ret.ndim == 2:
ret = ret[:, :, np.newaxis]
neww, newh = _largest_rotated_rect(ret.shape[1], ret.shape[0], deg)
neww = min(neww, ret.shape[1])
newh = min(newh, ret.shape[0])
newx = int(center[0] - neww * 0.5)
newy = int(center[1] - newh * 0.5)
# print(ret.shape, deg, newx, newy, neww, newh)
img = ret[newy:newy + newh, newx:newx + neww]
# adjust meta data
adjust_joint_list = []
for joint in annos: # TODO : speed up with affine transform
adjust_joint = []
for point in joint:
if point[0] < -100 or point[1] < -100:
adjust_joint.append((-1000, -1000))
continue
x, y = _rotate_coord((width, height), (newx, newy), point, deg)
if x > neww - 1 or y > newh - 1:
adjust_joint.append((-1000, -1000))
continue
if x < 0 or y < 0:
adjust_joint.append((-1000, -1000))
continue
adjust_joint.append((x, y))
adjust_joint_list.append(adjust_joint)
joint_list = adjust_joint_list
if mask is not None:
msk = mask
center = (msk.shape[1] * 0.5, msk.shape[0] * 0.5) # x, y
rot_m = cv2.getRotationMatrix2D((int(center[0]), int(center[1])), deg, 1)
ret = cv2.warpAffine(msk, rot_m, msk.shape[1::-1], flags=cv2.INTER_AREA, borderMode=cv2.BORDER_CONSTANT)
if msk.ndim == 3 and ret.ndim == 2:
ret = ret[:, :, np.newaxis]
neww, newh = _largest_rotated_rect(ret.shape[1], ret.shape[0], deg)
neww = min(neww, ret.shape[1])
newh = min(newh, ret.shape[0])
newx = int(center[0] - neww * 0.5)
newy = int(center[1] - newh * 0.5)
# print(ret.shape, deg, newx, newy, neww, newh)
msk = ret[newy:newy + newh, newx:newx + neww]
return img, joint_list, msk
else:
return img, joint_list, None | [
"def",
"keypoint_random_rotate",
"(",
"image",
",",
"annos",
",",
"mask",
"=",
"None",
",",
"rg",
"=",
"15.",
")",
":",
"def",
"_rotate_coord",
"(",
"shape",
",",
"newxy",
",",
"point",
",",
"angle",
")",
":",
"angle",
"=",
"-",
"1",
"*",
"angle",
"/",
"180.0",
"*",
"math",
".",
"pi",
"ox",
",",
"oy",
"=",
"shape",
"px",
",",
"py",
"=",
"point",
"ox",
"/=",
"2",
"oy",
"/=",
"2",
"qx",
"=",
"math",
".",
"cos",
"(",
"angle",
")",
"*",
"(",
"px",
"-",
"ox",
")",
"-",
"math",
".",
"sin",
"(",
"angle",
")",
"*",
"(",
"py",
"-",
"oy",
")",
"qy",
"=",
"math",
".",
"sin",
"(",
"angle",
")",
"*",
"(",
"px",
"-",
"ox",
")",
"+",
"math",
".",
"cos",
"(",
"angle",
")",
"*",
"(",
"py",
"-",
"oy",
")",
"new_x",
",",
"new_y",
"=",
"newxy",
"qx",
"+=",
"ox",
"-",
"new_x",
"qy",
"+=",
"oy",
"-",
"new_y",
"return",
"int",
"(",
"qx",
"+",
"0.5",
")",
",",
"int",
"(",
"qy",
"+",
"0.5",
")",
"def",
"_largest_rotated_rect",
"(",
"w",
",",
"h",
",",
"angle",
")",
":",
"\"\"\"\n Get largest rectangle after rotation.\n http://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders\n \"\"\"",
"angle",
"=",
"angle",
"/",
"180.0",
"*",
"math",
".",
"pi",
"if",
"w",
"<=",
"0",
"or",
"h",
"<=",
"0",
":",
"return",
"0",
",",
"0",
"width_is_longer",
"=",
"w",
">=",
"h",
"side_long",
",",
"side_short",
"=",
"(",
"w",
",",
"h",
")",
"if",
"width_is_longer",
"else",
"(",
"h",
",",
"w",
")",
"# since the solutions for angle, -angle and 180-angle are all the same,",
"# if suffices to look at the first quadrant and the absolute values of sin,cos:",
"sin_a",
",",
"cos_a",
"=",
"abs",
"(",
"math",
".",
"sin",
"(",
"angle",
")",
")",
",",
"abs",
"(",
"math",
".",
"cos",
"(",
"angle",
")",
")",
"if",
"side_short",
"<=",
"2.",
"*",
"sin_a",
"*",
"cos_a",
"*",
"side_long",
":",
"# half constrained case: two crop corners touch the longer side,",
"# the other two corners are on the mid-line parallel to the longer line",
"x",
"=",
"0.5",
"*",
"side_short",
"wr",
",",
"hr",
"=",
"(",
"x",
"/",
"sin_a",
",",
"x",
"/",
"cos_a",
")",
"if",
"width_is_longer",
"else",
"(",
"x",
"/",
"cos_a",
",",
"x",
"/",
"sin_a",
")",
"else",
":",
"# fully constrained case: crop touches all 4 sides",
"cos_2a",
"=",
"cos_a",
"*",
"cos_a",
"-",
"sin_a",
"*",
"sin_a",
"wr",
",",
"hr",
"=",
"(",
"w",
"*",
"cos_a",
"-",
"h",
"*",
"sin_a",
")",
"/",
"cos_2a",
",",
"(",
"h",
"*",
"cos_a",
"-",
"w",
"*",
"sin_a",
")",
"/",
"cos_2a",
"return",
"int",
"(",
"np",
".",
"round",
"(",
"wr",
")",
")",
",",
"int",
"(",
"np",
".",
"round",
"(",
"hr",
")",
")",
"img_shape",
"=",
"np",
".",
"shape",
"(",
"image",
")",
"height",
"=",
"img_shape",
"[",
"0",
"]",
"width",
"=",
"img_shape",
"[",
"1",
"]",
"deg",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
"-",
"rg",
",",
"rg",
")",
"img",
"=",
"image",
"center",
"=",
"(",
"img",
".",
"shape",
"[",
"1",
"]",
"*",
"0.5",
",",
"img",
".",
"shape",
"[",
"0",
"]",
"*",
"0.5",
")",
"# x, y",
"rot_m",
"=",
"cv2",
".",
"getRotationMatrix2D",
"(",
"(",
"int",
"(",
"center",
"[",
"0",
"]",
")",
",",
"int",
"(",
"center",
"[",
"1",
"]",
")",
")",
",",
"deg",
",",
"1",
")",
"ret",
"=",
"cv2",
".",
"warpAffine",
"(",
"img",
",",
"rot_m",
",",
"img",
".",
"shape",
"[",
"1",
":",
":",
"-",
"1",
"]",
",",
"flags",
"=",
"cv2",
".",
"INTER_AREA",
",",
"borderMode",
"=",
"cv2",
".",
"BORDER_CONSTANT",
")",
"if",
"img",
".",
"ndim",
"==",
"3",
"and",
"ret",
".",
"ndim",
"==",
"2",
":",
"ret",
"=",
"ret",
"[",
":",
",",
":",
",",
"np",
".",
"newaxis",
"]",
"neww",
",",
"newh",
"=",
"_largest_rotated_rect",
"(",
"ret",
".",
"shape",
"[",
"1",
"]",
",",
"ret",
".",
"shape",
"[",
"0",
"]",
",",
"deg",
")",
"neww",
"=",
"min",
"(",
"neww",
",",
"ret",
".",
"shape",
"[",
"1",
"]",
")",
"newh",
"=",
"min",
"(",
"newh",
",",
"ret",
".",
"shape",
"[",
"0",
"]",
")",
"newx",
"=",
"int",
"(",
"center",
"[",
"0",
"]",
"-",
"neww",
"*",
"0.5",
")",
"newy",
"=",
"int",
"(",
"center",
"[",
"1",
"]",
"-",
"newh",
"*",
"0.5",
")",
"# print(ret.shape, deg, newx, newy, neww, newh)",
"img",
"=",
"ret",
"[",
"newy",
":",
"newy",
"+",
"newh",
",",
"newx",
":",
"newx",
"+",
"neww",
"]",
"# adjust meta data",
"adjust_joint_list",
"=",
"[",
"]",
"for",
"joint",
"in",
"annos",
":",
"# TODO : speed up with affine transform",
"adjust_joint",
"=",
"[",
"]",
"for",
"point",
"in",
"joint",
":",
"if",
"point",
"[",
"0",
"]",
"<",
"-",
"100",
"or",
"point",
"[",
"1",
"]",
"<",
"-",
"100",
":",
"adjust_joint",
".",
"append",
"(",
"(",
"-",
"1000",
",",
"-",
"1000",
")",
")",
"continue",
"x",
",",
"y",
"=",
"_rotate_coord",
"(",
"(",
"width",
",",
"height",
")",
",",
"(",
"newx",
",",
"newy",
")",
",",
"point",
",",
"deg",
")",
"if",
"x",
">",
"neww",
"-",
"1",
"or",
"y",
">",
"newh",
"-",
"1",
":",
"adjust_joint",
".",
"append",
"(",
"(",
"-",
"1000",
",",
"-",
"1000",
")",
")",
"continue",
"if",
"x",
"<",
"0",
"or",
"y",
"<",
"0",
":",
"adjust_joint",
".",
"append",
"(",
"(",
"-",
"1000",
",",
"-",
"1000",
")",
")",
"continue",
"adjust_joint",
".",
"append",
"(",
"(",
"x",
",",
"y",
")",
")",
"adjust_joint_list",
".",
"append",
"(",
"adjust_joint",
")",
"joint_list",
"=",
"adjust_joint_list",
"if",
"mask",
"is",
"not",
"None",
":",
"msk",
"=",
"mask",
"center",
"=",
"(",
"msk",
".",
"shape",
"[",
"1",
"]",
"*",
"0.5",
",",
"msk",
".",
"shape",
"[",
"0",
"]",
"*",
"0.5",
")",
"# x, y",
"rot_m",
"=",
"cv2",
".",
"getRotationMatrix2D",
"(",
"(",
"int",
"(",
"center",
"[",
"0",
"]",
")",
",",
"int",
"(",
"center",
"[",
"1",
"]",
")",
")",
",",
"deg",
",",
"1",
")",
"ret",
"=",
"cv2",
".",
"warpAffine",
"(",
"msk",
",",
"rot_m",
",",
"msk",
".",
"shape",
"[",
"1",
":",
":",
"-",
"1",
"]",
",",
"flags",
"=",
"cv2",
".",
"INTER_AREA",
",",
"borderMode",
"=",
"cv2",
".",
"BORDER_CONSTANT",
")",
"if",
"msk",
".",
"ndim",
"==",
"3",
"and",
"msk",
".",
"ndim",
"==",
"2",
":",
"ret",
"=",
"ret",
"[",
":",
",",
":",
",",
"np",
".",
"newaxis",
"]",
"neww",
",",
"newh",
"=",
"_largest_rotated_rect",
"(",
"ret",
".",
"shape",
"[",
"1",
"]",
",",
"ret",
".",
"shape",
"[",
"0",
"]",
",",
"deg",
")",
"neww",
"=",
"min",
"(",
"neww",
",",
"ret",
".",
"shape",
"[",
"1",
"]",
")",
"newh",
"=",
"min",
"(",
"newh",
",",
"ret",
".",
"shape",
"[",
"0",
"]",
")",
"newx",
"=",
"int",
"(",
"center",
"[",
"0",
"]",
"-",
"neww",
"*",
"0.5",
")",
"newy",
"=",
"int",
"(",
"center",
"[",
"1",
"]",
"-",
"newh",
"*",
"0.5",
")",
"# print(ret.shape, deg, newx, newy, neww, newh)",
"msk",
"=",
"ret",
"[",
"newy",
":",
"newy",
"+",
"newh",
",",
"newx",
":",
"newx",
"+",
"neww",
"]",
"return",
"img",
",",
"joint_list",
",",
"msk",
"else",
":",
"return",
"img",
",",
"joint_list",
",",
"None"
] | Rotate an image and corresponding keypoints.
Parameters
-----------
image : 3 channel image
The given image for augmentation.
annos : list of list of floats
The keypoints annotation of people.
mask : single channel image or None
The mask if available.
rg : int or float
Degree to rotate, usually 0 ~ 180.
Returns
----------
preprocessed image, annos, mask | [
"Rotate",
"an",
"image",
"and",
"corresponding",
"keypoints",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/prepro.py#L3844-L3959 | valid |
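A short sketch of keypoint_random_rotate under the same placeholder inputs as the previous sketch. The output is cropped to the largest axis-aligned rectangle that fits inside the rotated frame, so the result is smaller than the input; for a 368x368 image rotated by the full 15 degrees the crop is roughly 300x300:

import tensorlayer as tl

img, new_annos, new_mask = tl.prepro.keypoint_random_rotate(image, annos, mask=None, rg=15.)
print(img.shape)  # smaller than image.shape because of the post-rotation crop
# joints that fall outside the cropped area are marked (-1000, -1000) in new_annos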
tensorlayer/tensorlayer | tensorlayer/prepro.py | keypoint_random_flip | def keypoint_random_flip(
image, annos, mask=None, prob=0.5, flip_list=(0, 1, 5, 6, 7, 2, 3, 4, 11, 12, 13, 8, 9, 10, 15, 14, 17, 16, 18)
):
"""Flip an image and corresponding keypoints.
Parameters
-----------
image : 3 channel image
The given image for augmentation.
annos : list of list of floats
The keypoints annotation of people.
mask : single channel image or None
The mask if available.
prob : float, 0 to 1
The probability of flipping the image; if 1, the image is always flipped.
flip_list : tuple of int
Denotes how the keypoint indices are changed after flipping, which is required for the pose estimation task.
The left and right sides of the body should be maintained rather than switched.
(Default COCO format).
Set to an empty tuple if you don't need to maintain left and right information.
Returns
----------
preprocessed image, annos, mask
"""
_prob = np.random.uniform(0, 1.0)
if _prob > prob:
return image, annos, mask
_, width, _ = np.shape(image)
image = cv2.flip(image, 1)
if mask is not None:
mask = cv2.flip(mask, 1)
new_joints = []
for people in annos: # TODO : speed up with affine transform
new_keypoints = []
for k in flip_list:
point = people[k]
if point[0] < 0 or point[1] < 0:
new_keypoints.append((-1000, -1000))
continue
if point[0] > image.shape[1] - 1 or point[1] > image.shape[0] - 1:
new_keypoints.append((-1000, -1000))
continue
if (width - point[0]) > image.shape[1] - 1:
new_keypoints.append((-1000, -1000))
continue
new_keypoints.append((width - point[0], point[1]))
new_joints.append(new_keypoints)
annos = new_joints
return image, annos, mask | python | def keypoint_random_flip(
image, annos, mask=None, prob=0.5, flip_list=(0, 1, 5, 6, 7, 2, 3, 4, 11, 12, 13, 8, 9, 10, 15, 14, 17, 16, 18)
):
"""Flip an image and corresponding keypoints.
Parameters
-----------
image : 3 channel image
The given image for augmentation.
annos : list of list of floats
The keypoints annotation of people.
mask : single channel image or None
The mask if available.
prob : float, 0 to 1
The probability of flipping the image; if 1, the image is always flipped.
flip_list : tuple of int
Denotes how the keypoint indices are changed after flipping, which is required for the pose estimation task.
The left and right sides of the body should be maintained rather than switched.
(Default COCO format).
Set to an empty tuple if you don't need to maintain left and right information.
Returns
----------
preprocessed image, annos, mask
"""
_prob = np.random.uniform(0, 1.0)
if _prob > prob:
return image, annos, mask
_, width, _ = np.shape(image)
image = cv2.flip(image, 1)
if mask is not None:
mask = cv2.flip(mask, 1)
new_joints = []
for people in annos: # TODO : speed up with affine transform
new_keypoints = []
for k in flip_list:
point = people[k]
if point[0] < 0 or point[1] < 0:
new_keypoints.append((-1000, -1000))
continue
if point[0] > image.shape[1] - 1 or point[1] > image.shape[0] - 1:
new_keypoints.append((-1000, -1000))
continue
if (width - point[0]) > image.shape[1] - 1:
new_keypoints.append((-1000, -1000))
continue
new_keypoints.append((width - point[0], point[1]))
new_joints.append(new_keypoints)
annos = new_joints
return image, annos, mask | [
"def",
"keypoint_random_flip",
"(",
"image",
",",
"annos",
",",
"mask",
"=",
"None",
",",
"prob",
"=",
"0.5",
",",
"flip_list",
"=",
"(",
"0",
",",
"1",
",",
"5",
",",
"6",
",",
"7",
",",
"2",
",",
"3",
",",
"4",
",",
"11",
",",
"12",
",",
"13",
",",
"8",
",",
"9",
",",
"10",
",",
"15",
",",
"14",
",",
"17",
",",
"16",
",",
"18",
")",
")",
":",
"_prob",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
"0",
",",
"1.0",
")",
"if",
"_prob",
"<",
"prob",
":",
"return",
"image",
",",
"annos",
",",
"mask",
"_",
",",
"width",
",",
"_",
"=",
"np",
".",
"shape",
"(",
"image",
")",
"image",
"=",
"cv2",
".",
"flip",
"(",
"image",
",",
"1",
")",
"mask",
"=",
"cv2",
".",
"flip",
"(",
"mask",
",",
"1",
")",
"new_joints",
"=",
"[",
"]",
"for",
"people",
"in",
"annos",
":",
"# TODO : speed up with affine transform",
"new_keypoints",
"=",
"[",
"]",
"for",
"k",
"in",
"flip_list",
":",
"point",
"=",
"people",
"[",
"k",
"]",
"if",
"point",
"[",
"0",
"]",
"<",
"0",
"or",
"point",
"[",
"1",
"]",
"<",
"0",
":",
"new_keypoints",
".",
"append",
"(",
"(",
"-",
"1000",
",",
"-",
"1000",
")",
")",
"continue",
"if",
"point",
"[",
"0",
"]",
">",
"image",
".",
"shape",
"[",
"1",
"]",
"-",
"1",
"or",
"point",
"[",
"1",
"]",
">",
"image",
".",
"shape",
"[",
"0",
"]",
"-",
"1",
":",
"new_keypoints",
".",
"append",
"(",
"(",
"-",
"1000",
",",
"-",
"1000",
")",
")",
"continue",
"if",
"(",
"width",
"-",
"point",
"[",
"0",
"]",
")",
">",
"image",
".",
"shape",
"[",
"1",
"]",
"-",
"1",
":",
"new_keypoints",
".",
"append",
"(",
"(",
"-",
"1000",
",",
"-",
"1000",
")",
")",
"continue",
"new_keypoints",
".",
"append",
"(",
"(",
"width",
"-",
"point",
"[",
"0",
"]",
",",
"point",
"[",
"1",
"]",
")",
")",
"new_joints",
".",
"append",
"(",
"new_keypoints",
")",
"annos",
"=",
"new_joints",
"return",
"image",
",",
"annos",
",",
"mask"
] | Flip an image and corresponding keypoints.
Parameters
-----------
image : 3 channel image
The given image for augmentation.
annos : list of list of floats
The keypoints annotation of people.
mask : single channel image or None
The mask if available.
prob : float, 0 to 1
The probability of flipping the image; if 1, the image is always flipped.
flip_list : tuple of int
Denotes how the keypoint indices are changed after flipping, which is required for the pose estimation task.
The left and right sides of the body should be maintained rather than switched.
(Default COCO format).
Set to an empty tuple if you don't need to maintain left and right information.
Returns
----------
preprocessed image, annos, mask | [
"Flip",
"an",
"image",
"and",
"corresponding",
"keypoints",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/prepro.py#L3962-L4014 | valid |
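The flip_list in the keypoint_random_flip record above re-indexes joints so that left/right labels stay correct after mirroring. A tiny sketch of that re-indexing step in isolation, assuming the OpenPose-style COCO layout where joint 2 is the right shoulder and joint 5 the left shoulder:

flip_list = (0, 1, 5, 6, 7, 2, 3, 4, 11, 12, 13, 8, 9, 10, 15, 14, 17, 16, 18)
person = [(i, i) for i in range(19)]  # dummy keypoints whose coordinates equal the joint id
flipped = [person[k] for k in flip_list]  # re-indexing only; mirroring of the x coordinate is done separately
print(flipped[2])  # (5, 5): output joint 2 (right shoulder) is read from input joint 5 (left shoulder)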
tensorlayer/tensorlayer | tensorlayer/prepro.py | keypoint_random_resize | def keypoint_random_resize(image, annos, mask=None, zoom_range=(0.8, 1.2)):
"""Randomly resize an image and corresponding keypoints.
The height and width of the image will be changed independently, so the scale will be changed.
Parameters
-----------
image : 3 channel image
The given image for augmentation.
annos : list of list of floats
The keypoints annotation of people.
mask : single channel image or None
The mask if available.
zoom_range : tuple of two floats
The minimum and maximum factor to zoom in or out, e.g. (0.5, 1) means zoom out by a factor of 1 to 2.
Returns
----------
preprocessed image, annos, mask
"""
height = image.shape[0]
width = image.shape[1]
_min, _max = zoom_range
scalew = np.random.uniform(_min, _max)
scaleh = np.random.uniform(_min, _max)
neww = int(width * scalew)
newh = int(height * scaleh)
dst = cv2.resize(image, (neww, newh), interpolation=cv2.INTER_AREA)
if mask is not None:
mask = cv2.resize(mask, (neww, newh), interpolation=cv2.INTER_AREA)
# adjust meta data
adjust_joint_list = []
for joint in annos: # TODO : speed up with affine transform
adjust_joint = []
for point in joint:
if point[0] < -100 or point[1] < -100:
adjust_joint.append((-1000, -1000))
continue
adjust_joint.append((int(point[0] * scalew + 0.5), int(point[1] * scaleh + 0.5)))
adjust_joint_list.append(adjust_joint)
if mask is not None:
return dst, adjust_joint_list, mask
else:
return dst, adjust_joint_list, None | python | def keypoint_random_resize(image, annos, mask=None, zoom_range=(0.8, 1.2)):
"""Randomly resize an image and corresponding keypoints.
The height and width of the image will be changed independently, so the scale will be changed.
Parameters
-----------
image : 3 channel image
The given image for augmentation.
annos : list of list of floats
The keypoints annotation of people.
mask : single channel image or None
The mask if available.
zoom_range : tuple of two floats
The minimum and maximum factor to zoom in or out, e.g. (0.5, 1) means zoom out by a factor of 1 to 2.
Returns
----------
preprocessed image, annos, mask
"""
height = image.shape[0]
width = image.shape[1]
_min, _max = zoom_range
scalew = np.random.uniform(_min, _max)
scaleh = np.random.uniform(_min, _max)
neww = int(width * scalew)
newh = int(height * scaleh)
dst = cv2.resize(image, (neww, newh), interpolation=cv2.INTER_AREA)
if mask is not None:
mask = cv2.resize(mask, (neww, newh), interpolation=cv2.INTER_AREA)
# adjust meta data
adjust_joint_list = []
for joint in annos: # TODO : speed up with affine transform
adjust_joint = []
for point in joint:
if point[0] < -100 or point[1] < -100:
adjust_joint.append((-1000, -1000))
continue
adjust_joint.append((int(point[0] * scalew + 0.5), int(point[1] * scaleh + 0.5)))
adjust_joint_list.append(adjust_joint)
if mask is not None:
return dst, adjust_joint_list, mask
else:
return dst, adjust_joint_list, None | [
"def",
"keypoint_random_resize",
"(",
"image",
",",
"annos",
",",
"mask",
"=",
"None",
",",
"zoom_range",
"=",
"(",
"0.8",
",",
"1.2",
")",
")",
":",
"height",
"=",
"image",
".",
"shape",
"[",
"0",
"]",
"width",
"=",
"image",
".",
"shape",
"[",
"1",
"]",
"_min",
",",
"_max",
"=",
"zoom_range",
"scalew",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
"_min",
",",
"_max",
")",
"scaleh",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
"_min",
",",
"_max",
")",
"neww",
"=",
"int",
"(",
"width",
"*",
"scalew",
")",
"newh",
"=",
"int",
"(",
"height",
"*",
"scaleh",
")",
"dst",
"=",
"cv2",
".",
"resize",
"(",
"image",
",",
"(",
"neww",
",",
"newh",
")",
",",
"interpolation",
"=",
"cv2",
".",
"INTER_AREA",
")",
"if",
"mask",
"is",
"not",
"None",
":",
"mask",
"=",
"cv2",
".",
"resize",
"(",
"mask",
",",
"(",
"neww",
",",
"newh",
")",
",",
"interpolation",
"=",
"cv2",
".",
"INTER_AREA",
")",
"# adjust meta data",
"adjust_joint_list",
"=",
"[",
"]",
"for",
"joint",
"in",
"annos",
":",
"# TODO : speed up with affine transform",
"adjust_joint",
"=",
"[",
"]",
"for",
"point",
"in",
"joint",
":",
"if",
"point",
"[",
"0",
"]",
"<",
"-",
"100",
"or",
"point",
"[",
"1",
"]",
"<",
"-",
"100",
":",
"adjust_joint",
".",
"append",
"(",
"(",
"-",
"1000",
",",
"-",
"1000",
")",
")",
"continue",
"adjust_joint",
".",
"append",
"(",
"(",
"int",
"(",
"point",
"[",
"0",
"]",
"*",
"scalew",
"+",
"0.5",
")",
",",
"int",
"(",
"point",
"[",
"1",
"]",
"*",
"scaleh",
"+",
"0.5",
")",
")",
")",
"adjust_joint_list",
".",
"append",
"(",
"adjust_joint",
")",
"if",
"mask",
"is",
"not",
"None",
":",
"return",
"dst",
",",
"adjust_joint_list",
",",
"mask",
"else",
":",
"return",
"dst",
",",
"adjust_joint_list",
",",
"None"
] | Randomly resize an image and corresponding keypoints.
The height and width of the image will be changed independently, so the scale will be changed.
Parameters
-----------
image : 3 channel image
The given image for augmentation.
annos : list of list of floats
The keypoints annotation of people.
mask : single channel image or None
The mask if available.
zoom_range : tuple of two floats
The minimum and maximum factor to zoom in or out, e.g. (0.5, 1) means zoom out by a factor of 1 to 2.
Returns
----------
preprocessed image, annos, mask | [
"Randomly",
"resize",
"an",
"image",
"and",
"corresponding",
"keypoints",
".",
"The",
"height",
"and",
"width",
"of",
"image",
"will",
"be",
"changed",
"independently",
"so",
"the",
"scale",
"will",
"be",
"changed",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/prepro.py#L4017-L4062 | valid |
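Taken together, the four keypoint augmenters in this file are typically chained during training. A minimal sketch under the same placeholder inputs; mask may stay None throughout:

import tensorlayer as tl

img, annos, mask = tl.prepro.keypoint_random_resize(image, annos, mask=None, zoom_range=(0.8, 1.2))
img, annos, mask = tl.prepro.keypoint_random_rotate(img, annos, mask, rg=15.)
img, annos, mask = tl.prepro.keypoint_random_flip(img, annos, mask, prob=0.5)
img, annos, mask = tl.prepro.keypoint_resize_random_crop(img, annos, mask, size=(368, 368))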
tensorlayer/tensorlayer | examples/pretrained_cnn/tutorial_vgg19.py | Vgg19 | def Vgg19(rgb):
"""
Build the VGG 19 Model
Parameters
-----------
rgb : rgb image placeholder [batch, height, width, 3] with values scaled to [0, 1]
"""
start_time = time.time()
print("build model started")
rgb_scaled = rgb * 255.0
# Convert RGB to BGR
red, green, blue = tf.split(rgb_scaled, 3, 3)
if red.get_shape().as_list()[1:] != [224, 224, 1]:
raise Exception("image size unmatch")
if green.get_shape().as_list()[1:] != [224, 224, 1]:
raise Exception("image size unmatch")
if blue.get_shape().as_list()[1:] != [224, 224, 1]:
raise Exception("image size unmatch")
bgr = tf.concat([
blue - VGG_MEAN[0],
green - VGG_MEAN[1],
red - VGG_MEAN[2],
], axis=3)
if bgr.get_shape().as_list()[1:] != [224, 224, 3]:
raise Exception("image size unmatch")
# input layer
net_in = InputLayer(bgr, name='input')
# conv1
net = Conv2dLayer(net_in, act=tf.nn.relu, shape=[3, 3, 3, 64], strides=[1, 1, 1, 1], padding='SAME', name='conv1_1')
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 64, 64], strides=[1, 1, 1, 1], padding='SAME', name='conv1_2')
net = PoolLayer(net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', pool=tf.nn.max_pool, name='pool1')
# conv2
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 64, 128], strides=[1, 1, 1, 1], padding='SAME', name='conv2_1')
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 128, 128], strides=[1, 1, 1, 1], padding='SAME', name='conv2_2')
net = PoolLayer(net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', pool=tf.nn.max_pool, name='pool2')
# conv3
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 128, 256], strides=[1, 1, 1, 1], padding='SAME', name='conv3_1')
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 256, 256], strides=[1, 1, 1, 1], padding='SAME', name='conv3_2')
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 256, 256], strides=[1, 1, 1, 1], padding='SAME', name='conv3_3')
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 256, 256], strides=[1, 1, 1, 1], padding='SAME', name='conv3_4')
net = PoolLayer(net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', pool=tf.nn.max_pool, name='pool3')
# conv4
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 256, 512], strides=[1, 1, 1, 1], padding='SAME', name='conv4_1')
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 512, 512], strides=[1, 1, 1, 1], padding='SAME', name='conv4_2')
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 512, 512], strides=[1, 1, 1, 1], padding='SAME', name='conv4_3')
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 512, 512], strides=[1, 1, 1, 1], padding='SAME', name='conv4_4')
net = PoolLayer(net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', pool=tf.nn.max_pool, name='pool4')
# conv5
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 512, 512], strides=[1, 1, 1, 1], padding='SAME', name='conv5_1')
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 512, 512], strides=[1, 1, 1, 1], padding='SAME', name='conv5_2')
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 512, 512], strides=[1, 1, 1, 1], padding='SAME', name='conv5_3')
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 512, 512], strides=[1, 1, 1, 1], padding='SAME', name='conv5_4')
net = PoolLayer(net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', pool=tf.nn.max_pool, name='pool5')
# fc 6~8
net = FlattenLayer(net, name='flatten')
net = DenseLayer(net, n_units=4096, act=tf.nn.relu, name='fc6')
net = DenseLayer(net, n_units=4096, act=tf.nn.relu, name='fc7')
net = DenseLayer(net, n_units=1000, act=None, name='fc8')
print("build model finished: %fs" % (time.time() - start_time))
return net | python | def Vgg19(rgb):
"""
Build the VGG 19 Model
Parameters
-----------
rgb : rgb image placeholder [batch, height, width, 3] with values scaled to [0, 1]
"""
start_time = time.time()
print("build model started")
rgb_scaled = rgb * 255.0
# Convert RGB to BGR
red, green, blue = tf.split(rgb_scaled, 3, 3)
if red.get_shape().as_list()[1:] != [224, 224, 1]:
raise Exception("image size mismatch")
if green.get_shape().as_list()[1:] != [224, 224, 1]:
raise Exception("image size mismatch")
if blue.get_shape().as_list()[1:] != [224, 224, 1]:
raise Exception("image size mismatch")
bgr = tf.concat([
blue - VGG_MEAN[0],
green - VGG_MEAN[1],
red - VGG_MEAN[2],
], axis=3)
if bgr.get_shape().as_list()[1:] != [224, 224, 3]:
raise Exception("image size mismatch")
# input layer
net_in = InputLayer(bgr, name='input')
# conv1
net = Conv2dLayer(net_in, act=tf.nn.relu, shape=[3, 3, 3, 64], strides=[1, 1, 1, 1], padding='SAME', name='conv1_1')
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 64, 64], strides=[1, 1, 1, 1], padding='SAME', name='conv1_2')
net = PoolLayer(net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', pool=tf.nn.max_pool, name='pool1')
# conv2
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 64, 128], strides=[1, 1, 1, 1], padding='SAME', name='conv2_1')
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 128, 128], strides=[1, 1, 1, 1], padding='SAME', name='conv2_2')
net = PoolLayer(net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', pool=tf.nn.max_pool, name='pool2')
# conv3
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 128, 256], strides=[1, 1, 1, 1], padding='SAME', name='conv3_1')
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 256, 256], strides=[1, 1, 1, 1], padding='SAME', name='conv3_2')
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 256, 256], strides=[1, 1, 1, 1], padding='SAME', name='conv3_3')
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 256, 256], strides=[1, 1, 1, 1], padding='SAME', name='conv3_4')
net = PoolLayer(net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', pool=tf.nn.max_pool, name='pool3')
# conv4
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 256, 512], strides=[1, 1, 1, 1], padding='SAME', name='conv4_1')
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 512, 512], strides=[1, 1, 1, 1], padding='SAME', name='conv4_2')
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 512, 512], strides=[1, 1, 1, 1], padding='SAME', name='conv4_3')
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 512, 512], strides=[1, 1, 1, 1], padding='SAME', name='conv4_4')
net = PoolLayer(net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', pool=tf.nn.max_pool, name='pool4')
# conv5
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 512, 512], strides=[1, 1, 1, 1], padding='SAME', name='conv5_1')
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 512, 512], strides=[1, 1, 1, 1], padding='SAME', name='conv5_2')
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 512, 512], strides=[1, 1, 1, 1], padding='SAME', name='conv5_3')
net = Conv2dLayer(net, act=tf.nn.relu, shape=[3, 3, 512, 512], strides=[1, 1, 1, 1], padding='SAME', name='conv5_4')
net = PoolLayer(net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', pool=tf.nn.max_pool, name='pool5')
# fc 6~8
net = FlattenLayer(net, name='flatten')
net = DenseLayer(net, n_units=4096, act=tf.nn.relu, name='fc6')
net = DenseLayer(net, n_units=4096, act=tf.nn.relu, name='fc7')
net = DenseLayer(net, n_units=1000, act=None, name='fc8')
print("build model finished: %fs" % (time.time() - start_time))
return net | [
"def",
"Vgg19",
"(",
"rgb",
")",
":",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"print",
"(",
"\"build model started\"",
")",
"rgb_scaled",
"=",
"rgb",
"*",
"255.0",
"# Convert RGB to BGR",
"red",
",",
"green",
",",
"blue",
"=",
"tf",
".",
"split",
"(",
"rgb_scaled",
",",
"3",
",",
"3",
")",
"if",
"red",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"[",
"1",
":",
"]",
"!=",
"[",
"224",
",",
"224",
",",
"1",
"]",
":",
"raise",
"Exception",
"(",
"\"image size unmatch\"",
")",
"if",
"green",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"[",
"1",
":",
"]",
"!=",
"[",
"224",
",",
"224",
",",
"1",
"]",
":",
"raise",
"Exception",
"(",
"\"image size unmatch\"",
")",
"if",
"blue",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"[",
"1",
":",
"]",
"!=",
"[",
"224",
",",
"224",
",",
"1",
"]",
":",
"raise",
"Exception",
"(",
"\"image size unmatch\"",
")",
"bgr",
"=",
"tf",
".",
"concat",
"(",
"[",
"blue",
"-",
"VGG_MEAN",
"[",
"0",
"]",
",",
"green",
"-",
"VGG_MEAN",
"[",
"1",
"]",
",",
"red",
"-",
"VGG_MEAN",
"[",
"2",
"]",
",",
"]",
",",
"axis",
"=",
"3",
")",
"if",
"bgr",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"[",
"1",
":",
"]",
"!=",
"[",
"224",
",",
"224",
",",
"3",
"]",
":",
"raise",
"Exception",
"(",
"\"image size unmatch\"",
")",
"# input layer",
"net_in",
"=",
"InputLayer",
"(",
"bgr",
",",
"name",
"=",
"'input'",
")",
"# conv1",
"net",
"=",
"Conv2dLayer",
"(",
"net_in",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"shape",
"=",
"[",
"3",
",",
"3",
",",
"3",
",",
"64",
"]",
",",
"strides",
"=",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv1_1'",
")",
"net",
"=",
"Conv2dLayer",
"(",
"net",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"shape",
"=",
"[",
"3",
",",
"3",
",",
"64",
",",
"64",
"]",
",",
"strides",
"=",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv1_2'",
")",
"net",
"=",
"PoolLayer",
"(",
"net",
",",
"ksize",
"=",
"[",
"1",
",",
"2",
",",
"2",
",",
"1",
"]",
",",
"strides",
"=",
"[",
"1",
",",
"2",
",",
"2",
",",
"1",
"]",
",",
"padding",
"=",
"'SAME'",
",",
"pool",
"=",
"tf",
".",
"nn",
".",
"max_pool",
",",
"name",
"=",
"'pool1'",
")",
"# conv2",
"net",
"=",
"Conv2dLayer",
"(",
"net",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"shape",
"=",
"[",
"3",
",",
"3",
",",
"64",
",",
"128",
"]",
",",
"strides",
"=",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv2_1'",
")",
"net",
"=",
"Conv2dLayer",
"(",
"net",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"shape",
"=",
"[",
"3",
",",
"3",
",",
"128",
",",
"128",
"]",
",",
"strides",
"=",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv2_2'",
")",
"net",
"=",
"PoolLayer",
"(",
"net",
",",
"ksize",
"=",
"[",
"1",
",",
"2",
",",
"2",
",",
"1",
"]",
",",
"strides",
"=",
"[",
"1",
",",
"2",
",",
"2",
",",
"1",
"]",
",",
"padding",
"=",
"'SAME'",
",",
"pool",
"=",
"tf",
".",
"nn",
".",
"max_pool",
",",
"name",
"=",
"'pool2'",
")",
"# conv3",
"net",
"=",
"Conv2dLayer",
"(",
"net",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"shape",
"=",
"[",
"3",
",",
"3",
",",
"128",
",",
"256",
"]",
",",
"strides",
"=",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv3_1'",
")",
"net",
"=",
"Conv2dLayer",
"(",
"net",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"shape",
"=",
"[",
"3",
",",
"3",
",",
"256",
",",
"256",
"]",
",",
"strides",
"=",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv3_2'",
")",
"net",
"=",
"Conv2dLayer",
"(",
"net",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"shape",
"=",
"[",
"3",
",",
"3",
",",
"256",
",",
"256",
"]",
",",
"strides",
"=",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv3_3'",
")",
"net",
"=",
"Conv2dLayer",
"(",
"net",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"shape",
"=",
"[",
"3",
",",
"3",
",",
"256",
",",
"256",
"]",
",",
"strides",
"=",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv3_4'",
")",
"net",
"=",
"PoolLayer",
"(",
"net",
",",
"ksize",
"=",
"[",
"1",
",",
"2",
",",
"2",
",",
"1",
"]",
",",
"strides",
"=",
"[",
"1",
",",
"2",
",",
"2",
",",
"1",
"]",
",",
"padding",
"=",
"'SAME'",
",",
"pool",
"=",
"tf",
".",
"nn",
".",
"max_pool",
",",
"name",
"=",
"'pool3'",
")",
"# conv4",
"net",
"=",
"Conv2dLayer",
"(",
"net",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"shape",
"=",
"[",
"3",
",",
"3",
",",
"256",
",",
"512",
"]",
",",
"strides",
"=",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv4_1'",
")",
"net",
"=",
"Conv2dLayer",
"(",
"net",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"shape",
"=",
"[",
"3",
",",
"3",
",",
"512",
",",
"512",
"]",
",",
"strides",
"=",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv4_2'",
")",
"net",
"=",
"Conv2dLayer",
"(",
"net",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"shape",
"=",
"[",
"3",
",",
"3",
",",
"512",
",",
"512",
"]",
",",
"strides",
"=",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv4_3'",
")",
"net",
"=",
"Conv2dLayer",
"(",
"net",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"shape",
"=",
"[",
"3",
",",
"3",
",",
"512",
",",
"512",
"]",
",",
"strides",
"=",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv4_4'",
")",
"net",
"=",
"PoolLayer",
"(",
"net",
",",
"ksize",
"=",
"[",
"1",
",",
"2",
",",
"2",
",",
"1",
"]",
",",
"strides",
"=",
"[",
"1",
",",
"2",
",",
"2",
",",
"1",
"]",
",",
"padding",
"=",
"'SAME'",
",",
"pool",
"=",
"tf",
".",
"nn",
".",
"max_pool",
",",
"name",
"=",
"'pool4'",
")",
"# conv5",
"net",
"=",
"Conv2dLayer",
"(",
"net",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"shape",
"=",
"[",
"3",
",",
"3",
",",
"512",
",",
"512",
"]",
",",
"strides",
"=",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv5_1'",
")",
"net",
"=",
"Conv2dLayer",
"(",
"net",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"shape",
"=",
"[",
"3",
",",
"3",
",",
"512",
",",
"512",
"]",
",",
"strides",
"=",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv5_2'",
")",
"net",
"=",
"Conv2dLayer",
"(",
"net",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"shape",
"=",
"[",
"3",
",",
"3",
",",
"512",
",",
"512",
"]",
",",
"strides",
"=",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv5_3'",
")",
"net",
"=",
"Conv2dLayer",
"(",
"net",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"shape",
"=",
"[",
"3",
",",
"3",
",",
"512",
",",
"512",
"]",
",",
"strides",
"=",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv5_4'",
")",
"net",
"=",
"PoolLayer",
"(",
"net",
",",
"ksize",
"=",
"[",
"1",
",",
"2",
",",
"2",
",",
"1",
"]",
",",
"strides",
"=",
"[",
"1",
",",
"2",
",",
"2",
",",
"1",
"]",
",",
"padding",
"=",
"'SAME'",
",",
"pool",
"=",
"tf",
".",
"nn",
".",
"max_pool",
",",
"name",
"=",
"'pool5'",
")",
"# fc 6~8",
"net",
"=",
"FlattenLayer",
"(",
"net",
",",
"name",
"=",
"'flatten'",
")",
"net",
"=",
"DenseLayer",
"(",
"net",
",",
"n_units",
"=",
"4096",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"name",
"=",
"'fc6'",
")",
"net",
"=",
"DenseLayer",
"(",
"net",
",",
"n_units",
"=",
"4096",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"name",
"=",
"'fc7'",
")",
"net",
"=",
"DenseLayer",
"(",
"net",
",",
"n_units",
"=",
"1000",
",",
"act",
"=",
"None",
",",
"name",
"=",
"'fc8'",
")",
"print",
"(",
"\"build model finished: %fs\"",
"%",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
")",
")",
"return",
"net"
] | Build the VGG 19 Model
Parameters
-----------
rgb : rgb image placeholder [batch, height, width, 3] with values scaled to [0, 1] | [
"Build",
"the",
"VGG",
"19",
"Model"
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/pretrained_cnn/tutorial_vgg19.py#L72-L137 | valid |
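A sketch of wiring the Vgg19 record above into a session, assuming TensorFlow 1.x and TensorLayer 1.x as used in this example file. Loading of pretrained weights is omitted, so the softmax output is only meaningful after real VGG19 parameters are assigned:

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 224, 224, 3])  # RGB images with values scaled to [0, 1]
net = Vgg19(x)
y = tf.nn.softmax(net.outputs)  # probabilities over the 1000 ImageNet classes

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
img = np.random.uniform(0, 1, (1, 224, 224, 3)).astype(np.float32)  # stand-in for a real image
prob = sess.run(y, feed_dict={x: img})
print(prob.shape)  # (1, 1000)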
tensorlayer/tensorlayer | examples/pretrained_cnn/tutorial_vgg19.py | Vgg19_simple_api | def Vgg19_simple_api(rgb):
"""
Build the VGG 19 Model
Parameters
-----------
rgb : rgb image placeholder [batch, height, width, 3] with values scaled to [0, 1]
"""
start_time = time.time()
print("build model started")
rgb_scaled = rgb * 255.0
# Convert RGB to BGR
red, green, blue = tf.split(rgb_scaled, 3, 3)
if red.get_shape().as_list()[1:] != [224, 224, 1]:
raise Exception("image size unmatch")
if green.get_shape().as_list()[1:] != [224, 224, 1]:
raise Exception("image size unmatch")
if blue.get_shape().as_list()[1:] != [224, 224, 1]:
raise Exception("image size unmatch")
bgr = tf.concat([
blue - VGG_MEAN[0],
green - VGG_MEAN[1],
red - VGG_MEAN[2],
], axis=3)
if bgr.get_shape().as_list()[1:] != [224, 224, 3]:
raise Exception("image size unmatch")
# input layer
net_in = InputLayer(bgr, name='input')
# conv1
net = Conv2d(net_in, 64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv1_1')
net = Conv2d(net, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv1_2')
net = MaxPool2d(net, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool1')
# conv2
net = Conv2d(net, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv2_1')
net = Conv2d(net, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv2_2')
net = MaxPool2d(net, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool2')
# conv3
net = Conv2d(net, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_1')
net = Conv2d(net, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_2')
net = Conv2d(net, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_3')
net = Conv2d(net, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_4')
net = MaxPool2d(net, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool3')
# conv4
net = Conv2d(net, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_1')
net = Conv2d(net, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_2')
net = Conv2d(net, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_3')
net = Conv2d(net, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_4')
net = MaxPool2d(net, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool4')
# conv5
net = Conv2d(net, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_1')
net = Conv2d(net, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_2')
net = Conv2d(net, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_3')
net = Conv2d(net, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_4')
net = MaxPool2d(net, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool5')
# fc 6~8
net = FlattenLayer(net, name='flatten')
net = DenseLayer(net, n_units=4096, act=tf.nn.relu, name='fc6')
net = DenseLayer(net, n_units=4096, act=tf.nn.relu, name='fc7')
net = DenseLayer(net, n_units=1000, act=None, name='fc8')
print("build model finished: %fs" % (time.time() - start_time))
    return net | python | (code column: verbatim duplicate of original_string, omitted) | [
"def",
"Vgg19_simple_api",
"(",
"rgb",
")",
":",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"print",
"(",
"\"build model started\"",
")",
"rgb_scaled",
"=",
"rgb",
"*",
"255.0",
"# Convert RGB to BGR",
"red",
",",
"green",
",",
"blue",
"=",
"tf",
".",
"split",
"(",
"rgb_scaled",
",",
"3",
",",
"3",
")",
"if",
"red",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"[",
"1",
":",
"]",
"!=",
"[",
"224",
",",
"224",
",",
"1",
"]",
":",
"raise",
"Exception",
"(",
"\"image size unmatch\"",
")",
"if",
"green",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"[",
"1",
":",
"]",
"!=",
"[",
"224",
",",
"224",
",",
"1",
"]",
":",
"raise",
"Exception",
"(",
"\"image size unmatch\"",
")",
"if",
"blue",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"[",
"1",
":",
"]",
"!=",
"[",
"224",
",",
"224",
",",
"1",
"]",
":",
"raise",
"Exception",
"(",
"\"image size unmatch\"",
")",
"bgr",
"=",
"tf",
".",
"concat",
"(",
"[",
"blue",
"-",
"VGG_MEAN",
"[",
"0",
"]",
",",
"green",
"-",
"VGG_MEAN",
"[",
"1",
"]",
",",
"red",
"-",
"VGG_MEAN",
"[",
"2",
"]",
",",
"]",
",",
"axis",
"=",
"3",
")",
"if",
"bgr",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"[",
"1",
":",
"]",
"!=",
"[",
"224",
",",
"224",
",",
"3",
"]",
":",
"raise",
"Exception",
"(",
"\"image size unmatch\"",
")",
"# input layer",
"net_in",
"=",
"InputLayer",
"(",
"bgr",
",",
"name",
"=",
"'input'",
")",
"# conv1",
"net",
"=",
"Conv2d",
"(",
"net_in",
",",
"64",
",",
"filter_size",
"=",
"(",
"3",
",",
"3",
")",
",",
"strides",
"=",
"(",
"1",
",",
"1",
")",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv1_1'",
")",
"net",
"=",
"Conv2d",
"(",
"net",
",",
"n_filter",
"=",
"64",
",",
"filter_size",
"=",
"(",
"3",
",",
"3",
")",
",",
"strides",
"=",
"(",
"1",
",",
"1",
")",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv1_2'",
")",
"net",
"=",
"MaxPool2d",
"(",
"net",
",",
"filter_size",
"=",
"(",
"2",
",",
"2",
")",
",",
"strides",
"=",
"(",
"2",
",",
"2",
")",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'pool1'",
")",
"# conv2",
"net",
"=",
"Conv2d",
"(",
"net",
",",
"n_filter",
"=",
"128",
",",
"filter_size",
"=",
"(",
"3",
",",
"3",
")",
",",
"strides",
"=",
"(",
"1",
",",
"1",
")",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv2_1'",
")",
"net",
"=",
"Conv2d",
"(",
"net",
",",
"n_filter",
"=",
"128",
",",
"filter_size",
"=",
"(",
"3",
",",
"3",
")",
",",
"strides",
"=",
"(",
"1",
",",
"1",
")",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv2_2'",
")",
"net",
"=",
"MaxPool2d",
"(",
"net",
",",
"filter_size",
"=",
"(",
"2",
",",
"2",
")",
",",
"strides",
"=",
"(",
"2",
",",
"2",
")",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'pool2'",
")",
"# conv3",
"net",
"=",
"Conv2d",
"(",
"net",
",",
"n_filter",
"=",
"256",
",",
"filter_size",
"=",
"(",
"3",
",",
"3",
")",
",",
"strides",
"=",
"(",
"1",
",",
"1",
")",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv3_1'",
")",
"net",
"=",
"Conv2d",
"(",
"net",
",",
"n_filter",
"=",
"256",
",",
"filter_size",
"=",
"(",
"3",
",",
"3",
")",
",",
"strides",
"=",
"(",
"1",
",",
"1",
")",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv3_2'",
")",
"net",
"=",
"Conv2d",
"(",
"net",
",",
"n_filter",
"=",
"256",
",",
"filter_size",
"=",
"(",
"3",
",",
"3",
")",
",",
"strides",
"=",
"(",
"1",
",",
"1",
")",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv3_3'",
")",
"net",
"=",
"Conv2d",
"(",
"net",
",",
"n_filter",
"=",
"256",
",",
"filter_size",
"=",
"(",
"3",
",",
"3",
")",
",",
"strides",
"=",
"(",
"1",
",",
"1",
")",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv3_4'",
")",
"net",
"=",
"MaxPool2d",
"(",
"net",
",",
"filter_size",
"=",
"(",
"2",
",",
"2",
")",
",",
"strides",
"=",
"(",
"2",
",",
"2",
")",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'pool3'",
")",
"# conv4",
"net",
"=",
"Conv2d",
"(",
"net",
",",
"n_filter",
"=",
"512",
",",
"filter_size",
"=",
"(",
"3",
",",
"3",
")",
",",
"strides",
"=",
"(",
"1",
",",
"1",
")",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv4_1'",
")",
"net",
"=",
"Conv2d",
"(",
"net",
",",
"n_filter",
"=",
"512",
",",
"filter_size",
"=",
"(",
"3",
",",
"3",
")",
",",
"strides",
"=",
"(",
"1",
",",
"1",
")",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv4_2'",
")",
"net",
"=",
"Conv2d",
"(",
"net",
",",
"n_filter",
"=",
"512",
",",
"filter_size",
"=",
"(",
"3",
",",
"3",
")",
",",
"strides",
"=",
"(",
"1",
",",
"1",
")",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv4_3'",
")",
"net",
"=",
"Conv2d",
"(",
"net",
",",
"n_filter",
"=",
"512",
",",
"filter_size",
"=",
"(",
"3",
",",
"3",
")",
",",
"strides",
"=",
"(",
"1",
",",
"1",
")",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv4_4'",
")",
"net",
"=",
"MaxPool2d",
"(",
"net",
",",
"filter_size",
"=",
"(",
"2",
",",
"2",
")",
",",
"strides",
"=",
"(",
"2",
",",
"2",
")",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'pool4'",
")",
"# conv5",
"net",
"=",
"Conv2d",
"(",
"net",
",",
"n_filter",
"=",
"512",
",",
"filter_size",
"=",
"(",
"3",
",",
"3",
")",
",",
"strides",
"=",
"(",
"1",
",",
"1",
")",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv5_1'",
")",
"net",
"=",
"Conv2d",
"(",
"net",
",",
"n_filter",
"=",
"512",
",",
"filter_size",
"=",
"(",
"3",
",",
"3",
")",
",",
"strides",
"=",
"(",
"1",
",",
"1",
")",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv5_2'",
")",
"net",
"=",
"Conv2d",
"(",
"net",
",",
"n_filter",
"=",
"512",
",",
"filter_size",
"=",
"(",
"3",
",",
"3",
")",
",",
"strides",
"=",
"(",
"1",
",",
"1",
")",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv5_3'",
")",
"net",
"=",
"Conv2d",
"(",
"net",
",",
"n_filter",
"=",
"512",
",",
"filter_size",
"=",
"(",
"3",
",",
"3",
")",
",",
"strides",
"=",
"(",
"1",
",",
"1",
")",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'conv5_4'",
")",
"net",
"=",
"MaxPool2d",
"(",
"net",
",",
"filter_size",
"=",
"(",
"2",
",",
"2",
")",
",",
"strides",
"=",
"(",
"2",
",",
"2",
")",
",",
"padding",
"=",
"'SAME'",
",",
"name",
"=",
"'pool5'",
")",
"# fc 6~8",
"net",
"=",
"FlattenLayer",
"(",
"net",
",",
"name",
"=",
"'flatten'",
")",
"net",
"=",
"DenseLayer",
"(",
"net",
",",
"n_units",
"=",
"4096",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"name",
"=",
"'fc6'",
")",
"net",
"=",
"DenseLayer",
"(",
"net",
",",
"n_units",
"=",
"4096",
",",
"act",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"name",
"=",
"'fc7'",
")",
"net",
"=",
"DenseLayer",
"(",
"net",
",",
"n_units",
"=",
"1000",
",",
"act",
"=",
"None",
",",
"name",
"=",
"'fc8'",
")",
"print",
"(",
"\"build model finished: %fs\"",
"%",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
")",
")",
"return",
"net"
] | Build the VGG 19 Model
Parameters
-----------
rgb : rgb image placeholder [batch, height, width, 3] values scaled [0, 1] | ["Build", "the", "VGG", "19", "Model"] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/pretrained_cnn/tutorial_vgg19.py#L140-L206 | valid |
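A minimal forward-pass sketch for Vgg19_simple_api, assuming the TF1-era graph mode used throughout this file; the random image stands in for a real, preprocessed photo.

import numpy as np
import tensorflow as tf

sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, [None, 224, 224, 3])   # RGB scaled to [0, 1], as the docstring requires
net = Vgg19_simple_api(x)                             # the function recorded above
probs = tf.nn.softmax(net.outputs)                    # fc8 logits -> 1000-way class probabilities
sess.run(tf.global_variables_initializer())           # random weights; restore vgg19.npy for real predictions
img = np.random.rand(1, 224, 224, 3).astype(np.float32)
print(sess.run(probs, feed_dict={x: img}).shape)      # (1, 1000)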
tensorlayer/tensorlayer | examples/reinforcement_learning/tutorial_atari_pong.py | prepro | def prepro(I):
"""Prepro 210x160x3 uint8 frame into 6400 (80x80) 1D float vector."""
I = I[35:195]
I = I[::2, ::2, 0]
I[I == 144] = 0
I[I == 109] = 0
I[I != 0] = 1
    return I.astype(np.float).ravel() | python | (code column: verbatim duplicate of original_string, omitted) | [
"def",
"prepro",
"(",
"I",
")",
":",
"I",
"=",
"I",
"[",
"35",
":",
"195",
"]",
"I",
"=",
"I",
"[",
":",
":",
"2",
",",
":",
":",
"2",
",",
"0",
"]",
"I",
"[",
"I",
"==",
"144",
"]",
"=",
"0",
"I",
"[",
"I",
"==",
"109",
"]",
"=",
"0",
"I",
"[",
"I",
"!=",
"0",
"]",
"=",
"1",
"return",
"I",
".",
"astype",
"(",
"np",
".",
"float",
")",
".",
"ravel",
"(",
")"
] | Preprocess a 210x160x3 uint8 frame into a 6400 (80x80) 1D float vector. | ["Prepro", "210x160x3", "uint8", "frame", "into", "6400", "(", "80x80", ")", "1D", "float", "vector", "."] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/reinforcement_learning/tutorial_atari_pong.py#L47-L54 | valid |
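A quick sanity-check sketch for prepro; the random frame stands in for a real Atari Pong observation. One caveat worth noting: np.float in the return line above was an alias for the builtin float and was removed in NumPy 1.24, so on modern NumPy the same line would read I.astype(float).ravel().

import numpy as np

frame = np.random.randint(0, 256, size=(210, 160, 3), dtype=np.uint8)  # fake 210x160x3 frame
x = prepro(frame)                       # crop rows 35:195, take every 2nd pixel, binarize, flatten
assert x.shape == (6400,)               # 80 * 80
assert set(np.unique(x)) <= {0.0, 1.0}  # background zeroed, everything else set to 1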
tensorlayer/tensorlayer | tensorlayer/rein.py | discount_episode_rewards | def discount_episode_rewards(rewards=None, gamma=0.99, mode=0):
"""Take 1D float array of rewards and compute discounted rewards for an
    episode. When a non-zero value is encountered, it is treated as the end of an episode.
Parameters
----------
rewards : list
List of rewards
gamma : float
Discounted factor
mode : int
Mode for computing the discount rewards.
        - If mode == 0, reset the discount process whenever a non-zero reward is encountered (as in the Pong game).
        - If mode == 1, do not reset the discount process.
Returns
--------
list of float
The discounted rewards.
Examples
----------
>>> rewards = np.asarray([0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1])
>>> gamma = 0.9
>>> discount_rewards = tl.rein.discount_episode_rewards(rewards, gamma)
>>> print(discount_rewards)
[ 0.72899997 0.81 0.89999998 1. 0.72899997 0.81
0.89999998 1. 0.72899997 0.81 0.89999998 1. ]
>>> discount_rewards = tl.rein.discount_episode_rewards(rewards, gamma, mode=1)
>>> print(discount_rewards)
[ 1.52110755 1.69011939 1.87791049 2.08656716 1.20729685 1.34144104
1.49048996 1.65610003 0.72899997 0.81 0.89999998 1. ]
"""
if rewards is None:
raise Exception("rewards should be a list")
discounted_r = np.zeros_like(rewards, dtype=np.float32)
running_add = 0
for t in reversed(xrange(0, rewards.size)):
if mode == 0:
if rewards[t] != 0: running_add = 0
running_add = running_add * gamma + rewards[t]
discounted_r[t] = running_add
    return discounted_r | python | (code column: verbatim duplicate of original_string, omitted) | [
"def",
"discount_episode_rewards",
"(",
"rewards",
"=",
"None",
",",
"gamma",
"=",
"0.99",
",",
"mode",
"=",
"0",
")",
":",
"if",
"rewards",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"rewards should be a list\"",
")",
"discounted_r",
"=",
"np",
".",
"zeros_like",
"(",
"rewards",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"running_add",
"=",
"0",
"for",
"t",
"in",
"reversed",
"(",
"xrange",
"(",
"0",
",",
"rewards",
".",
"size",
")",
")",
":",
"if",
"mode",
"==",
"0",
":",
"if",
"rewards",
"[",
"t",
"]",
"!=",
"0",
":",
"running_add",
"=",
"0",
"running_add",
"=",
"running_add",
"*",
"gamma",
"+",
"rewards",
"[",
"t",
"]",
"discounted_r",
"[",
"t",
"]",
"=",
"running_add",
"return",
"discounted_r"
] | Take 1D float array of rewards and compute discounted rewards for an
episode. When a non-zero value is encountered, it is treated as the end of an episode.
Parameters
----------
rewards : list
List of rewards
gamma : float
Discounted factor
mode : int
Mode for computing the discount rewards.
- If mode == 0, reset the discount process whenever a non-zero reward is encountered (as in the Pong game).
- If mode == 1, do not reset the discount process.
Returns
--------
list of float
The discounted rewards.
Examples
----------
>>> rewards = np.asarray([0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1])
>>> gamma = 0.9
>>> discount_rewards = tl.rein.discount_episode_rewards(rewards, gamma)
>>> print(discount_rewards)
[ 0.72899997 0.81 0.89999998 1. 0.72899997 0.81
0.89999998 1. 0.72899997 0.81 0.89999998 1. ]
>>> discount_rewards = tl.rein.discount_episode_rewards(rewards, gamma, mode=1)
>>> print(discount_rewards)
[ 1.52110755 1.69011939 1.87791049 2.08656716 1.20729685 1.34144104
1.49048996 1.65610003 0.72899997 0.81 0.89999998 1. ] | ["Take", "1D", "float", "array", "of", "rewards", "and", "compute", "discounted", "rewards", "for", "an", "episode", ".", "When", "encount", "a", "non", "-", "zero", "value", "consider", "as", "the", "end", "a", "of", "an", "episode", "."] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/rein.py#L18-L62 | valid |
tensorlayer/tensorlayer | tensorlayer/rein.py | cross_entropy_reward_loss | def cross_entropy_reward_loss(logits, actions, rewards, name=None):
"""Calculate the loss for Policy Gradient Network.
Parameters
----------
logits : tensor
The network outputs without softmax. This function implements softmax inside.
actions : tensor or placeholder
The agent actions.
rewards : tensor or placeholder
The rewards.
Returns
--------
Tensor
The TensorFlow loss function.
Examples
----------
>>> states_batch_pl = tf.placeholder(tf.float32, shape=[None, D])
>>> network = InputLayer(states_batch_pl, name='input')
>>> network = DenseLayer(network, n_units=H, act=tf.nn.relu, name='relu1')
>>> network = DenseLayer(network, n_units=3, name='out')
>>> probs = network.outputs
>>> sampling_prob = tf.nn.softmax(probs)
>>> actions_batch_pl = tf.placeholder(tf.int32, shape=[None])
>>> discount_rewards_batch_pl = tf.placeholder(tf.float32, shape=[None])
>>> loss = tl.rein.cross_entropy_reward_loss(probs, actions_batch_pl, discount_rewards_batch_pl)
>>> train_op = tf.train.RMSPropOptimizer(learning_rate, decay_rate).minimize(loss)
"""
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=actions, logits=logits, name=name)
    return tf.reduce_sum(tf.multiply(cross_entropy, rewards)) | python | (code column: verbatim duplicate of original_string, omitted) | [
"def",
"cross_entropy_reward_loss",
"(",
"logits",
",",
"actions",
",",
"rewards",
",",
"name",
"=",
"None",
")",
":",
"cross_entropy",
"=",
"tf",
".",
"nn",
".",
"sparse_softmax_cross_entropy_with_logits",
"(",
"labels",
"=",
"actions",
",",
"logits",
"=",
"logits",
",",
"name",
"=",
"name",
")",
"return",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"multiply",
"(",
"cross_entropy",
",",
"rewards",
")",
")"
] | Calculate the loss for Policy Gradient Network.
Parameters
----------
logits : tensor
The network outputs without softmax. This function implements softmax inside.
actions : tensor or placeholder
The agent actions.
rewards : tensor or placeholder
The rewards.
Returns
--------
Tensor
The TensorFlow loss function.
Examples
----------
>>> states_batch_pl = tf.placeholder(tf.float32, shape=[None, D])
>>> network = InputLayer(states_batch_pl, name='input')
>>> network = DenseLayer(network, n_units=H, act=tf.nn.relu, name='relu1')
>>> network = DenseLayer(network, n_units=3, name='out')
>>> probs = network.outputs
>>> sampling_prob = tf.nn.softmax(probs)
>>> actions_batch_pl = tf.placeholder(tf.int32, shape=[None])
>>> discount_rewards_batch_pl = tf.placeholder(tf.float32, shape=[None])
>>> loss = tl.rein.cross_entropy_reward_loss(probs, actions_batch_pl, discount_rewards_batch_pl)
>>> train_op = tf.train.RMSPropOptimizer(learning_rate, decay_rate).minimize(loss) | ["Calculate", "the", "loss", "for", "Policy", "Gradient", "Network", "."] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/rein.py#L65-L98 | valid |
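To round out the docstring's example, a hedged sketch of a single policy-gradient update; every name here (the placeholders, train_op, sess, and the input size D) is assumed from that example, not from this record.

import numpy as np

# Assumes the graph from the docstring example above and an initialized tf.Session `sess`.
D = 6400                                                        # e.g. a flattened 80x80 frame
states = np.random.rand(4, D).astype(np.float32)                # fake batch of observations
actions = np.array([0, 1, 2, 1], dtype=np.int32)                # actions actually taken
returns = np.array([1.0, 0.9, 0.81, 0.729], dtype=np.float32)   # discounted rewards
sess.run(train_op, feed_dict={states_batch_pl: states,
                              actions_batch_pl: actions,
                              discount_rewards_batch_pl: returns})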
tensorlayer/tensorlayer | tensorlayer/rein.py | log_weight | def log_weight(probs, weights, name='log_weight'):
"""Log weight.
Parameters
-----------
probs : tensor
If it is a network output, usually we should scale it to [0, 1] via softmax.
weights : tensor
The weights.
Returns
--------
Tensor
        The Tensor after applying the log weighted expression.
"""
with tf.variable_scope(name):
exp_v = tf.reduce_mean(tf.log(probs) * weights)
        return exp_v | python | (code column: verbatim duplicate of original_string, omitted) | [
"def",
"log_weight",
"(",
"probs",
",",
"weights",
",",
"name",
"=",
"'log_weight'",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
")",
":",
"exp_v",
"=",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"log",
"(",
"probs",
")",
"*",
"weights",
")",
"return",
"exp_v"
] | Log weight.
Parameters
-----------
probs : tensor
If it is a network output, usually we should scale it to [0, 1] via softmax.
weights : tensor
The weights.
Returns
--------
Tensor
The Tensor after applying the log weighted expression. | ["Log", "weight", "."] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/rein.py#L101-L119 | valid |
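log_weight computes the REINFORCE-style objective mean(log(probs) * weights); a small illustrative sketch (the constants are made up):

import tensorflow as tf
import tensorlayer as tl

probs = tf.constant([0.9, 0.5, 0.1])        # probabilities of the actions actually taken
weights = tf.constant([1.0, 0.5, -1.0])     # e.g. discounted returns per step
exp_v = tl.rein.log_weight(probs, weights)  # scalar: mean of log(probs) * weights
# exp_v should be *maximized*; with a minimizer, train on its negation:
# train_op = tf.train.AdamOptimizer(1e-3).minimize(-exp_v)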
tensorlayer/tensorlayer | tensorlayer/rein.py | choice_action_by_probs | def choice_action_by_probs(probs=(0.5, 0.5), action_list=None):
"""Choice and return an an action by given the action probability distribution.
Parameters
------------
probs : list of float.
The probability distribution of all actions.
action_list : None or a list of int or others
        A list of actions as integers, strings, or other types. If None, an integer between 0 and len(probs)-1 is returned.
Returns
--------
float int or str
The chosen action.
Examples
----------
>>> for _ in range(5):
>>> a = choice_action_by_probs([0.2, 0.4, 0.4])
>>> print(a)
0
1
1
2
1
>>> for _ in range(3):
>>> a = choice_action_by_probs([0.5, 0.5], ['a', 'b'])
>>> print(a)
a
b
b
"""
if action_list is None:
n_action = len(probs)
action_list = np.arange(n_action)
else:
if len(action_list) != len(probs):
raise Exception("number of actions should equal to number of probabilities.")
    return np.random.choice(action_list, p=probs) | python | (code column: verbatim duplicate of original_string, omitted) | [
"def",
"choice_action_by_probs",
"(",
"probs",
"=",
"(",
"0.5",
",",
"0.5",
")",
",",
"action_list",
"=",
"None",
")",
":",
"if",
"action_list",
"is",
"None",
":",
"n_action",
"=",
"len",
"(",
"probs",
")",
"action_list",
"=",
"np",
".",
"arange",
"(",
"n_action",
")",
"else",
":",
"if",
"len",
"(",
"action_list",
")",
"!=",
"len",
"(",
"probs",
")",
":",
"raise",
"Exception",
"(",
"\"number of actions should equal to number of probabilities.\"",
")",
"return",
"np",
".",
"random",
".",
"choice",
"(",
"action_list",
",",
"p",
"=",
"probs",
")"
] | Choose and return an action, given an action probability distribution.
Parameters
------------
probs : list of float.
The probability distribution of all actions.
action_list : None or a list of int or others
A list of actions as integers, strings, or other types. If None, an integer between 0 and len(probs)-1 is returned.
Returns
--------
float int or str
The chosen action.
Examples
----------
>>> for _ in range(5):
>>> a = choice_action_by_probs([0.2, 0.4, 0.4])
>>> print(a)
0
1
1
2
1
>>> for _ in range(3):
>>> a = choice_action_by_probs([0.5, 0.5], ['a', 'b'])
>>> print(a)
a
b
b | ["Choice", "and", "return", "an", "an", "action", "by", "given", "the", "action", "probability", "distribution", "."] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/rein.py#L122-L161 | valid |
tensorlayer/tensorlayer | tensorlayer/cost.py | cross_entropy | def cross_entropy(output, target, name=None):
"""Softmax cross-entropy operation, returns the TensorFlow expression of cross-entropy for two distributions,
it implements softmax internally. See ``tf.nn.sparse_softmax_cross_entropy_with_logits``.
Parameters
----------
output : Tensor
A batch of distribution with shape: [batch_size, num of classes].
target : Tensor
A batch of index with shape: [batch_size, ].
name : string
Name of this loss.
Examples
--------
>>> ce = tl.cost.cross_entropy(y_logits, y_target_logits, 'my_loss')
References
-----------
- About cross-entropy: `<https://en.wikipedia.org/wiki/Cross_entropy>`__.
- The code is borrowed from: `<https://en.wikipedia.org/wiki/Cross_entropy>`__.
"""
if name is None:
raise Exception("Please give a unique name to tl.cost.cross_entropy for TF1.0+")
    return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output), name=name) | python | (code column: verbatim duplicate of original_string, omitted) | [
"def",
"cross_entropy",
"(",
"output",
",",
"target",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"Please give a unique name to tl.cost.cross_entropy for TF1.0+\"",
")",
"return",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"nn",
".",
"sparse_softmax_cross_entropy_with_logits",
"(",
"labels",
"=",
"target",
",",
"logits",
"=",
"output",
")",
",",
"name",
"=",
"name",
")"
] | Softmax cross-entropy operation, returns the TensorFlow expression of cross-entropy for two distributions,
it implements softmax internally. See ``tf.nn.sparse_softmax_cross_entropy_with_logits``.
Parameters
----------
output : Tensor
A batch of distribution with shape: [batch_size, num of classes].
target : Tensor
A batch of index with shape: [batch_size, ].
name : string
Name of this loss.
Examples
--------
>>> ce = tl.cost.cross_entropy(y_logits, y_target_logits, 'my_loss')
References
-----------
- About cross-entropy: `<https://en.wikipedia.org/wiki/Cross_entropy>`__.
- The code is borrowed from: `<https://en.wikipedia.org/wiki/Cross_entropy>`__. | ["Softmax", "cross", "-", "entropy", "operation", "returns", "the", "TensorFlow", "expression", "of", "cross", "-", "entropy", "for", "two", "distributions", "it", "implements", "softmax", "internally", ".", "See", "tf", ".", "nn", ".", "sparse_softmax_cross_entropy_with_logits", "."] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/cost.py#L33-L58 | valid |
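A tiny numeric sketch of tl.cost.cross_entropy; the logits and labels are made up, and note that a unique name is mandatory here (the function raises otherwise).

import tensorflow as tf
import tensorlayer as tl

logits = tf.constant([[2.0, 0.5, -1.0], [0.1, 0.2, 3.0]])   # [batch, n_classes]
labels = tf.constant([0, 2], dtype=tf.int64)                # integer class indices
ce = tl.cost.cross_entropy(logits, labels, name='demo_ce')
with tf.Session() as sess:
    print(sess.run(ce))  # scalar mean cross-entropy over the batch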
tensorlayer/tensorlayer | tensorlayer/cost.py | sigmoid_cross_entropy | def sigmoid_cross_entropy(output, target, name=None):
"""Sigmoid cross-entropy operation, see ``tf.nn.sigmoid_cross_entropy_with_logits``.
Parameters
----------
output : Tensor
A batch of distribution with shape: [batch_size, num of classes].
target : Tensor
A batch of index with shape: [batch_size, ].
name : string
Name of this loss.
"""
    return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output), name=name) | python | (code column: verbatim duplicate of original_string, omitted) | [
"def",
"sigmoid_cross_entropy",
"(",
"output",
",",
"target",
",",
"name",
"=",
"None",
")",
":",
"return",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"nn",
".",
"sigmoid_cross_entropy_with_logits",
"(",
"labels",
"=",
"target",
",",
"logits",
"=",
"output",
")",
",",
"name",
"=",
"name",
")"
] | Sigmoid cross-entropy operation, see ``tf.nn.sigmoid_cross_entropy_with_logits``.
Parameters
----------
output : Tensor
A batch of distribution with shape: [batch_size, num of classes].
target : Tensor
A batch of index with shape: [batch_size, ].
name : string
Name of this loss. | ["Sigmoid", "cross", "-", "entropy", "operation", "see", "tf", ".", "nn", ".", "sigmoid_cross_entropy_with_logits", "."] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/cost.py#L61-L74 | valid |
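The sigmoid variant takes dense 0/1 targets rather than class indices; a minimal made-up example:

import tensorflow as tf
import tensorlayer as tl

logits = tf.constant([[1.5, -2.0], [-0.5, 3.0]])  # per-label logits
targets = tf.constant([[1.0, 0.0], [0.0, 1.0]])   # multi-label 0/1 targets
loss = tl.cost.sigmoid_cross_entropy(logits, targets, name='demo_sce')
with tf.Session() as sess:
    print(sess.run(loss))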
tensorlayer/tensorlayer | tensorlayer/cost.py | binary_cross_entropy | def binary_cross_entropy(output, target, epsilon=1e-8, name='bce_loss'):
"""Binary cross entropy operation.
Parameters
----------
output : Tensor
Tensor with type of `float32` or `float64`.
target : Tensor
The target distribution, format the same with `output`.
epsilon : float
        A small value to avoid taking the log of zero.
name : str
An optional name to attach to this function.
References
-----------
- `ericjang-DRAW <https://github.com/ericjang/draw/blob/master/draw.py#L73>`__
"""
# with ops.op_scope([output, target], name, "bce_loss") as name:
# output = ops.convert_to_tensor(output, name="preds")
# target = ops.convert_to_tensor(targets, name="target")
# with tf.name_scope(name):
return tf.reduce_mean(
tf.reduce_sum(-(target * tf.log(output + epsilon) + (1. - target) * tf.log(1. - output + epsilon)), axis=1),
name=name
    ) | python | (code column: verbatim duplicate of original_string, omitted) | [
"def",
"binary_cross_entropy",
"(",
"output",
",",
"target",
",",
"epsilon",
"=",
"1e-8",
",",
"name",
"=",
"'bce_loss'",
")",
":",
"# with ops.op_scope([output, target], name, \"bce_loss\") as name:",
"# output = ops.convert_to_tensor(output, name=\"preds\")",
"# target = ops.convert_to_tensor(targets, name=\"target\")",
"# with tf.name_scope(name):",
"return",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"reduce_sum",
"(",
"-",
"(",
"target",
"*",
"tf",
".",
"log",
"(",
"output",
"+",
"epsilon",
")",
"+",
"(",
"1.",
"-",
"target",
")",
"*",
"tf",
".",
"log",
"(",
"1.",
"-",
"output",
"+",
"epsilon",
")",
")",
",",
"axis",
"=",
"1",
")",
",",
"name",
"=",
"name",
")"
] | Binary cross entropy operation.
Parameters
----------
output : Tensor
Tensor with type of `float32` or `float64`.
target : Tensor
The target distribution, format the same with `output`.
epsilon : float
A small value to avoid taking the log of zero.
name : str
An optional name to attach to this function.
References
-----------
- `ericjang-DRAW <https://github.com/ericjang/draw/blob/master/draw.py#L73>`__ | ["Binary", "cross", "entropy", "operation", "."] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/cost.py#L77-L104 | valid |
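Unlike the sigmoid version above, binary_cross_entropy expects probabilities already squashed into (0, 1); a hedged numeric sketch:

import tensorflow as tf
import tensorlayer as tl

preds = tf.constant([[0.9, 0.1], [0.2, 0.8]])       # outputs of a final sigmoid
targets = tf.constant([[1.0, 0.0], [0.0, 1.0]])
bce = tl.cost.binary_cross_entropy(preds, targets)  # sum over features, mean over the batch
with tf.Session() as sess:
    print(sess.run(bce))  # ~0.33 for these values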
tensorlayer/tensorlayer | tensorlayer/cost.py | mean_squared_error | def mean_squared_error(output, target, is_mean=False, name="mean_squared_error"):
"""Return the TensorFlow expression of mean-square-error (L2) of two batch of data.
Parameters
----------
output : Tensor
2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel].
target : Tensor
The target distribution, format the same with `output`.
is_mean : boolean
Whether compute the mean or sum for each example.
- If True, use ``tf.reduce_mean`` to compute the loss between one target and predict data.
- If False, use ``tf.reduce_sum`` (default).
name : str
An optional name to attach to this function.
References
------------
- `Wiki Mean Squared Error <https://en.wikipedia.org/wiki/Mean_squared_error>`__
"""
# with tf.name_scope(name):
if output.get_shape().ndims == 2: # [batch_size, n_feature]
if is_mean:
mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), 1), name=name)
else:
mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), 1), name=name)
elif output.get_shape().ndims == 3: # [batch_size, w, h]
if is_mean:
mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), [1, 2]), name=name)
else:
mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), [1, 2]), name=name)
elif output.get_shape().ndims == 4: # [batch_size, w, h, c]
if is_mean:
mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), [1, 2, 3]), name=name)
else:
mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), [1, 2, 3]), name=name)
else:
raise Exception("Unknow dimension")
    return mse | python | (code column: verbatim duplicate of original_string, omitted) | [
"def",
"mean_squared_error",
"(",
"output",
",",
"target",
",",
"is_mean",
"=",
"False",
",",
"name",
"=",
"\"mean_squared_error\"",
")",
":",
"# with tf.name_scope(name):",
"if",
"output",
".",
"get_shape",
"(",
")",
".",
"ndims",
"==",
"2",
":",
"# [batch_size, n_feature]",
"if",
"is_mean",
":",
"mse",
"=",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"squared_difference",
"(",
"output",
",",
"target",
")",
",",
"1",
")",
",",
"name",
"=",
"name",
")",
"else",
":",
"mse",
"=",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"squared_difference",
"(",
"output",
",",
"target",
")",
",",
"1",
")",
",",
"name",
"=",
"name",
")",
"elif",
"output",
".",
"get_shape",
"(",
")",
".",
"ndims",
"==",
"3",
":",
"# [batch_size, w, h]",
"if",
"is_mean",
":",
"mse",
"=",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"squared_difference",
"(",
"output",
",",
"target",
")",
",",
"[",
"1",
",",
"2",
"]",
")",
",",
"name",
"=",
"name",
")",
"else",
":",
"mse",
"=",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"squared_difference",
"(",
"output",
",",
"target",
")",
",",
"[",
"1",
",",
"2",
"]",
")",
",",
"name",
"=",
"name",
")",
"elif",
"output",
".",
"get_shape",
"(",
")",
".",
"ndims",
"==",
"4",
":",
"# [batch_size, w, h, c]",
"if",
"is_mean",
":",
"mse",
"=",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"squared_difference",
"(",
"output",
",",
"target",
")",
",",
"[",
"1",
",",
"2",
",",
"3",
"]",
")",
",",
"name",
"=",
"name",
")",
"else",
":",
"mse",
"=",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"squared_difference",
"(",
"output",
",",
"target",
")",
",",
"[",
"1",
",",
"2",
",",
"3",
"]",
")",
",",
"name",
"=",
"name",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Unknow dimension\"",
")",
"return",
"mse"
] | Return the TensorFlow expression of mean-square-error (L2) of two batches of data.
Parameters
----------
output : Tensor
2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel].
target : Tensor
The target distribution, format the same with `output`.
is_mean : boolean
Whether compute the mean or sum for each example.
- If True, use ``tf.reduce_mean`` to compute the loss between one target and predict data.
- If False, use ``tf.reduce_sum`` (default).
name : str
An optional name to attach to this function.
References
------------
- `Wiki Mean Squared Error <https://en.wikipedia.org/wiki/Mean_squared_error>`__ | ["Return", "the", "TensorFlow", "expression", "of", "mean", "-", "square", "-", "error", "(", "L2", ")", "of", "two", "batch", "of", "data", "."] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/cost.py#L111-L150 | valid |
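A small sketch contrasting the is_mean switch of mean_squared_error; the tensors are arbitrary:

import tensorflow as tf
import tensorlayer as tl

y_pred = tf.constant([[1.0, 2.0], [3.0, 4.0]])
y_true = tf.constant([[1.0, 0.0], [3.0, 2.0]])
mse_sum = tl.cost.mean_squared_error(y_pred, y_true, is_mean=False, name='mse_sum')   # per-example sum of squares
mse_mean = tl.cost.mean_squared_error(y_pred, y_true, is_mean=True, name='mse_mean')  # per-example mean of squares
with tf.Session() as sess:
    print(sess.run([mse_sum, mse_mean]))  # [4.0, 2.0]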
tensorlayer/tensorlayer | tensorlayer/cost.py | normalized_mean_square_error | def normalized_mean_square_error(output, target, name="normalized_mean_squared_error_loss"):
"""Return the TensorFlow expression of normalized mean-square-error of two distributions.
Parameters
----------
output : Tensor
2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel].
target : Tensor
The target distribution, format the same with `output`.
name : str
An optional name to attach to this function.
"""
# with tf.name_scope("normalized_mean_squared_error_loss"):
if output.get_shape().ndims == 2: # [batch_size, n_feature]
nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=1))
nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=1))
elif output.get_shape().ndims == 3: # [batch_size, w, h]
nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[1, 2]))
nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[1, 2]))
elif output.get_shape().ndims == 4: # [batch_size, w, h, c]
nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[1, 2, 3]))
        nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[1, 2, 3]))
    else:
        raise Exception("Unknown dimension")  # guard against unexpected ranks, as in the sibling losses
    nmse = tf.reduce_mean(nmse_a / nmse_b, name=name)
    return nmse | python | (code column: verbatim duplicate of original_string, omitted) | [
"def",
"normalized_mean_square_error",
"(",
"output",
",",
"target",
",",
"name",
"=",
"\"normalized_mean_squared_error_loss\"",
")",
":",
"# with tf.name_scope(\"normalized_mean_squared_error_loss\"):",
"if",
"output",
".",
"get_shape",
"(",
")",
".",
"ndims",
"==",
"2",
":",
"# [batch_size, n_feature]",
"nmse_a",
"=",
"tf",
".",
"sqrt",
"(",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"squared_difference",
"(",
"output",
",",
"target",
")",
",",
"axis",
"=",
"1",
")",
")",
"nmse_b",
"=",
"tf",
".",
"sqrt",
"(",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"square",
"(",
"target",
")",
",",
"axis",
"=",
"1",
")",
")",
"elif",
"output",
".",
"get_shape",
"(",
")",
".",
"ndims",
"==",
"3",
":",
"# [batch_size, w, h]",
"nmse_a",
"=",
"tf",
".",
"sqrt",
"(",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"squared_difference",
"(",
"output",
",",
"target",
")",
",",
"axis",
"=",
"[",
"1",
",",
"2",
"]",
")",
")",
"nmse_b",
"=",
"tf",
".",
"sqrt",
"(",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"square",
"(",
"target",
")",
",",
"axis",
"=",
"[",
"1",
",",
"2",
"]",
")",
")",
"elif",
"output",
".",
"get_shape",
"(",
")",
".",
"ndims",
"==",
"4",
":",
"# [batch_size, w, h, c]",
"nmse_a",
"=",
"tf",
".",
"sqrt",
"(",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"squared_difference",
"(",
"output",
",",
"target",
")",
",",
"axis",
"=",
"[",
"1",
",",
"2",
",",
"3",
"]",
")",
")",
"nmse_b",
"=",
"tf",
".",
"sqrt",
"(",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"square",
"(",
"target",
")",
",",
"axis",
"=",
"[",
"1",
",",
"2",
",",
"3",
"]",
")",
")",
"nmse",
"=",
"tf",
".",
"reduce_mean",
"(",
"nmse_a",
"/",
"nmse_b",
",",
"name",
"=",
"name",
")",
"return",
"nmse"
] | Return the TensorFlow expression of normalized mean-square-error of two distributions.
Parameters
----------
output : Tensor
2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel].
target : Tensor
The target distribution, format the same with `output`.
name : str
An optional name to attach to this function. | ["Return", "the", "TensorFlow", "expression", "of", "normalized", "mean", "-", "square", "-", "error", "of", "two", "distributions", "."] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/cost.py#L153-L177 | valid |
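normalized_mean_square_error divides each example's error norm by the norm of its target; a made-up check where the prediction is off by 10% everywhere:

import tensorflow as tf
import tensorlayer as tl

y_true = tf.constant([[3.0, 4.0]])   # norm 5
y_pred = y_true * 1.1                # error vector [0.3, 0.4], norm 0.5
nmse = tl.cost.normalized_mean_square_error(y_pred, y_true)
with tf.Session() as sess:
    print(sess.run(nmse))            # ~0.1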
tensorlayer/tensorlayer | tensorlayer/cost.py | absolute_difference_error | def absolute_difference_error(output, target, is_mean=False, name="absolute_difference_error_loss"):
"""Return the TensorFlow expression of absolute difference error (L1) of two batch of data.
Parameters
----------
output : Tensor
2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel].
target : Tensor
The target distribution, format the same with `output`.
is_mean : boolean
Whether compute the mean or sum for each example.
- If True, use ``tf.reduce_mean`` to compute the loss between one target and predict data.
- If False, use ``tf.reduce_sum`` (default).
name : str
An optional name to attach to this function.
"""
# with tf.name_scope("absolute_difference_error_loss"):
if output.get_shape().ndims == 2: # [batch_size, n_feature]
if is_mean:
loss = tf.reduce_mean(tf.reduce_mean(tf.abs(output - target), 1), name=name)
else:
loss = tf.reduce_mean(tf.reduce_sum(tf.abs(output - target), 1), name=name)
elif output.get_shape().ndims == 3: # [batch_size, w, h]
if is_mean:
loss = tf.reduce_mean(tf.reduce_mean(tf.abs(output - target), [1, 2]), name=name)
else:
loss = tf.reduce_mean(tf.reduce_sum(tf.abs(output - target), [1, 2]), name=name)
elif output.get_shape().ndims == 4: # [batch_size, w, h, c]
if is_mean:
loss = tf.reduce_mean(tf.reduce_mean(tf.abs(output - target), [1, 2, 3]), name=name)
else:
loss = tf.reduce_mean(tf.reduce_sum(tf.abs(output - target), [1, 2, 3]), name=name)
else:
raise Exception("Unknow dimension")
    return loss | python | (code column: verbatim duplicate of original_string, omitted) | [
"def",
"absolute_difference_error",
"(",
"output",
",",
"target",
",",
"is_mean",
"=",
"False",
",",
"name",
"=",
"\"absolute_difference_error_loss\"",
")",
":",
"# with tf.name_scope(\"absolute_difference_error_loss\"):",
"if",
"output",
".",
"get_shape",
"(",
")",
".",
"ndims",
"==",
"2",
":",
"# [batch_size, n_feature]",
"if",
"is_mean",
":",
"loss",
"=",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"abs",
"(",
"output",
"-",
"target",
")",
",",
"1",
")",
",",
"name",
"=",
"name",
")",
"else",
":",
"loss",
"=",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"abs",
"(",
"output",
"-",
"target",
")",
",",
"1",
")",
",",
"name",
"=",
"name",
")",
"elif",
"output",
".",
"get_shape",
"(",
")",
".",
"ndims",
"==",
"3",
":",
"# [batch_size, w, h]",
"if",
"is_mean",
":",
"loss",
"=",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"abs",
"(",
"output",
"-",
"target",
")",
",",
"[",
"1",
",",
"2",
"]",
")",
",",
"name",
"=",
"name",
")",
"else",
":",
"loss",
"=",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"abs",
"(",
"output",
"-",
"target",
")",
",",
"[",
"1",
",",
"2",
"]",
")",
",",
"name",
"=",
"name",
")",
"elif",
"output",
".",
"get_shape",
"(",
")",
".",
"ndims",
"==",
"4",
":",
"# [batch_size, w, h, c]",
"if",
"is_mean",
":",
"loss",
"=",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"abs",
"(",
"output",
"-",
"target",
")",
",",
"[",
"1",
",",
"2",
",",
"3",
"]",
")",
",",
"name",
"=",
"name",
")",
"else",
":",
"loss",
"=",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"abs",
"(",
"output",
"-",
"target",
")",
",",
"[",
"1",
",",
"2",
",",
"3",
"]",
")",
",",
"name",
"=",
"name",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Unknow dimension\"",
")",
"return",
"loss"
] | Return the TensorFlow expression of absolute difference error (L1) of two batches of data.
Parameters
----------
output : Tensor
2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel].
target : Tensor
The target distribution, format the same with `output`.
is_mean : boolean
Whether compute the mean or sum for each example.
- If True, use ``tf.reduce_mean`` to compute the loss between one target and predict data.
- If False, use ``tf.reduce_sum`` (default).
name : str
An optional name to attach to this function. | ["Return", "the", "TensorFlow", "expression", "of", "absolute", "difference", "error", "(", "L1", ")", "of", "two", "batch", "of", "data", "."] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/cost.py#L180-L215 | valid |
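The L1 counterpart follows the same shape conventions as the L2 losses above; a two-line numeric sketch:

import tensorflow as tf
import tensorlayer as tl

y_pred = tf.constant([[1.0, 2.0], [3.0, 4.0]])
y_true = tf.constant([[0.0, 2.0], [3.0, 6.0]])
l1 = tl.cost.absolute_difference_error(y_pred, y_true, is_mean=True)  # mean |diff| per example, then batch mean
with tf.Session() as sess:
    print(sess.run(l1))  # mean([0.5, 1.0]) = 0.75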
tensorlayer/tensorlayer | tensorlayer/cost.py | dice_coe | def dice_coe(output, target, loss_type='jaccard', axis=(1, 2, 3), smooth=1e-5):
"""Soft dice (Sørensen or Jaccard) coefficient for comparing the similarity
of two batch of data, usually be used for binary image segmentation
i.e. labels are binary. The coefficient between 0 to 1, 1 means totally match.
Parameters
-----------
output : Tensor
A distribution with shape: [batch_size, ....], (any dimensions).
target : Tensor
The target distribution, format the same with `output`.
loss_type : str
``jaccard`` or ``sorensen``, default is ``jaccard``.
axis : tuple of int
All dimensions are reduced, default ``[1,2,3]``.
smooth : float
This small value will be added to the numerator and denominator.
- If both output and target are empty, it makes sure dice is 1.
        - If either output or target are empty (all pixels are background), dice = ``smooth/(small_value + smooth)``; if smooth is very small, dice is close to 0 (even when the image values are below the threshold), so a larger smooth gives a larger dice.
Examples
---------
>>> outputs = tl.act.pixel_wise_softmax(network.outputs)
>>> dice_loss = 1 - tl.cost.dice_coe(outputs, y_)
References
-----------
- `Wiki-Dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`__
"""
inse = tf.reduce_sum(output * target, axis=axis)
if loss_type == 'jaccard':
l = tf.reduce_sum(output * output, axis=axis)
r = tf.reduce_sum(target * target, axis=axis)
elif loss_type == 'sorensen':
l = tf.reduce_sum(output, axis=axis)
r = tf.reduce_sum(target, axis=axis)
else:
raise Exception("Unknow loss_type")
# old axis=[0,1,2,3]
# dice = 2 * (inse) / (l + r)
# epsilon = 1e-5
# dice = tf.clip_by_value(dice, 0, 1.0-epsilon) # if all empty, dice = 1
# new haodong
dice = (2. * inse + smooth) / (l + r + smooth)
##
dice = tf.reduce_mean(dice, name='dice_coe')
return dice | python | def dice_coe(output, target, loss_type='jaccard', axis=(1, 2, 3), smooth=1e-5):
"""Soft dice (Sørensen or Jaccard) coefficient for comparing the similarity
of two batch of data, usually be used for binary image segmentation
i.e. labels are binary. The coefficient between 0 to 1, 1 means totally match.
Parameters
-----------
output : Tensor
A distribution with shape: [batch_size, ....], (any dimensions).
target : Tensor
The target distribution, format the same with `output`.
loss_type : str
``jaccard`` or ``sorensen``, default is ``jaccard``.
axis : tuple of int
All dimensions are reduced, default ``[1,2,3]``.
smooth : float
This small value will be added to the numerator and denominator.
- If both output and target are empty, it makes sure dice is 1.
- If either output or target is empty (all pixels are background), dice = ``smooth/(small_value + smooth)``; if smooth is very small, dice is close to 0 (even when the image values are below the threshold), so in this case a larger smooth gives a larger dice.
Examples
---------
>>> outputs = tl.act.pixel_wise_softmax(network.outputs)
>>> dice_loss = 1 - tl.cost.dice_coe(outputs, y_)
References
-----------
- `Wiki-Dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`__
"""
inse = tf.reduce_sum(output * target, axis=axis)
if loss_type == 'jaccard':
l = tf.reduce_sum(output * output, axis=axis)
r = tf.reduce_sum(target * target, axis=axis)
elif loss_type == 'sorensen':
l = tf.reduce_sum(output, axis=axis)
r = tf.reduce_sum(target, axis=axis)
else:
raise Exception("Unknow loss_type")
# old axis=[0,1,2,3]
# dice = 2 * (inse) / (l + r)
# epsilon = 1e-5
# dice = tf.clip_by_value(dice, 0, 1.0-epsilon) # if all empty, dice = 1
# new haodong
dice = (2. * inse + smooth) / (l + r + smooth)
##
dice = tf.reduce_mean(dice, name='dice_coe')
return dice | [
"def",
"dice_coe",
"(",
"output",
",",
"target",
",",
"loss_type",
"=",
"'jaccard'",
",",
"axis",
"=",
"(",
"1",
",",
"2",
",",
"3",
")",
",",
"smooth",
"=",
"1e-5",
")",
":",
"inse",
"=",
"tf",
".",
"reduce_sum",
"(",
"output",
"*",
"target",
",",
"axis",
"=",
"axis",
")",
"if",
"loss_type",
"==",
"'jaccard'",
":",
"l",
"=",
"tf",
".",
"reduce_sum",
"(",
"output",
"*",
"output",
",",
"axis",
"=",
"axis",
")",
"r",
"=",
"tf",
".",
"reduce_sum",
"(",
"target",
"*",
"target",
",",
"axis",
"=",
"axis",
")",
"elif",
"loss_type",
"==",
"'sorensen'",
":",
"l",
"=",
"tf",
".",
"reduce_sum",
"(",
"output",
",",
"axis",
"=",
"axis",
")",
"r",
"=",
"tf",
".",
"reduce_sum",
"(",
"target",
",",
"axis",
"=",
"axis",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Unknow loss_type\"",
")",
"# old axis=[0,1,2,3]",
"# dice = 2 * (inse) / (l + r)",
"# epsilon = 1e-5",
"# dice = tf.clip_by_value(dice, 0, 1.0-epsilon) # if all empty, dice = 1",
"# new haodong",
"dice",
"=",
"(",
"2.",
"*",
"inse",
"+",
"smooth",
")",
"/",
"(",
"l",
"+",
"r",
"+",
"smooth",
")",
"##",
"dice",
"=",
"tf",
".",
"reduce_mean",
"(",
"dice",
",",
"name",
"=",
"'dice_coe'",
")",
"return",
"dice"
] | Soft dice (Sørensen or Jaccard) coefficient for comparing the similarity
of two batches of data, usually used for binary image segmentation,
i.e. labels are binary. The coefficient ranges from 0 to 1; a value of 1 means a perfect match.
Parameters
-----------
output : Tensor
A distribution with shape: [batch_size, ....], (any dimensions).
target : Tensor
The target distribution, format the same with `output`.
loss_type : str
``jaccard`` or ``sorensen``, default is ``jaccard``.
axis : tuple of int
All dimensions are reduced, default ``[1,2,3]``.
smooth : float
This small value will be added to the numerator and denominator.
- If both output and target are empty, it makes sure dice is 1.
- If either output or target is empty (all pixels are background), dice = ``smooth/(small_value + smooth)``; if smooth is very small, dice is close to 0 (even when the image values are below the threshold), so in this case a larger smooth gives a larger dice.
Examples
---------
>>> outputs = tl.act.pixel_wise_softmax(network.outputs)
>>> dice_loss = 1 - tl.cost.dice_coe(outputs, y_)
References
-----------
- `Wiki-Dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`__ | [
"Soft",
"dice",
"(",
"Sørensen",
"or",
"Jaccard",
")",
"coefficient",
"for",
"comparing",
"the",
"similarity",
"of",
"two",
"batch",
"of",
"data",
"usually",
"be",
"used",
"for",
"binary",
"image",
"segmentation",
"i",
".",
"e",
".",
"labels",
"are",
"binary",
".",
"The",
"coefficient",
"between",
"0",
"to",
"1",
"1",
"means",
"totally",
"match",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/cost.py#L218-L265 | valid |
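A minimal training-loss sketch for ``dice_coe``, following the docstring's own example (TensorFlow 1.x; the placeholder shapes are illustrative):

import tensorflow as tf
import tensorlayer as tl

# Illustrative binary segmentation maps: [batch, height, width, channel].
outputs = tf.placeholder(tf.float32, [8, 256, 256, 1])
y_ = tf.placeholder(tf.float32, [8, 256, 256, 1])
# Soft dice is differentiable, so 1 - dice_coe can serve as a training loss.
dice_loss = 1 - tl.cost.dice_coe(outputs, y_, loss_type='jaccard', axis=(1, 2, 3))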
tensorlayer/tensorlayer | tensorlayer/cost.py | dice_hard_coe | def dice_hard_coe(output, target, threshold=0.5, axis=(1, 2, 3), smooth=1e-5):
"""Non-differentiable Sørensen–Dice coefficient for comparing the similarity
of two batch of data, usually be used for binary image segmentation i.e. labels are binary.
The coefficient between 0 to 1, 1 if totally match.
Parameters
-----------
output : tensor
A distribution with shape: [batch_size, ....], (any dimensions).
target : tensor
The target distribution, format the same with `output`.
threshold : float
The threshold value to be true.
axis : tuple of integer
All dimensions are reduced, default ``(1,2,3)``.
smooth : float
This small value will be added to the numerator and denominator, see ``dice_coe``.
References
-----------
- `Wiki-Dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`__
"""
output = tf.cast(output > threshold, dtype=tf.float32)
target = tf.cast(target > threshold, dtype=tf.float32)
inse = tf.reduce_sum(tf.multiply(output, target), axis=axis)
l = tf.reduce_sum(output, axis=axis)
r = tf.reduce_sum(target, axis=axis)
# old axis=[0,1,2,3]
# hard_dice = 2 * (inse) / (l + r)
# epsilon = 1e-5
# hard_dice = tf.clip_by_value(hard_dice, 0, 1.0-epsilon)
# new haodong
hard_dice = (2. * inse + smooth) / (l + r + smooth)
##
hard_dice = tf.reduce_mean(hard_dice, name='hard_dice')
return hard_dice | python | def dice_hard_coe(output, target, threshold=0.5, axis=(1, 2, 3), smooth=1e-5):
"""Non-differentiable Sørensen–Dice coefficient for comparing the similarity
of two batch of data, usually be used for binary image segmentation i.e. labels are binary.
The coefficient between 0 to 1, 1 if totally match.
Parameters
-----------
output : tensor
A distribution with shape: [batch_size, ....], (any dimensions).
target : tensor
The target distribution, format the same with `output`.
threshold : float
The threshold value to be true.
axis : tuple of integer
All dimensions are reduced, default ``(1,2,3)``.
smooth : float
This small value will be added to the numerator and denominator, see ``dice_coe``.
References
-----------
- `Wiki-Dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`__
"""
output = tf.cast(output > threshold, dtype=tf.float32)
target = tf.cast(target > threshold, dtype=tf.float32)
inse = tf.reduce_sum(tf.multiply(output, target), axis=axis)
l = tf.reduce_sum(output, axis=axis)
r = tf.reduce_sum(target, axis=axis)
# old axis=[0,1,2,3]
# hard_dice = 2 * (inse) / (l + r)
# epsilon = 1e-5
# hard_dice = tf.clip_by_value(hard_dice, 0, 1.0-epsilon)
# new haodong
hard_dice = (2. * inse + smooth) / (l + r + smooth)
##
hard_dice = tf.reduce_mean(hard_dice, name='hard_dice')
return hard_dice | [
"def",
"dice_hard_coe",
"(",
"output",
",",
"target",
",",
"threshold",
"=",
"0.5",
",",
"axis",
"=",
"(",
"1",
",",
"2",
",",
"3",
")",
",",
"smooth",
"=",
"1e-5",
")",
":",
"output",
"=",
"tf",
".",
"cast",
"(",
"output",
">",
"threshold",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"target",
"=",
"tf",
".",
"cast",
"(",
"target",
">",
"threshold",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"inse",
"=",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"multiply",
"(",
"output",
",",
"target",
")",
",",
"axis",
"=",
"axis",
")",
"l",
"=",
"tf",
".",
"reduce_sum",
"(",
"output",
",",
"axis",
"=",
"axis",
")",
"r",
"=",
"tf",
".",
"reduce_sum",
"(",
"target",
",",
"axis",
"=",
"axis",
")",
"# old axis=[0,1,2,3]",
"# hard_dice = 2 * (inse) / (l + r)",
"# epsilon = 1e-5",
"# hard_dice = tf.clip_by_value(hard_dice, 0, 1.0-epsilon)",
"# new haodong",
"hard_dice",
"=",
"(",
"2.",
"*",
"inse",
"+",
"smooth",
")",
"/",
"(",
"l",
"+",
"r",
"+",
"smooth",
")",
"##",
"hard_dice",
"=",
"tf",
".",
"reduce_mean",
"(",
"hard_dice",
",",
"name",
"=",
"'hard_dice'",
")",
"return",
"hard_dice"
] | Non-differentiable Sørensen–Dice coefficient for comparing the similarity
of two batches of data, usually used for binary image segmentation, i.e. labels are binary.
The coefficient ranges from 0 to 1; a value of 1 means a perfect match.
Parameters
-----------
output : tensor
A distribution with shape: [batch_size, ....], (any dimensions).
target : tensor
The target distribution, format the same with `output`.
threshold : float
The threshold value to be true.
axis : tuple of integer
All dimensions are reduced, default ``(1,2,3)``.
smooth : float
This small value will be added to the numerator and denominator, see ``dice_coe``.
References
-----------
- `Wiki-Dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`__ | [
"Non",
"-",
"differentiable",
"Sørensen–Dice",
"coefficient",
"for",
"comparing",
"the",
"similarity",
"of",
"two",
"batch",
"of",
"data",
"usually",
"be",
"used",
"for",
"binary",
"image",
"segmentation",
"i",
".",
"e",
".",
"labels",
"are",
"binary",
".",
"The",
"coefficient",
"between",
"0",
"to",
"1",
"1",
"if",
"totally",
"match",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/cost.py#L268-L304 | valid |
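A minimal evaluation sketch for ``dice_hard_coe`` (TensorFlow 1.x; shapes illustrative). The thresholding makes it non-differentiable, so it serves as a metric rather than a training loss:

import tensorflow as tf
import tensorlayer as tl

outputs = tf.placeholder(tf.float32, [8, 256, 256, 1])
labels = tf.placeholder(tf.float32, [8, 256, 256, 1])
# Both inputs are binarized at 0.5 before the dice score is computed.
hard_dice = tl.cost.dice_hard_coe(outputs, labels, threshold=0.5)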
tensorlayer/tensorlayer | tensorlayer/cost.py | iou_coe | def iou_coe(output, target, threshold=0.5, axis=(1, 2, 3), smooth=1e-5):
"""Non-differentiable Intersection over Union (IoU) for comparing the
similarity of two batch of data, usually be used for evaluating binary image segmentation.
The coefficient between 0 to 1, and 1 means totally match.
Parameters
-----------
output : tensor
A batch of distribution with shape: [batch_size, ....], (any dimensions).
target : tensor
The target distribution, format the same with `output`.
threshold : float
The threshold value to be true.
axis : tuple of integer
All dimensions are reduced, default ``(1,2,3)``.
smooth : float
This small value will be added to the numerator and denominator, see ``dice_coe``.
Notes
------
- IoU cannot be used as a training loss; people usually use the dice coefficient for training, and IoU and hard dice for evaluation.
"""
pre = tf.cast(output > threshold, dtype=tf.float32)
truth = tf.cast(target > threshold, dtype=tf.float32)
inse = tf.reduce_sum(tf.multiply(pre, truth), axis=axis) # AND
union = tf.reduce_sum(tf.cast(tf.add(pre, truth) >= 1, dtype=tf.float32), axis=axis) # OR
# old axis=[0,1,2,3]
# epsilon = 1e-5
# batch_iou = inse / (union + epsilon)
# new haodong
batch_iou = (inse + smooth) / (union + smooth)
iou = tf.reduce_mean(batch_iou, name='iou_coe')
return iou | python | def iou_coe(output, target, threshold=0.5, axis=(1, 2, 3), smooth=1e-5):
"""Non-differentiable Intersection over Union (IoU) for comparing the
similarity of two batch of data, usually be used for evaluating binary image segmentation.
The coefficient between 0 to 1, and 1 means totally match.
Parameters
-----------
output : tensor
A batch of distribution with shape: [batch_size, ....], (any dimensions).
target : tensor
The target distribution, format the same with `output`.
threshold : float
The threshold value to be true.
axis : tuple of integer
All dimensions are reduced, default ``(1,2,3)``.
smooth : float
This small value will be added to the numerator and denominator, see ``dice_coe``.
Notes
------
- IoU cannot be used as a training loss; people usually use the dice coefficient for training, and IoU and hard dice for evaluation.
"""
pre = tf.cast(output > threshold, dtype=tf.float32)
truth = tf.cast(target > threshold, dtype=tf.float32)
inse = tf.reduce_sum(tf.multiply(pre, truth), axis=axis) # AND
union = tf.reduce_sum(tf.cast(tf.add(pre, truth) >= 1, dtype=tf.float32), axis=axis) # OR
# old axis=[0,1,2,3]
# epsilon = 1e-5
# batch_iou = inse / (union + epsilon)
# new haodong
batch_iou = (inse + smooth) / (union + smooth)
iou = tf.reduce_mean(batch_iou, name='iou_coe')
return iou | [
"def",
"iou_coe",
"(",
"output",
",",
"target",
",",
"threshold",
"=",
"0.5",
",",
"axis",
"=",
"(",
"1",
",",
"2",
",",
"3",
")",
",",
"smooth",
"=",
"1e-5",
")",
":",
"pre",
"=",
"tf",
".",
"cast",
"(",
"output",
">",
"threshold",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"truth",
"=",
"tf",
".",
"cast",
"(",
"target",
">",
"threshold",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"inse",
"=",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"multiply",
"(",
"pre",
",",
"truth",
")",
",",
"axis",
"=",
"axis",
")",
"# AND",
"union",
"=",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"cast",
"(",
"tf",
".",
"add",
"(",
"pre",
",",
"truth",
")",
">=",
"1",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
",",
"axis",
"=",
"axis",
")",
"# OR",
"# old axis=[0,1,2,3]",
"# epsilon = 1e-5",
"# batch_iou = inse / (union + epsilon)",
"# new haodong",
"batch_iou",
"=",
"(",
"inse",
"+",
"smooth",
")",
"/",
"(",
"union",
"+",
"smooth",
")",
"iou",
"=",
"tf",
".",
"reduce_mean",
"(",
"batch_iou",
",",
"name",
"=",
"'iou_coe'",
")",
"return",
"iou"
] | Non-differentiable Intersection over Union (IoU) for comparing the
similarity of two batches of data, usually used for evaluating binary image segmentation.
The coefficient ranges from 0 to 1; 1 means a perfect match.
Parameters
-----------
output : tensor
A batch of distribution with shape: [batch_size, ....], (any dimensions).
target : tensor
The target distribution, format the same with `output`.
threshold : float
The threshold value to be true.
axis : tuple of integer
All dimensions are reduced, default ``(1,2,3)``.
smooth : float
This small value will be added to the numerator and denominator, see ``dice_coe``.
Notes
------
- IoU cannot be used as a training loss; people usually use the dice coefficient for training, and IoU and hard dice for evaluation. | [
"Non",
"-",
"differentiable",
"Intersection",
"over",
"Union",
"(",
"IoU",
")",
"for",
"comparing",
"the",
"similarity",
"of",
"two",
"batch",
"of",
"data",
"usually",
"be",
"used",
"for",
"evaluating",
"binary",
"image",
"segmentation",
".",
"The",
"coefficient",
"between",
"0",
"to",
"1",
"and",
"1",
"means",
"totally",
"match",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/cost.py#L307-L340 | valid |
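A minimal evaluation sketch for ``iou_coe`` (TensorFlow 1.x; shapes illustrative), typically paired with a dice training loss as the Notes suggest:

import tensorflow as tf
import tensorlayer as tl

outputs = tf.placeholder(tf.float32, [8, 256, 256, 1])
labels = tf.placeholder(tf.float32, [8, 256, 256, 1])
# Evaluation-only metric: train with soft dice, report IoU.
iou = tl.cost.iou_coe(outputs, labels, threshold=0.5)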
tensorlayer/tensorlayer | tensorlayer/cost.py | cross_entropy_seq | def cross_entropy_seq(logits, target_seqs, batch_size=None): # , batch_size=1, num_steps=None):
"""Returns the expression of cross-entropy of two sequences, implement
softmax internally. Normally be used for fixed length RNN outputs, see `PTB example <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_ptb_lstm_state_is_tuple.py>`__.
Parameters
----------
logits : Tensor
2D tensor with shape of `[batch_size * n_steps, n_classes]`.
target_seqs : Tensor
The target sequence, 2D tensor `[batch_size, n_steps]`; if the number of steps is dynamic, please use ``tl.cost.cross_entropy_seq_with_mask`` instead.
batch_size : None or int.
Whether to divide the cost by batch size.
- If integer, the return cost will be divided by `batch_size`.
- If None (default), the return cost will not be divided by anything.
Examples
--------
>>> see `PTB example <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_ptb_lstm_state_is_tuple.py>`__.for more details
>>> input_data = tf.placeholder(tf.int32, [batch_size, n_steps])
>>> targets = tf.placeholder(tf.int32, [batch_size, n_steps])
>>> # build the network
>>> print(net.outputs)
(batch_size * n_steps, n_classes)
>>> cost = tl.cost.cross_entropy_seq(network.outputs, targets)
"""
sequence_loss_by_example_fn = tf.contrib.legacy_seq2seq.sequence_loss_by_example
loss = sequence_loss_by_example_fn(
[logits], [tf.reshape(target_seqs, [-1])], [tf.ones_like(tf.reshape(target_seqs, [-1]), dtype=tf.float32)]
)
# [tf.ones([batch_size * num_steps])])
cost = tf.reduce_sum(loss) # / batch_size
if batch_size is not None:
cost = cost / batch_size
return cost | python | def cross_entropy_seq(logits, target_seqs, batch_size=None): # , batch_size=1, num_steps=None):
"""Returns the expression of cross-entropy of two sequences, implement
softmax internally. Normally be used for fixed length RNN outputs, see `PTB example <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_ptb_lstm_state_is_tuple.py>`__.
Parameters
----------
logits : Tensor
2D tensor with shape of `[batch_size * n_steps, n_classes]`.
target_seqs : Tensor
The target sequence, 2D tensor `[batch_size, n_steps]`; if the number of steps is dynamic, please use ``tl.cost.cross_entropy_seq_with_mask`` instead.
batch_size : None or int.
Whether to divide the cost by batch size.
- If integer, the return cost will be divided by `batch_size`.
- If None (default), the return cost will not be divided by anything.
Examples
--------
>>> see `PTB example <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_ptb_lstm_state_is_tuple.py>`__.for more details
>>> input_data = tf.placeholder(tf.int32, [batch_size, n_steps])
>>> targets = tf.placeholder(tf.int32, [batch_size, n_steps])
>>> # build the network
>>> print(net.outputs)
(batch_size * n_steps, n_classes)
>>> cost = tl.cost.cross_entropy_seq(network.outputs, targets)
"""
sequence_loss_by_example_fn = tf.contrib.legacy_seq2seq.sequence_loss_by_example
loss = sequence_loss_by_example_fn(
[logits], [tf.reshape(target_seqs, [-1])], [tf.ones_like(tf.reshape(target_seqs, [-1]), dtype=tf.float32)]
)
# [tf.ones([batch_size * num_steps])])
cost = tf.reduce_sum(loss) # / batch_size
if batch_size is not None:
cost = cost / batch_size
return cost | [
"def",
"cross_entropy_seq",
"(",
"logits",
",",
"target_seqs",
",",
"batch_size",
"=",
"None",
")",
":",
"# , batch_size=1, num_steps=None):",
"sequence_loss_by_example_fn",
"=",
"tf",
".",
"contrib",
".",
"legacy_seq2seq",
".",
"sequence_loss_by_example",
"loss",
"=",
"sequence_loss_by_example_fn",
"(",
"[",
"logits",
"]",
",",
"[",
"tf",
".",
"reshape",
"(",
"target_seqs",
",",
"[",
"-",
"1",
"]",
")",
"]",
",",
"[",
"tf",
".",
"ones_like",
"(",
"tf",
".",
"reshape",
"(",
"target_seqs",
",",
"[",
"-",
"1",
"]",
")",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"]",
")",
"# [tf.ones([batch_size * num_steps])])",
"cost",
"=",
"tf",
".",
"reduce_sum",
"(",
"loss",
")",
"# / batch_size",
"if",
"batch_size",
"is",
"not",
"None",
":",
"cost",
"=",
"cost",
"/",
"batch_size",
"return",
"cost"
] | Returns the expression of cross-entropy of two sequences; softmax is implemented internally.
Normally used for fixed-length RNN outputs; see the `PTB example <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_ptb_lstm_state_is_tuple.py>`__.
Parameters
----------
logits : Tensor
2D tensor with shape of `[batch_size * n_steps, n_classes]`.
target_seqs : Tensor
The target sequence, 2D tensor `[batch_size, n_steps]`; if the number of steps is dynamic, please use ``tl.cost.cross_entropy_seq_with_mask`` instead.
batch_size : None or int.
Whether to divide the cost by batch size.
- If integer, the return cost will be divided by `batch_size`.
- If None (default), the return cost will not be divided by anything.
Examples
--------
>>> see `PTB example <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_ptb_lstm_state_is_tuple.py>`__.for more details
>>> input_data = tf.placeholder(tf.int32, [batch_size, n_steps])
>>> targets = tf.placeholder(tf.int32, [batch_size, n_steps])
>>> # build the network
>>> print(net.outputs)
(batch_size * n_steps, n_classes)
>>> cost = tl.cost.cross_entropy_seq(network.outputs, targets) | [
"Returns",
"the",
"expression",
"of",
"cross",
"-",
"entropy",
"of",
"two",
"sequences",
"implement",
"softmax",
"internally",
".",
"Normally",
"be",
"used",
"for",
"fixed",
"length",
"RNN",
"outputs",
"see",
"PTB",
"example",
"<https",
":",
"//",
"github",
".",
"com",
"/",
"tensorlayer",
"/",
"tensorlayer",
"/",
"blob",
"/",
"master",
"/",
"example",
"/",
"tutorial_ptb_lstm_state_is_tuple",
".",
"py",
">",
"__",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/cost.py#L377-L412 | valid |
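A minimal sketch for ``cross_entropy_seq`` (TensorFlow 1.x; the sizes and the flattened-logits placeholder are illustrative stand-ins for an RNN's output):

import tensorflow as tf
import tensorlayer as tl

batch_size, n_steps, n_classes = 32, 20, 10000
# Logits flattened over time, as a fixed-length RNN plus dense layer would produce.
logits = tf.placeholder(tf.float32, [batch_size * n_steps, n_classes])
targets = tf.placeholder(tf.int32, [batch_size, n_steps])
cost = tl.cost.cross_entropy_seq(logits, targets, batch_size=batch_size)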
tensorlayer/tensorlayer | tensorlayer/cost.py | cross_entropy_seq_with_mask | def cross_entropy_seq_with_mask(logits, target_seqs, input_mask, return_details=False, name=None):
"""Returns the expression of cross-entropy of two sequences, implement
softmax internally. Normally be used for Dynamic RNN with Synced sequence input and output.
Parameters
-----------
logits : Tensor
2D tensor with shape of [batch_size * ?, n_classes], where `?` means a dynamic sequence length per example.
- Can be obtained from `DynamicRNNLayer` by setting ``return_seq_2d`` to `True`.
target_seqs : Tensor
Tensor of int, e.g. word IDs, with shape [batch_size, ?], where `?` means a dynamic sequence length per example.
input_mask : Tensor
The mask to compute the loss; it has the same size as `target_seqs`, normally 0 or 1.
return_details : boolean
Whether to return detailed losses.
- If False (default), only returns the loss.
- If True, returns the loss, losses, weights and targets (see source code).
Examples
--------
>>> batch_size = 64
>>> vocab_size = 10000
>>> embedding_size = 256
>>> input_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="input")
>>> target_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="target")
>>> input_mask = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="mask")
>>> net = tl.layers.EmbeddingInputlayer(
... inputs = input_seqs,
... vocabulary_size = vocab_size,
... embedding_size = embedding_size,
... name = 'seq_embedding')
>>> net = tl.layers.DynamicRNNLayer(net,
... cell_fn = tf.contrib.rnn.BasicLSTMCell,
... n_hidden = embedding_size,
... dropout = (0.7 if is_train else None),
... sequence_length = tl.layers.retrieve_seq_length_op2(input_seqs),
... return_seq_2d = True,
... name = 'dynamicrnn')
>>> print(net.outputs)
(?, 256)
>>> net = tl.layers.DenseLayer(net, n_units=vocab_size, name="output")
>>> print(net.outputs)
(?, 10000)
>>> loss = tl.cost.cross_entropy_seq_with_mask(net.outputs, target_seqs, input_mask)
"""
targets = tf.reshape(target_seqs, [-1]) # to one vector
weights = tf.to_float(tf.reshape(input_mask, [-1])) # to one vector like targets
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name) * weights
# losses = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name)) # for TF1.0 and others
loss = tf.divide(
tf.reduce_sum(losses), # loss from mask. reduce_sum before element-wise mul with mask !!
tf.reduce_sum(weights),
name="seq_loss_with_mask"
)
if return_details:
return loss, losses, weights, targets
else:
return loss | python | def cross_entropy_seq_with_mask(logits, target_seqs, input_mask, return_details=False, name=None):
"""Returns the expression of cross-entropy of two sequences, implement
softmax internally. Normally be used for Dynamic RNN with Synced sequence input and output.
Parameters
-----------
logits : Tensor
2D tensor with shape of [batch_size * ?, n_classes], where `?` means a dynamic sequence length per example.
- Can be obtained from `DynamicRNNLayer` by setting ``return_seq_2d`` to `True`.
target_seqs : Tensor
Tensor of int, e.g. word IDs, with shape [batch_size, ?], where `?` means a dynamic sequence length per example.
input_mask : Tensor
The mask to compute the loss; it has the same size as `target_seqs`, normally 0 or 1.
return_details : boolean
Whether to return detailed losses.
- If False (default), only returns the loss.
- If True, returns the loss, losses, weights and targets (see source code).
Examples
--------
>>> batch_size = 64
>>> vocab_size = 10000
>>> embedding_size = 256
>>> input_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="input")
>>> target_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="target")
>>> input_mask = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="mask")
>>> net = tl.layers.EmbeddingInputlayer(
... inputs = input_seqs,
... vocabulary_size = vocab_size,
... embedding_size = embedding_size,
... name = 'seq_embedding')
>>> net = tl.layers.DynamicRNNLayer(net,
... cell_fn = tf.contrib.rnn.BasicLSTMCell,
... n_hidden = embedding_size,
... dropout = (0.7 if is_train else None),
... sequence_length = tl.layers.retrieve_seq_length_op2(input_seqs),
... return_seq_2d = True,
... name = 'dynamicrnn')
>>> print(net.outputs)
(?, 256)
>>> net = tl.layers.DenseLayer(net, n_units=vocab_size, name="output")
>>> print(net.outputs)
(?, 10000)
>>> loss = tl.cost.cross_entropy_seq_with_mask(net.outputs, target_seqs, input_mask)
"""
targets = tf.reshape(target_seqs, [-1]) # to one vector
weights = tf.to_float(tf.reshape(input_mask, [-1])) # to one vector like targets
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name) * weights
# losses = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name)) # for TF1.0 and others
loss = tf.divide(
tf.reduce_sum(losses), # loss from mask. reduce_sum before element-wise mul with mask !!
tf.reduce_sum(weights),
name="seq_loss_with_mask"
)
if return_details:
return loss, losses, weights, targets
else:
return loss | [
"def",
"cross_entropy_seq_with_mask",
"(",
"logits",
",",
"target_seqs",
",",
"input_mask",
",",
"return_details",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"targets",
"=",
"tf",
".",
"reshape",
"(",
"target_seqs",
",",
"[",
"-",
"1",
"]",
")",
"# to one vector",
"weights",
"=",
"tf",
".",
"to_float",
"(",
"tf",
".",
"reshape",
"(",
"input_mask",
",",
"[",
"-",
"1",
"]",
")",
")",
"# to one vector like targets",
"losses",
"=",
"tf",
".",
"nn",
".",
"sparse_softmax_cross_entropy_with_logits",
"(",
"logits",
"=",
"logits",
",",
"labels",
"=",
"targets",
",",
"name",
"=",
"name",
")",
"*",
"weights",
"# losses = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name)) # for TF1.0 and others",
"loss",
"=",
"tf",
".",
"divide",
"(",
"tf",
".",
"reduce_sum",
"(",
"losses",
")",
",",
"# loss from mask. reduce_sum before element-wise mul with mask !!",
"tf",
".",
"reduce_sum",
"(",
"weights",
")",
",",
"name",
"=",
"\"seq_loss_with_mask\"",
")",
"if",
"return_details",
":",
"return",
"loss",
",",
"losses",
",",
"weights",
",",
"targets",
"else",
":",
"return",
"loss"
] | Returns the expression of cross-entropy of two sequences; softmax is implemented internally.
Normally used for dynamic RNN with synced sequence input and output.
Parameters
-----------
logits : Tensor
2D tensor with shape of [batch_size * ?, n_classes], where `?` means a dynamic sequence length per example.
- Can be obtained from `DynamicRNNLayer` by setting ``return_seq_2d`` to `True`.
target_seqs : Tensor
Tensor of int, e.g. word IDs, with shape [batch_size, ?], where `?` means a dynamic sequence length per example.
input_mask : Tensor
The mask to compute the loss; it has the same size as `target_seqs`, normally 0 or 1.
return_details : boolean
Whether to return detailed losses.
- If False (default), only returns the loss.
- If True, returns the loss, losses, weights and targets (see source code).
Examples
--------
>>> batch_size = 64
>>> vocab_size = 10000
>>> embedding_size = 256
>>> input_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="input")
>>> target_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="target")
>>> input_mask = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="mask")
>>> net = tl.layers.EmbeddingInputlayer(
... inputs = input_seqs,
... vocabulary_size = vocab_size,
... embedding_size = embedding_size,
... name = 'seq_embedding')
>>> net = tl.layers.DynamicRNNLayer(net,
... cell_fn = tf.contrib.rnn.BasicLSTMCell,
... n_hidden = embedding_size,
... dropout = (0.7 if is_train else None),
... sequence_length = tl.layers.retrieve_seq_length_op2(input_seqs),
... return_seq_2d = True,
... name = 'dynamicrnn')
>>> print(net.outputs)
(?, 256)
>>> net = tl.layers.DenseLayer(net, n_units=vocab_size, name="output")
>>> print(net.outputs)
(?, 10000)
>>> loss = tl.cost.cross_entropy_seq_with_mask(net.outputs, target_seqs, input_mask) | [
"Returns",
"the",
"expression",
"of",
"cross",
"-",
"entropy",
"of",
"two",
"sequences",
"implement",
"softmax",
"internally",
".",
"Normally",
"be",
"used",
"for",
"Dynamic",
"RNN",
"with",
"Synced",
"sequence",
"input",
"and",
"output",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/cost.py#L415-L475 | valid |
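A minimal sketch of the masked sequence loss (TensorFlow 1.x; shapes illustrative; the mask marks real tokens with 1 and padding with 0):

import tensorflow as tf
import tensorlayer as tl

n_classes = 10000
logits = tf.placeholder(tf.float32, [None, n_classes])  # [batch_size * ?, n_classes]
target_seqs = tf.placeholder(tf.int64, [32, None])      # word IDs
input_mask = tf.placeholder(tf.int64, [32, None])       # 1 = real token, 0 = padding
loss = tl.cost.cross_entropy_seq_with_mask(logits, target_seqs, input_mask)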
tensorlayer/tensorlayer | tensorlayer/cost.py | cosine_similarity | def cosine_similarity(v1, v2):
"""Cosine similarity [-1, 1].
Parameters
----------
v1, v2 : Tensor
Tensor with the same shape [batch_size, n_feature].
References
----------
- `Wiki <https://en.wikipedia.org/wiki/Cosine_similarity>`__.
"""
return tf.reduce_sum(tf.multiply(v1, v2), 1) / \
(tf.sqrt(tf.reduce_sum(tf.multiply(v1, v1), 1)) *
tf.sqrt(tf.reduce_sum(tf.multiply(v2, v2), 1))) | python | def cosine_similarity(v1, v2):
"""Cosine similarity [-1, 1].
Parameters
----------
v1, v2 : Tensor
Tensor with the same shape [batch_size, n_feature].
References
----------
- `Wiki <https://en.wikipedia.org/wiki/Cosine_similarity>`__.
"""
return tf.reduce_sum(tf.multiply(v1, v2), 1) / \
(tf.sqrt(tf.reduce_sum(tf.multiply(v1, v1), 1)) *
tf.sqrt(tf.reduce_sum(tf.multiply(v2, v2), 1))) | [
"def",
"cosine_similarity",
"(",
"v1",
",",
"v2",
")",
":",
"return",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"multiply",
"(",
"v1",
",",
"v2",
")",
",",
"1",
")",
"/",
"(",
"tf",
".",
"sqrt",
"(",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"multiply",
"(",
"v1",
",",
"v1",
")",
",",
"1",
")",
")",
"*",
"tf",
".",
"sqrt",
"(",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"multiply",
"(",
"v2",
",",
"v2",
")",
",",
"1",
")",
")",
")"
] | Cosine similarity in [-1, 1].
Parameters
----------
v1, v2 : Tensor
Tensor with the same shape [batch_size, n_feature].
References
----------
- `Wiki <https://en.wikipedia.org/wiki/Cosine_similarity>`__. | [
"Cosine",
"similarity",
"[",
"-",
"1",
"1",
"]",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/cost.py#L478-L494 | valid |
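A minimal sketch for ``cosine_similarity`` (TensorFlow 1.x; the feature size is illustrative):

import tensorflow as tf
import tensorlayer as tl

v1 = tf.placeholder(tf.float32, [4, 128])
v2 = tf.placeholder(tf.float32, [4, 128])
# One similarity value in [-1, 1] per example in the batch.
sim = tl.cost.cosine_similarity(v1, v2)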
tensorlayer/tensorlayer | tensorlayer/cost.py | li_regularizer | def li_regularizer(scale, scope=None):
"""Li regularization removes the neurons of previous layer. The `i` represents `inputs`.
Returns a function that can be used to apply group li regularization to weights.
The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.
Parameters
----------
scale : float
A scalar multiplier `Tensor`. 0.0 disables the regularizer.
scope: str
An optional scope name for this function.
Returns
--------
A function with signature `li(weights, name=None)` that applies Li regularization.
Raises
------
ValueError : if scale is outside of the range [0.0, 1.0] or if scale is not a float.
"""
if isinstance(scale, numbers.Integral):
raise ValueError('scale cannot be an integer: %s' % scale)
if isinstance(scale, numbers.Real):
if scale < 0.:
raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale)
if scale >= 1.:
raise ValueError('Setting a scale greater than 1 on a regularizer: %g' % scale)
if scale == 0.:
tl.logging.info('Scale of 0 disables regularizer.')
return lambda _, name=None: None
def li(weights):
"""Applies li regularization to weights."""
with tf.name_scope('li_regularizer') as scope:
my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
# if tf.__version__ <= '0.12':
# standard_ops_fn = standard_ops.mul
# else:
standard_ops_fn = standard_ops.multiply
return standard_ops_fn(
my_scale, standard_ops.reduce_sum(standard_ops.sqrt(standard_ops.reduce_sum(tf.square(weights), 1))),
name=scope
)
return li | python | def li_regularizer(scale, scope=None):
"""Li regularization removes the neurons of previous layer. The `i` represents `inputs`.
Returns a function that can be used to apply group li regularization to weights.
The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.
Parameters
----------
scale : float
A scalar multiplier `Tensor`. 0.0 disables the regularizer.
scope: str
An optional scope name for this function.
Returns
--------
A function with signature `li(weights, name=None)` that applies Li regularization.
Raises
------
ValueError : if scale is outside of the range [0.0, 1.0] or if scale is not a float.
"""
if isinstance(scale, numbers.Integral):
raise ValueError('scale cannot be an integer: %s' % scale)
if isinstance(scale, numbers.Real):
if scale < 0.:
raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale)
if scale >= 1.:
raise ValueError('Setting a scale greater than 1 on a regularizer: %g' % scale)
if scale == 0.:
tl.logging.info('Scale of 0 disables regularizer.')
return lambda _, name=None: None
def li(weights):
"""Applies li regularization to weights."""
with tf.name_scope('li_regularizer') as scope:
my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
# if tf.__version__ <= '0.12':
# standard_ops_fn = standard_ops.mul
# else:
standard_ops_fn = standard_ops.multiply
return standard_ops_fn(
my_scale, standard_ops.reduce_sum(standard_ops.sqrt(standard_ops.reduce_sum(tf.square(weights), 1))),
name=scope
)
return li | [
"def",
"li_regularizer",
"(",
"scale",
",",
"scope",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"scale",
",",
"numbers",
".",
"Integral",
")",
":",
"raise",
"ValueError",
"(",
"'scale cannot be an integer: %s'",
"%",
"scale",
")",
"if",
"isinstance",
"(",
"scale",
",",
"numbers",
".",
"Real",
")",
":",
"if",
"scale",
"<",
"0.",
":",
"raise",
"ValueError",
"(",
"'Setting a scale less than 0 on a regularizer: %g'",
"%",
"scale",
")",
"if",
"scale",
">=",
"1.",
":",
"raise",
"ValueError",
"(",
"'Setting a scale greater than 1 on a regularizer: %g'",
"%",
"scale",
")",
"if",
"scale",
"==",
"0.",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"'Scale of 0 disables regularizer.'",
")",
"return",
"lambda",
"_",
",",
"name",
"=",
"None",
":",
"None",
"def",
"li",
"(",
"weights",
")",
":",
"\"\"\"Applies li regularization to weights.\"\"\"",
"with",
"tf",
".",
"name_scope",
"(",
"'li_regularizer'",
")",
"as",
"scope",
":",
"my_scale",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"scale",
",",
"dtype",
"=",
"weights",
".",
"dtype",
".",
"base_dtype",
",",
"name",
"=",
"'scale'",
")",
"# if tf.__version__ <= '0.12':",
"# standard_ops_fn = standard_ops.mul",
"# else:",
"standard_ops_fn",
"=",
"standard_ops",
".",
"multiply",
"return",
"standard_ops_fn",
"(",
"my_scale",
",",
"standard_ops",
".",
"reduce_sum",
"(",
"standard_ops",
".",
"sqrt",
"(",
"standard_ops",
".",
"reduce_sum",
"(",
"tf",
".",
"square",
"(",
"weights",
")",
",",
"1",
")",
")",
")",
",",
"name",
"=",
"scope",
")",
"return",
"li"
] | Li regularization removes the neurons of the previous layer. The `i` represents `inputs`.
Returns a function that can be used to apply group li regularization to weights.
The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.
Parameters
----------
scale : float
A scalar multiplier `Tensor`. 0.0 disables the regularizer.
scope: str
An optional scope name for this function.
Returns
--------
A function with signature `li(weights, name=None)` that applies Li regularization.
Raises
------
ValueError : if scale is outside of the range [0.0, 1.0] or if scale is not a float. | [
"Li",
"regularization",
"removes",
"the",
"neurons",
"of",
"previous",
"layer",
".",
"The",
"i",
"represents",
"inputs",
".",
"Returns",
"a",
"function",
"that",
"can",
"be",
"used",
"to",
"apply",
"group",
"li",
"regularization",
"to",
"weights",
".",
"The",
"implementation",
"follows",
"TensorFlow",
"contrib",
"<https",
":",
"//",
"github",
".",
"com",
"/",
"tensorflow",
"/",
"tensorflow",
"/",
"blob",
"/",
"master",
"/",
"tensorflow",
"/",
"contrib",
"/",
"layers",
"/",
"python",
"/",
"layers",
"/",
"regularizers",
".",
"py",
">",
"__",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/cost.py#L498-L543 | valid |
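A minimal sketch of applying ``li_regularizer`` to a 2D weight matrix (TensorFlow 1.x; the variable shape and scale are illustrative):

import tensorflow as tf
import tensorlayer as tl

W = tf.get_variable('W', shape=[784, 800])
# Build the regularizer once, then apply it to any 2D weight matrix.
li_reg = tl.cost.li_regularizer(scale=0.001)
reg_loss = li_reg(W)  # add this term to the training cost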
tensorlayer/tensorlayer | tensorlayer/cost.py | maxnorm_regularizer | def maxnorm_regularizer(scale=1.0):
"""Max-norm regularization returns a function that can be used to apply max-norm regularization to weights.
More about max-norm, see `wiki-max norm <https://en.wikipedia.org/wiki/Matrix_norm#Max_norm>`_.
The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.
Parameters
----------
scale : float
A scalar multiplier `Tensor`. 0.0 disables the regularizer.
Returns
---------
A function with signature `mn(weights, name=None)` that applies max-norm regularization.
Raises
--------
ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float.
"""
if isinstance(scale, numbers.Integral):
raise ValueError('scale cannot be an integer: %s' % scale)
if isinstance(scale, numbers.Real):
if scale < 0.:
raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale)
# if scale >= 1.:
# raise ValueError('Setting a scale greater than 1 on a regularizer: %g' %
# scale)
if scale == 0.:
tl.logging.info('Scale of 0 disables regularizer.')
return lambda _, name=None: None
def mn(weights, name='max_regularizer'):
"""Applies max-norm regularization to weights."""
with tf.name_scope(name) as scope:
my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
# if tf.__version__ <= '0.12':
# standard_ops_fn = standard_ops.mul
# else:
standard_ops_fn = standard_ops.multiply
return standard_ops_fn(my_scale, standard_ops.reduce_max(standard_ops.abs(weights)), name=scope)
return mn | python | def maxnorm_regularizer(scale=1.0):
"""Max-norm regularization returns a function that can be used to apply max-norm regularization to weights.
More about max-norm, see `wiki-max norm <https://en.wikipedia.org/wiki/Matrix_norm#Max_norm>`_.
The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.
Parameters
----------
scale : float
A scalar multiplier `Tensor`. 0.0 disables the regularizer.
Returns
---------
A function with signature `mn(weights, name=None)` that applies max-norm regularization.
Raises
--------
ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float.
"""
if isinstance(scale, numbers.Integral):
raise ValueError('scale cannot be an integer: %s' % scale)
if isinstance(scale, numbers.Real):
if scale < 0.:
raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale)
# if scale >= 1.:
# raise ValueError('Setting a scale greater than 1 on a regularizer: %g' %
# scale)
if scale == 0.:
tl.logging.info('Scale of 0 disables regularizer.')
return lambda _, name=None: None
def mn(weights, name='max_regularizer'):
"""Applies max-norm regularization to weights."""
with tf.name_scope(name) as scope:
my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
# if tf.__version__ <= '0.12':
# standard_ops_fn = standard_ops.mul
# else:
standard_ops_fn = standard_ops.multiply
return standard_ops_fn(my_scale, standard_ops.reduce_max(standard_ops.abs(weights)), name=scope)
return mn | [
"def",
"maxnorm_regularizer",
"(",
"scale",
"=",
"1.0",
")",
":",
"if",
"isinstance",
"(",
"scale",
",",
"numbers",
".",
"Integral",
")",
":",
"raise",
"ValueError",
"(",
"'scale cannot be an integer: %s'",
"%",
"scale",
")",
"if",
"isinstance",
"(",
"scale",
",",
"numbers",
".",
"Real",
")",
":",
"if",
"scale",
"<",
"0.",
":",
"raise",
"ValueError",
"(",
"'Setting a scale less than 0 on a regularizer: %g'",
"%",
"scale",
")",
"# if scale >= 1.:",
"# raise ValueError('Setting a scale greater than 1 on a regularizer: %g' %",
"# scale)",
"if",
"scale",
"==",
"0.",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"'Scale of 0 disables regularizer.'",
")",
"return",
"lambda",
"_",
",",
"name",
"=",
"None",
":",
"None",
"def",
"mn",
"(",
"weights",
",",
"name",
"=",
"'max_regularizer'",
")",
":",
"\"\"\"Applies max-norm regularization to weights.\"\"\"",
"with",
"tf",
".",
"name_scope",
"(",
"name",
")",
"as",
"scope",
":",
"my_scale",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"scale",
",",
"dtype",
"=",
"weights",
".",
"dtype",
".",
"base_dtype",
",",
"name",
"=",
"'scale'",
")",
"# if tf.__version__ <= '0.12':",
"# standard_ops_fn = standard_ops.mul",
"# else:",
"standard_ops_fn",
"=",
"standard_ops",
".",
"multiply",
"return",
"standard_ops_fn",
"(",
"my_scale",
",",
"standard_ops",
".",
"reduce_max",
"(",
"standard_ops",
".",
"abs",
"(",
"weights",
")",
")",
",",
"name",
"=",
"scope",
")",
"return",
"mn"
] | Max-norm regularization returns a function that can be used to apply max-norm regularization to weights.
More about max-norm, see `wiki-max norm <https://en.wikipedia.org/wiki/Matrix_norm#Max_norm>`_.
The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.
Parameters
----------
scale : float
A scalar multiplier `Tensor`. 0.0 disables the regularizer.
Returns
---------
A function with signature `mn(weights, name=None)` that applies max-norm regularization.
Raises
--------
ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float. | [
"Max",
"-",
"norm",
"regularization",
"returns",
"a",
"function",
"that",
"can",
"be",
"used",
"to",
"apply",
"max",
"-",
"norm",
"regularization",
"to",
"weights",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/cost.py#L593-L636 | valid |
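A minimal sketch for ``maxnorm_regularizer`` (TensorFlow 1.x; shape and scale illustrative):

import tensorflow as tf
import tensorlayer as tl

W = tf.get_variable('W', shape=[784, 800])
mn_reg = tl.cost.maxnorm_regularizer(scale=1.0)
reg_loss = mn_reg(W)  # penalizes the largest absolute weight value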
tensorlayer/tensorlayer | tensorlayer/cost.py | maxnorm_o_regularizer | def maxnorm_o_regularizer(scale):
"""Max-norm output regularization removes the neurons of current layer.
Returns a function that can be used to apply max-norm regularization to each column of weight matrix.
The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.
Parameters
----------
scale : float
A scalar multiplier `Tensor`. 0.0 disables the regularizer.
Returns
---------
A function with signature `mn_o(weights, name=None)` that applies max-norm output regularization.
Raises
---------
ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float.
"""
if isinstance(scale, numbers.Integral):
raise ValueError('scale cannot be an integer: %s' % scale)
if isinstance(scale, numbers.Real):
if scale < 0.:
raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale)
# if scale >= 1.:
# raise ValueError('Setting a scale greater than 1 on a regularizer: %g' %
# scale)
if scale == 0.:
tl.logging.info('Scale of 0 disables regularizer.')
return lambda _, name=None: None
def mn_o(weights, name='maxnorm_o_regularizer'):
"""Applies max-norm regularization to weights."""
with tf.name_scope(name) as scope:
my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
if tf.__version__ <= '0.12':
standard_ops_fn = standard_ops.mul
else:
standard_ops_fn = standard_ops.multiply
return standard_ops_fn(
my_scale, standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), 0)), name=scope
)
return mn_o | python | def maxnorm_o_regularizer(scale):
"""Max-norm output regularization removes the neurons of current layer.
Returns a function that can be used to apply max-norm regularization to each column of weight matrix.
The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.
Parameters
----------
scale : float
A scalar multiplier `Tensor`. 0.0 disables the regularizer.
Returns
---------
A function with signature `mn_o(weights, name=None)` that applies max-norm output regularization.
Raises
---------
ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float.
"""
if isinstance(scale, numbers.Integral):
raise ValueError('scale cannot be an integer: %s' % scale)
if isinstance(scale, numbers.Real):
if scale < 0.:
raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale)
# if scale >= 1.:
# raise ValueError('Setting a scale greater than 1 on a regularizer: %g' %
# scale)
if scale == 0.:
tl.logging.info('Scale of 0 disables regularizer.')
return lambda _, name=None: None
def mn_o(weights, name='maxnorm_o_regularizer'):
"""Applies max-norm regularization to weights."""
with tf.name_scope(name) as scope:
my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
if tf.__version__ <= '0.12':
standard_ops_fn = standard_ops.mul
else:
standard_ops_fn = standard_ops.multiply
return standard_ops_fn(
my_scale, standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), 0)), name=scope
)
return mn_o | [
"def",
"maxnorm_o_regularizer",
"(",
"scale",
")",
":",
"if",
"isinstance",
"(",
"scale",
",",
"numbers",
".",
"Integral",
")",
":",
"raise",
"ValueError",
"(",
"'scale cannot be an integer: %s'",
"%",
"scale",
")",
"if",
"isinstance",
"(",
"scale",
",",
"numbers",
".",
"Real",
")",
":",
"if",
"scale",
"<",
"0.",
":",
"raise",
"ValueError",
"(",
"'Setting a scale less than 0 on a regularizer: %g'",
"%",
"scale",
")",
"# if scale >= 1.:",
"# raise ValueError('Setting a scale greater than 1 on a regularizer: %g' %",
"# scale)",
"if",
"scale",
"==",
"0.",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"'Scale of 0 disables regularizer.'",
")",
"return",
"lambda",
"_",
",",
"name",
"=",
"None",
":",
"None",
"def",
"mn_o",
"(",
"weights",
",",
"name",
"=",
"'maxnorm_o_regularizer'",
")",
":",
"\"\"\"Applies max-norm regularization to weights.\"\"\"",
"with",
"tf",
".",
"name_scope",
"(",
"name",
")",
"as",
"scope",
":",
"my_scale",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"scale",
",",
"dtype",
"=",
"weights",
".",
"dtype",
".",
"base_dtype",
",",
"name",
"=",
"'scale'",
")",
"if",
"tf",
".",
"__version__",
"<=",
"'0.12'",
":",
"standard_ops_fn",
"=",
"standard_ops",
".",
"mul",
"else",
":",
"standard_ops_fn",
"=",
"standard_ops",
".",
"multiply",
"return",
"standard_ops_fn",
"(",
"my_scale",
",",
"standard_ops",
".",
"reduce_sum",
"(",
"standard_ops",
".",
"reduce_max",
"(",
"standard_ops",
".",
"abs",
"(",
"weights",
")",
",",
"0",
")",
")",
",",
"name",
"=",
"scope",
")",
"return",
"mn_o"
] | Max-norm output regularization removes the neurons of the current layer.
Returns a function that can be used to apply max-norm regularization to each column of weight matrix.
The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.
Parameters
----------
scale : float
A scalar multiplier `Tensor`. 0.0 disables the regularizer.
Returns
---------
A function with signature `mn_o(weights, name=None)` that applies max-norm output regularization.
Raises
---------
ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float. | [
"Max",
"-",
"norm",
"output",
"regularization",
"removes",
"the",
"neurons",
"of",
"current",
"layer",
".",
"Returns",
"a",
"function",
"that",
"can",
"be",
"used",
"to",
"apply",
"max",
"-",
"norm",
"regularization",
"to",
"each",
"column",
"of",
"weight",
"matrix",
".",
"The",
"implementation",
"follows",
"TensorFlow",
"contrib",
"<https",
":",
"//",
"github",
".",
"com",
"/",
"tensorflow",
"/",
"tensorflow",
"/",
"blob",
"/",
"master",
"/",
"tensorflow",
"/",
"contrib",
"/",
"layers",
"/",
"python",
"/",
"layers",
"/",
"regularizers",
".",
"py",
">",
"__",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/cost.py#L639-L683 | valid |
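A minimal sketch for ``maxnorm_o_regularizer`` (TensorFlow 1.x; shape and scale illustrative):

import tensorflow as tf
import tensorlayer as tl

W = tf.get_variable('W', shape=[784, 800])
mn_o_reg = tl.cost.maxnorm_o_regularizer(scale=0.001)
# Sums the per-column maxima, pushing whole output units toward zero.
reg_loss = mn_o_reg(W)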
tensorlayer/tensorlayer | tensorlayer/activation.py | ramp | def ramp(x, v_min=0, v_max=1, name=None):
"""Ramp activation function.
Parameters
----------
x : Tensor
input.
v_min : float
cap input to v_min as a lower bound.
v_max : float
cap input to v_max as an upper bound.
name : str
The function name (optional).
Returns
-------
Tensor
A ``Tensor`` in the same type as ``x``.
"""
return tf.clip_by_value(x, clip_value_min=v_min, clip_value_max=v_max, name=name) | python | def ramp(x, v_min=0, v_max=1, name=None):
"""Ramp activation function.
Parameters
----------
x : Tensor
input.
v_min : float
cap input to v_min as a lower bound.
v_max : float
cap input to v_max as an upper bound.
name : str
The function name (optional).
Returns
-------
Tensor
A ``Tensor`` in the same type as ``x``.
"""
return tf.clip_by_value(x, clip_value_min=v_min, clip_value_max=v_max, name=name) | [
"def",
"ramp",
"(",
"x",
",",
"v_min",
"=",
"0",
",",
"v_max",
"=",
"1",
",",
"name",
"=",
"None",
")",
":",
"return",
"tf",
".",
"clip_by_value",
"(",
"x",
",",
"clip_value_min",
"=",
"v_min",
",",
"clip_value_max",
"=",
"v_max",
",",
"name",
"=",
"name",
")"
] | Ramp activation function.
Parameters
----------
x : Tensor
input.
v_min : float
cap input to v_min as a lower bound.
v_max : float
cap input to v_max as an upper bound.
name : str
The function name (optional).
Returns
-------
Tensor
A ``Tensor`` in the same type as ``x``. | [
"Ramp",
"activation",
"function",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/activation.py#L25-L45 | valid |
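A minimal sketch for ``ramp`` (TensorFlow 1.x; the input values are illustrative):

import tensorflow as tf
import tensorlayer as tl

x = tf.constant([-2.0, 0.3, 1.7])
y = tl.act.ramp(x, v_min=0, v_max=1)  # clips to [0, 1] -> [0.0, 0.3, 1.0]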
tensorlayer/tensorlayer | tensorlayer/activation.py | leaky_relu | def leaky_relu(x, alpha=0.2, name="leaky_relu"):
"""leaky_relu can be used through its shortcut: :func:`tl.act.lrelu`.
This function is a modified version of ReLU, introducing a nonzero gradient for negative input. Introduced by the paper:
`Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
The function return the following results:
- When x < 0: ``f(x) = alpha_low * x``.
- When x >= 0: ``f(x) = x``.
Parameters
----------
x : Tensor
Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``.
alpha : float
Slope.
name : str
The function name (optional).
Examples
--------
>>> import tensorlayer as tl
>>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.lrelu(x, 0.2), name='dense')
Returns
-------
Tensor
A ``Tensor`` in the same type as ``x``.
References
----------
- `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
"""
if not (0 < alpha <= 1):
raise ValueError("`alpha` value must be in [0, 1]`")
with tf.name_scope(name, "leaky_relu") as name_scope:
x = tf.convert_to_tensor(x, name="features")
return tf.maximum(x, alpha * x, name=name_scope) | python | def leaky_relu(x, alpha=0.2, name="leaky_relu"):
"""leaky_relu can be used through its shortcut: :func:`tl.act.lrelu`.
This function is a modified version of ReLU, introducing a nonzero gradient for negative input. Introduced by the paper:
`Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
The function returns the following results:
- When x < 0: ``f(x) = alpha * x``.
- When x >= 0: ``f(x) = x``.
Parameters
----------
x : Tensor
Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``.
alpha : float
Slope.
name : str
The function name (optional).
Examples
--------
>>> import tensorlayer as tl
>>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.lrelu(x, 0.2), name='dense')
Returns
-------
Tensor
A ``Tensor`` in the same type as ``x``.
References
----------
- `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
"""
if not (0 < alpha <= 1):
raise ValueError("`alpha` value must be in [0, 1]`")
with tf.name_scope(name, "leaky_relu") as name_scope:
x = tf.convert_to_tensor(x, name="features")
return tf.maximum(x, alpha * x, name=name_scope) | [
"def",
"leaky_relu",
"(",
"x",
",",
"alpha",
"=",
"0.2",
",",
"name",
"=",
"\"leaky_relu\"",
")",
":",
"if",
"not",
"(",
"0",
"<",
"alpha",
"<=",
"1",
")",
":",
"raise",
"ValueError",
"(",
"\"`alpha` value must be in [0, 1]`\"",
")",
"with",
"tf",
".",
"name_scope",
"(",
"name",
",",
"\"leaky_relu\"",
")",
"as",
"name_scope",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"x",
",",
"name",
"=",
"\"features\"",
")",
"return",
"tf",
".",
"maximum",
"(",
"x",
",",
"alpha",
"*",
"x",
",",
"name",
"=",
"name_scope",
")"
] | leaky_relu can be used through its shortcut: :func:`tl.act.lrelu`.
This function is a modified version of ReLU, introducing a nonzero gradient for negative input. Introduced by the paper:
`Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
The function return the following results:
- When x < 0: ``f(x) = alpha_low * x``.
- When x >= 0: ``f(x) = x``.
Parameters
----------
x : Tensor
Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``.
alpha : float
Slope.
name : str
The function name (optional).
Examples
--------
>>> import tensorlayer as tl
>>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.lrelu(x, 0.2), name='dense')
Returns
-------
Tensor
A ``Tensor`` in the same type as ``x``.
References
----------
- `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__ | [
"leaky_relu",
"can",
"be",
"used",
"through",
"its",
"shortcut",
":",
":",
"func",
":",
"tl",
".",
"act",
".",
"lrelu",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/activation.py#L49-L88 | valid |
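A minimal sketch for ``leaky_relu`` (TensorFlow 1.x; input values illustrative):

import tensorflow as tf
import tensorlayer as tl

x = tf.constant([-1.0, 2.0])
y = tl.act.leaky_relu(x, alpha=0.2)  # max(x, 0.2 * x) -> [-0.2, 2.0]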
tensorlayer/tensorlayer | tensorlayer/activation.py | leaky_relu6 | def leaky_relu6(x, alpha=0.2, name="leaky_relu6"):
""":func:`leaky_relu6` can be used through its shortcut: :func:`tl.act.lrelu6`.
This activation function is a modified version of :func:`leaky_relu` introduced by the following paper:
`Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
This activation function also follows the behaviour of the activation function :func:`tf.nn.relu6` introduced by the following paper:
`Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__
The function returns the following results:
- When x < 0: ``f(x) = alpha * x``.
- When x in [0, 6]: ``f(x) = x``.
- When x > 6: ``f(x) = 6``.
Parameters
----------
x : Tensor
Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``.
alpha : float
Slope.
name : str
The function name (optional).
Examples
--------
>>> import tensorlayer as tl
>>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.leaky_relu6(x, 0.2), name='dense')
Returns
-------
Tensor
A ``Tensor`` in the same type as ``x``.
References
----------
- `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
- `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__
"""
if not isinstance(alpha, tf.Tensor) and not (0 < alpha <= 1):
raise ValueError("`alpha` value must be in [0, 1]`")
with tf.name_scope(name, "leaky_relu6") as name_scope:
x = tf.convert_to_tensor(x, name="features")
return tf.minimum(tf.maximum(x, alpha * x), 6, name=name_scope) | python | def leaky_relu6(x, alpha=0.2, name="leaky_relu6"):
""":func:`leaky_relu6` can be used through its shortcut: :func:`tl.act.lrelu6`.
This activation function is a modified version of :func:`leaky_relu` introduced by the following paper:
`Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
This activation function also follows the behaviour of the activation function :func:`tf.nn.relu6` introduced by the following paper:
`Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__
The function return the following results:
- When x < 0: ``f(x) = alpha_low * x``.
- When x in [0, 6]: ``f(x) = x``.
- When x > 6: ``f(x) = 6``.
Parameters
----------
x : Tensor
Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``.
alpha : float
Slope.
name : str
The function name (optional).
Examples
--------
>>> import tensorlayer as tl
>>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.leaky_relu6(x, 0.2), name='dense')
Returns
-------
Tensor
A ``Tensor`` in the same type as ``x``.
References
----------
- `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
- `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__
"""
if not isinstance(alpha, tf.Tensor) and not (0 < alpha <= 1):
raise ValueError("`alpha` value must be in [0, 1]`")
with tf.name_scope(name, "leaky_relu6") as name_scope:
x = tf.convert_to_tensor(x, name="features")
return tf.minimum(tf.maximum(x, alpha * x), 6, name=name_scope) | [
"def",
"leaky_relu6",
"(",
"x",
",",
"alpha",
"=",
"0.2",
",",
"name",
"=",
"\"leaky_relu6\"",
")",
":",
"if",
"not",
"isinstance",
"(",
"alpha",
",",
"tf",
".",
"Tensor",
")",
"and",
"not",
"(",
"0",
"<",
"alpha",
"<=",
"1",
")",
":",
"raise",
"ValueError",
"(",
"\"`alpha` value must be in [0, 1]`\"",
")",
"with",
"tf",
".",
"name_scope",
"(",
"name",
",",
"\"leaky_relu6\"",
")",
"as",
"name_scope",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"x",
",",
"name",
"=",
"\"features\"",
")",
"return",
"tf",
".",
"minimum",
"(",
"tf",
".",
"maximum",
"(",
"x",
",",
"alpha",
"*",
"x",
")",
",",
"6",
",",
"name",
"=",
"name_scope",
")"
] | :func:`leaky_relu6` can be used through its shortcut: :func:`tl.act.lrelu6`.
This activation function is a modified version :func:`leaky_relu` introduced by the following paper:
`Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
This activation function also follows the behaviour of the activation function :func:`tf.nn.relu6` introduced by the following paper:
`Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__
The function return the following results:
- When x < 0: ``f(x) = alpha_low * x``.
- When x in [0, 6]: ``f(x) = x``.
- When x > 6: ``f(x) = 6``.
Parameters
----------
x : Tensor
Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``.
alpha : float
Slope.
name : str
The function name (optional).
Examples
--------
>>> import tensorlayer as tl
>>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.leaky_relu6(x, 0.2), name='dense')
Returns
-------
Tensor
A ``Tensor`` in the same type as ``x``.
References
----------
- `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
- `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__ | [
":",
"func",
":",
"leaky_relu6",
"can",
"be",
"used",
"through",
"its",
"shortcut",
":",
":",
"func",
":",
"tl",
".",
"act",
".",
"lrelu6",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/activation.py#L91-L134 | valid |
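
A minimal NumPy sketch (not TensorLayer code; the sample values are illustrative) of the same min/max composition, covering the three regimes of leaky_relu6:

import numpy as np

def leaky_relu6_np(x, alpha=0.2):
    # alpha * x below 0, identity on [0, 6], capped at 6 above
    return np.minimum(np.maximum(x, alpha * x), 6.0)

print(leaky_relu6_np(np.array([-5.0, 3.0, 10.0])))  # -> [-1.  3.  6.]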
tensorlayer/tensorlayer | tensorlayer/activation.py | leaky_twice_relu6 | def leaky_twice_relu6(x, alpha_low=0.2, alpha_high=0.2, name="leaky_twice_relu6"):
    """:func:`leaky_twice_relu6` can be used through its shortcut: :func:`tl.act.ltrelu6`.

    This activation function is a modified version of :func:`leaky_relu` introduced by the following paper:
    `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__

    This activation function also follows the behaviour of the activation function :func:`tf.nn.relu6` introduced by the following paper:
    `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__

    This function pushes the logic further by adding `leaky` behaviour both below zero and above six.

    The function returns the following results:
      - When x < 0: ``f(x) = alpha_low * x``.
      - When x in [0, 6]: ``f(x) = x``.
      - When x > 6: ``f(x) = 6 + (alpha_high * (x-6))``.

    Parameters
    ----------
    x : Tensor
        Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``.
    alpha_low : float
        Slope for x < 0: ``f(x) = alpha_low * x``.
    alpha_high : float
        Slope for x > 6: ``f(x) = 6 + (alpha_high * (x-6))``.
    name : str
        The function name (optional).

    Examples
    --------
    >>> import tensorlayer as tl
    >>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.leaky_twice_relu6(x, 0.2, 0.2), name='dense')

    Returns
    -------
    Tensor
        A ``Tensor`` in the same type as ``x``.

    References
    ----------
    - `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
    - `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__

    """
    if not isinstance(alpha_high, tf.Tensor) and not (0 < alpha_high <= 1):
        raise ValueError("`alpha_high` value must be in (0, 1]")

    if not isinstance(alpha_low, tf.Tensor) and not (0 < alpha_low <= 1):
        raise ValueError("`alpha_low` value must be in (0, 1]")

    with tf.name_scope(name, "leaky_twice_relu6") as name_scope:
        x = tf.convert_to_tensor(x, name="features")

        x_is_above_0 = tf.minimum(x, 6 * (1 - alpha_high) + alpha_high * x)
        x_is_below_0 = tf.minimum(alpha_low * x, 0)
        return tf.maximum(x_is_above_0, x_is_below_0, name=name_scope) | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/activation.py#L137-L192 | valid |
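
A NumPy sketch (illustrative values, not part of the source) verifying that the min/max composition matches the piecewise definition, including the leaky slope above six:

import numpy as np

def ltrelu6_np(x, alpha_low=0.2, alpha_high=0.2):
    x_above = np.minimum(x, 6 * (1 - alpha_high) + alpha_high * x)  # x on [0, 6], 6 + alpha_high*(x-6) above
    x_below = np.minimum(alpha_low * x, 0)                          # alpha_low*x below 0, else 0
    return np.maximum(x_above, x_below)

print(ltrelu6_np(np.array([-5.0, 3.0, 11.0])))  # -> [-1.  3.  7.]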
tensorlayer/tensorlayer | tensorlayer/activation.py | swish | def swish(x, name='swish'):
    """Swish function.

    See `Swish: a Self-Gated Activation Function <https://arxiv.org/abs/1710.05941>`__.

    Parameters
    ----------
    x : Tensor
        input.
    name: str
        function name (optional).

    Returns
    -------
    Tensor
        A ``Tensor`` in the same type as ``x``.

    """
    with tf.name_scope(name):
        x = tf.nn.sigmoid(x) * x
    return x | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/activation.py#L195-L215 | valid |
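
A one-line NumPy sketch of the same computation (illustrative only):

import numpy as np

def swish_np(x):
    # x * sigmoid(x): smooth, and non-monotonic just below zero
    return x / (1.0 + np.exp(-x))

print(swish_np(np.array([-1.0, 0.0, 1.0])))  # ~ [-0.269  0.     0.731]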
tensorlayer/tensorlayer | tensorlayer/activation.py | pixel_wise_softmax | def pixel_wise_softmax(x, name='pixel_wise_softmax'):
    """Return the softmax outputs of images; every pixel has multiple labels, and for each pixel the label probabilities sum to 1.

    Usually used for image segmentation.

    Parameters
    ----------
    x : Tensor
        input.
          - For 2d image, 4D tensor (batch_size, height, width, channel), where channel >= 2.
          - For 3d image, 5D tensor (batch_size, depth, height, width, channel), where channel >= 2.
    name : str
        function name (optional)

    Returns
    -------
    Tensor
        A ``Tensor`` in the same type as ``x``.

    Examples
    --------
    >>> outputs = pixel_wise_softmax(network.outputs)
    >>> dice_loss = 1 - dice_coe(outputs, y_, epsilon=1e-5)

    References
    ----------
    - `tf.reverse <https://www.tensorflow.org/versions/master/api_docs/python/array_ops.html#reverse>`__

    """
    with tf.name_scope(name):
        return tf.nn.softmax(x) | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/activation.py#L303-L333 | valid |
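
A quick NumPy check (illustrative shapes) that a softmax over the last axis makes every pixel's label probabilities sum to 1:

import numpy as np

x = np.random.randn(1, 2, 2, 3)                 # (batch, height, width, channel)
e = np.exp(x - x.max(axis=-1, keepdims=True))   # stabilised softmax over the channel axis
probs = e / e.sum(axis=-1, keepdims=True)
print(np.allclose(probs.sum(axis=-1), 1.0))     # True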
tensorlayer/tensorlayer | tensorlayer/layers/recurrent.py | _conv_linear | def _conv_linear(args, filter_size, num_features, bias, bias_start=0.0, scope=None):
    """convolution:

    Parameters
    ----------
    args : tensor
        4D Tensor or a list of 4D, batch x n, Tensors.
    filter_size : tuple of int
        Filter height and width.
    num_features : int
        Number of features.
    bias_start : float
        Starting value to initialize the bias; 0 by default.
    scope : VariableScope
        For the created subgraph; defaults to "Linear".

    Returns
    --------
    - A 4D Tensor with shape [batch h w num_features]

    Raises
    -------
    - ValueError : if some of the arguments have unspecified or wrong shapes.

    """
    # Calculate the total size of arguments on dimension 1.
    total_arg_size_depth = 0
    shapes = [a.get_shape().as_list() for a in args]
    for shape in shapes:
        if len(shape) != 4:
            raise ValueError("Linear is expecting 4D arguments: %s" % str(shapes))
        if not shape[3]:
            raise ValueError("Linear expects shape[3] of arguments: %s" % str(shapes))
        else:
            total_arg_size_depth += shape[3]

    dtype = [a.dtype for a in args][0]

    # Now the computation.
    with tf.variable_scope(scope or "Conv"):
        matrix = tf.get_variable(
            "Matrix", [filter_size[0], filter_size[1], total_arg_size_depth, num_features], dtype=dtype
        )
        if len(args) == 1:
            res = tf.nn.conv2d(args[0], matrix, strides=[1, 1, 1, 1], padding='SAME')
        else:
            res = tf.nn.conv2d(tf.concat(args, 3), matrix, strides=[1, 1, 1, 1], padding='SAME')
        if not bias:
            return res
        bias_term = tf.get_variable(
            "Bias", [num_features], dtype=dtype, initializer=tf.constant_initializer(bias_start, dtype=dtype)
        )
        return res + bias_term | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/recurrent.py#L596-L648 | valid |
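
Why the cell can concatenate its arguments along channels before a single convolution: convolving the concatenation with a stacked kernel equals the sum of separate convolutions. A NumPy sketch with a 1x1 kernel expressed as einsum (illustrative sizes, not TensorLayer code):

import numpy as np

a = np.random.randn(2, 4, 4, 3)   # (batch, h, w, c1)
b = np.random.randn(2, 4, 4, 5)   # (batch, h, w, c2)
W = np.random.randn(8, 6)         # stacked 1x1 kernel: (c1 + c2, num_features)

joint = np.einsum('bhwc,cf->bhwf', np.concatenate([a, b], axis=3), W)
split = np.einsum('bhwc,cf->bhwf', a, W[:3]) + np.einsum('bhwc,cf->bhwf', b, W[3:])
print(np.allclose(joint, split))  # True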
tensorlayer/tensorlayer | tensorlayer/layers/recurrent.py | advanced_indexing_op | def advanced_indexing_op(inputs, index):
    """Advanced Indexing for Sequences: returns the outputs indexed by the given sequence lengths.
    When returning the last output, :class:`DynamicRNNLayer` uses it to get the last outputs with the sequence lengths.

    Parameters
    -----------
    inputs : tensor for data
        With shape of [batch_size, n_step(max), n_features]
    index : tensor for indexing
        Sequence length in Dynamic RNN. [batch_size]

    Examples
    ---------
    >>> import numpy as np
    >>> import tensorflow as tf
    >>> import tensorlayer as tl
    >>> batch_size, max_length, n_features = 3, 5, 2
    >>> z = np.random.uniform(low=-1, high=1, size=[batch_size, max_length, n_features]).astype(np.float32)
    >>> b_z = tf.constant(z)
    >>> sl = tf.placeholder(dtype=tf.int32, shape=[batch_size])
    >>> o = advanced_indexing_op(b_z, sl)
    >>>
    >>> sess = tf.InteractiveSession()
    >>> tl.layers.initialize_global_variables(sess)
    >>>
    >>> order = np.asarray([1,1,2])
    >>> print("real", z[0][order[0]-1], z[1][order[1]-1], z[2][order[2]-1])
    >>> y = sess.run([o], feed_dict={sl: order})
    >>> print("given", order)
    >>> print("out", y)
    real [-0.93021595  0.53820813] [-0.92548317 -0.77135968] [ 0.89952248  0.19149846]
    given [1 1 2]
    out [array([[-0.93021595,  0.53820813],
        [-0.92548317, -0.77135968],
        [ 0.89952248,  0.19149846]], dtype=float32)]

    References
    -----------
    - Modified from TFlearn (the original code is used for fixed length rnn), `references <https://github.com/tflearn/tflearn/blob/master/tflearn/layers/recurrent.py>`__.

    """
    batch_size = tf.shape(inputs)[0]
    # max_length = int(inputs.get_shape()[1])  # for fixed length rnn, length is given
    max_length = tf.shape(inputs)[1]  # for dynamic_rnn, length is unknown
    dim_size = int(inputs.get_shape()[2])
    index = tf.range(0, batch_size) * max_length + (index - 1)
    flat = tf.reshape(inputs, [-1, dim_size])
    relevant = tf.gather(flat, index)
    return relevant | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/recurrent.py#L798-L846 | valid |
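
The flat-index arithmetic can be sanity-checked in plain NumPy (a sketch, not TensorLayer code):

import numpy as np

batch_size, max_length, n_features = 3, 5, 2
z = np.random.uniform(-1, 1, size=(batch_size, max_length, n_features)).astype(np.float32)
lengths = np.array([1, 1, 2])
idx = np.arange(batch_size) * max_length + (lengths - 1)  # offset into the flattened (batch*step) axis
flat = z.reshape(-1, n_features)
print(np.allclose(flat[idx], z[np.arange(batch_size), lengths - 1]))  # True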
tensorlayer/tensorlayer | tensorlayer/layers/recurrent.py | retrieve_seq_length_op | def retrieve_seq_length_op(data):
    """An op to compute the length of a sequence from input shape of [batch_size, n_step(max), n_features];
    it can be used when the features of the padding (on the right hand side) are all zeros.

    Parameters
    -----------
    data : tensor
        [batch_size, n_step(max), n_features] with zero padding on the right hand side.

    Examples
    ---------
    >>> data = [[[1],[2],[0],[0],[0]],
    ...         [[1],[2],[3],[0],[0]],
    ...         [[1],[2],[6],[1],[0]]]
    >>> data = np.asarray(data)
    >>> print(data.shape)
    (3, 5, 1)
    >>> data = tf.constant(data)
    >>> sl = retrieve_seq_length_op(data)
    >>> sess = tf.InteractiveSession()
    >>> tl.layers.initialize_global_variables(sess)
    >>> y = sl.eval()
    [2 3 4]

    Multiple features
    >>> data = [[[1,2],[2,2],[1,2],[1,2],[0,0]],
    ...         [[2,3],[2,4],[3,2],[0,0],[0,0]],
    ...         [[3,3],[2,2],[5,3],[1,2],[0,0]]]
    >>> sl = retrieve_seq_length_op(tf.constant(np.asarray(data)))
    >>> print(sl.eval())
    [4 3 4]

    References
    ------------
    Borrowed from `TFlearn <https://github.com/tflearn/tflearn/blob/master/tflearn/layers/recurrent.py>`__.

    """
    with tf.name_scope('GetLength'):
        used = tf.sign(tf.reduce_max(tf.abs(data), 2))
        length = tf.reduce_sum(used, 1)
        return tf.cast(length, tf.int32) | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/recurrent.py#L849-L889 | valid |
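
The same reduction in plain NumPy (a sketch; the data comes from the docstring example):

import numpy as np

data = np.array([[[1], [2], [0], [0], [0]],
                 [[1], [2], [3], [0], [0]],
                 [[1], [2], [6], [1], [0]]])
used = np.sign(np.abs(data).max(axis=2))   # 1 where any feature is non-zero at that step
print(used.sum(axis=1).astype(np.int32))   # -> [2 3 4]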
tensorlayer/tensorlayer | tensorlayer/layers/recurrent.py | retrieve_seq_length_op2 | def retrieve_seq_length_op2(data):
    """An op to compute the length of a sequence, from input shape of [batch_size, n_step(max)];
    it can be used when the features of padding (on right hand side) are all zeros.

    Parameters
    -----------
    data : tensor
        [batch_size, n_step(max)] with zero padding on right hand side.

    Examples
    --------
    >>> data = [[1,2,0,0,0],
    ...         [1,2,3,0,0],
    ...         [1,2,6,1,0]]
    >>> o = retrieve_seq_length_op2(data)
    >>> sess = tf.InteractiveSession()
    >>> tl.layers.initialize_global_variables(sess)
    >>> print(o.eval())
    [2 3 4]

    """
    return tf.reduce_sum(tf.cast(tf.greater(data, tf.zeros_like(data)), tf.int32), 1) | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/recurrent.py#L892-L913 | valid |
tensorlayer/tensorlayer | tensorlayer/layers/recurrent.py | retrieve_seq_length_op3 | def retrieve_seq_length_op3(data, pad_val=0):  # HangSheng: return tensor for sequence length, if input is tf.string
    """Return a tensor of sequence lengths; the padding value is configurable via ``pad_val``, so it also works when the input is ``tf.string``."""
    data_shape_size = data.get_shape().ndims
    if data_shape_size == 3:
        return tf.reduce_sum(tf.cast(tf.reduce_any(tf.not_equal(data, pad_val), axis=2), dtype=tf.int32), 1)
    elif data_shape_size == 2:
        return tf.reduce_sum(tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32), 1)
    elif data_shape_size == 1:
        raise ValueError("retrieve_seq_length_op3: data has wrong shape!")
    else:
        raise ValueError(
            "retrieve_seq_length_op3: handling data_shape_size %s hasn't been implemented!" % (data_shape_size)
        ) | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/recurrent.py#L916-L928 | valid |
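
A NumPy sketch of the ``pad_val`` generalisation, here with string padding (values illustrative):

import numpy as np

data = np.array([['a', 'b', ''],
                 ['c', '', '']], dtype=object)
# count the steps that differ from the padding value
print((data != '').astype(np.int32).sum(axis=1))  # -> [2 1]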
tensorlayer/tensorlayer | tensorlayer/layers/recurrent.py | ConvRNNCell.zero_state | def zero_state(self, batch_size, dtype=LayersConfig.tf_dtype):
    """Return zero-filled state tensor(s).

    Args:
        batch_size: int, float, or unit Tensor representing the batch size.

    Returns:
        tensor of shape `[batch_size, shape[0], shape[1], num_features * 2]`
        filled with zeros (the factor 2 packs the cell and hidden states together).

    """
    shape = self.shape
    num_features = self.num_features
    # TODO : TypeError: 'NoneType' object is not subscriptable
    zeros = tf.zeros([batch_size, shape[0], shape[1], num_features * 2], dtype=dtype)
    return zeros | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/recurrent.py#L504-L517 | valid |
tensorlayer/tensorlayer | tensorlayer/layers/recurrent.py | BasicConvLSTMCell.state_size | def state_size(self):
"""State size of the LSTMStateTuple."""
    return (LSTMStateTuple(self._num_units, self._num_units) if self._state_is_tuple else 2 * self._num_units) | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/recurrent.py#L561-L563 | valid |
tensorlayer/tensorlayer | tensorlayer/layers/convolution/deformable_conv.py | DeformableConv2d._to_bc_h_w | def _to_bc_h_w(self, x, x_shape):
    """(b, h, w, c) -> (b*c, h, w)"""
    x = tf.transpose(x, [0, 3, 1, 2])
    x = tf.reshape(x, (-1, x_shape[1], x_shape[2]))
    return x | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/convolution/deformable_conv.py#L158-L162 | valid |
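
A NumPy shape check of this layout change (illustrative sizes); _to_b_h_w_n_c below inverts it once a kernel axis n has been gathered:

import numpy as np

b, h, w, c = 2, 4, 5, 3
x = np.zeros((b, h, w, c))
bc = np.transpose(x, (0, 3, 1, 2)).reshape(-1, h, w)
print(bc.shape)  # (6, 4, 5) == (b*c, h, w)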
tensorlayer/tensorlayer | tensorlayer/layers/convolution/deformable_conv.py | DeformableConv2d._to_b_h_w_n_c | def _to_b_h_w_n_c(self, x, x_shape):
    """(b*c, h, w, n) -> (b, h, w, n, c)"""
    x = tf.reshape(x, (-1, x_shape[4], x_shape[1], x_shape[2], x_shape[3]))
    x = tf.transpose(x, [0, 2, 3, 4, 1])
    return x | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/convolution/deformable_conv.py#L165-L169 | valid |
tensorlayer/tensorlayer | tensorlayer/layers/convolution/deformable_conv.py | DeformableConv2d._tf_repeat | def _tf_repeat(self, a, repeats):
    """Tensorflow version of np.repeat for 1D"""
    # https://github.com/tensorflow/tensorflow/issues/8521
    if len(a.get_shape()) != 1:
        raise AssertionError("This is not a 1D Tensor")

    a = tf.expand_dims(a, -1)
    a = tf.tile(a, [1, repeats])
    a = self.tf_flatten(a)
    return a | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/convolution/deformable_conv.py#L187-L197 | valid |
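
The expand-tile-flatten trick matches np.repeat exactly; a quick NumPy check (illustrative values):

import numpy as np

a = np.array([0, 1, 2])
tiled = np.tile(a[:, None], (1, 4)).reshape(-1)  # expand_dims + tile + flatten
print(np.array_equal(tiled, np.repeat(a, 4)))    # True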
tensorlayer/tensorlayer | tensorlayer/layers/convolution/deformable_conv.py | DeformableConv2d._tf_batch_map_coordinates | def _tf_batch_map_coordinates(self, inputs, coords):
    """Batch version of tf_map_coordinates

    Only supports 2D feature maps

    Parameters
    ----------
    inputs : ``tf.Tensor``
        shape = (b*c, h, w)
    coords : ``tf.Tensor``
        shape = (b*c, h, w, n, 2)

    Returns
    -------
    ``tf.Tensor``
        A Tensor with the shape as (b*c, h, w, n)

    """
    input_shape = inputs.get_shape()
    coords_shape = coords.get_shape()
    batch_channel = tf.shape(inputs)[0]
    input_h = int(input_shape[1])
    input_w = int(input_shape[2])
    kernel_n = int(coords_shape[3])
    n_coords = input_h * input_w * kernel_n

    coords_lt = tf.cast(tf.floor(coords), 'int32')
    coords_rb = tf.cast(tf.ceil(coords), 'int32')
    coords_lb = tf.stack([coords_lt[:, :, :, :, 0], coords_rb[:, :, :, :, 1]], axis=-1)
    coords_rt = tf.stack([coords_rb[:, :, :, :, 0], coords_lt[:, :, :, :, 1]], axis=-1)

    idx = self._tf_repeat(tf.range(batch_channel), n_coords)

    vals_lt = self._get_vals_by_coords(inputs, coords_lt, idx, (batch_channel, input_h, input_w, kernel_n))
    vals_rb = self._get_vals_by_coords(inputs, coords_rb, idx, (batch_channel, input_h, input_w, kernel_n))
    vals_lb = self._get_vals_by_coords(inputs, coords_lb, idx, (batch_channel, input_h, input_w, kernel_n))
    vals_rt = self._get_vals_by_coords(inputs, coords_rt, idx, (batch_channel, input_h, input_w, kernel_n))

    coords_offset_lt = coords - tf.cast(coords_lt, 'float32')

    vals_t = vals_lt + (vals_rt - vals_lt) * coords_offset_lt[:, :, :, :, 0]
    vals_b = vals_lb + (vals_rb - vals_lb) * coords_offset_lt[:, :, :, :, 0]
    mapped_vals = vals_t + (vals_b - vals_t) * coords_offset_lt[:, :, :, :, 1]
    return mapped_vals | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/convolution/deformable_conv.py#L200-L244 | valid |
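
The four corner gathers implement bilinear interpolation; a NumPy sketch for one 2-D map and one fractional coordinate (values illustrative, not from the source):

import numpy as np

img = np.arange(12, dtype=np.float32).reshape(3, 4)  # a linear ramp: img[y, x] = 4*y + x
y, x = 1.3, 2.6
y0, x0 = int(np.floor(y)), int(np.floor(x))
y1, x1 = int(np.ceil(y)), int(np.ceil(x))
wy, wx = y - y0, x - x0
top = img[y0, x0] + (img[y0, x1] - img[y0, x0]) * wx
bot = img[y1, x0] + (img[y1, x1] - img[y1, x0]) * wx
print(top + (bot - top) * wy)  # 7.8 == 4*1.3 + 2.6, as expected on a linear ramp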
tensorlayer/tensorlayer | tensorlayer/layers/convolution/deformable_conv.py | DeformableConv2d._tf_batch_map_offsets | def _tf_batch_map_offsets(self, inputs, offsets, grid_offset):
    """Batch map offsets into input

    Parameters
    ------------
    inputs : ``tf.Tensor``
        shape = (b, h, w, c)
    offsets : ``tf.Tensor``
        shape = (b, h, w, 2*n)
    grid_offset : ``tf.Tensor``
        Offset grids, shape = (h, w, n, 2)

    Returns
    -------
    ``tf.Tensor``
        A Tensor with the shape as (b, h, w, c)

    """
    input_shape = inputs.get_shape()
    batch_size = tf.shape(inputs)[0]
    kernel_n = int(int(offsets.get_shape()[3]) / 2)
    input_h = input_shape[1]
    input_w = input_shape[2]
    channel = input_shape[3]

    # inputs (b, h, w, c) --> (b*c, h, w)
    inputs = self._to_bc_h_w(inputs, input_shape)

    # offsets (b, h, w, 2*n) --> (b, h, w, n, 2)
    offsets = tf.reshape(offsets, (batch_size, input_h, input_w, kernel_n, 2))
    # offsets (b, h, w, n, 2) --> (b*c, h, w, n, 2)
    # offsets = tf.tile(offsets, [channel, 1, 1, 1, 1])

    coords = tf.expand_dims(grid_offset, 0)  # grid_offset --> (1, h, w, n, 2)
    coords = tf.tile(coords, [batch_size, 1, 1, 1, 1]) + offsets  # grid_offset --> (b, h, w, n, 2)

    # clip out of bound
    coords = tf.stack(
        [
            tf.clip_by_value(coords[:, :, :, :, 0], 0.0, tf.cast(input_h - 1, 'float32')),
            tf.clip_by_value(coords[:, :, :, :, 1], 0.0, tf.cast(input_w - 1, 'float32'))
        ], axis=-1
    )
    coords = tf.tile(coords, [channel, 1, 1, 1, 1])

    mapped_vals = self._tf_batch_map_coordinates(inputs, coords)
    # (b*c, h, w, n) --> (b, h, w, n, c)
    mapped_vals = self._to_b_h_w_n_c(mapped_vals, [batch_size, input_h, input_w, kernel_n, channel])
    return mapped_vals | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/convolution/deformable_conv.py#L247-L296 | valid |
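
A NumPy sketch of how the sampling coordinates are assembled and clipped before the bilinear gather (the zero grid here is a stand-in for the precomputed grid_offset, which is not shown in this row):

import numpy as np

b, h, w, n = 2, 3, 3, 9                             # n = 9 sample points for a 3x3 kernel
grid = np.zeros((h, w, n, 2), dtype=np.float32)     # hypothetical grid of base sample positions
offsets = 0.5 * np.random.randn(b, h, w, n, 2)      # predicted per-position offsets
coords = grid[None] + offsets                       # broadcast the grid over the batch
coords[..., 0] = np.clip(coords[..., 0], 0, h - 1)  # keep rows in bounds
coords[..., 1] = np.clip(coords[..., 1], 0, w - 1)  # keep columns in bounds
print(coords.shape)                                 # (2, 3, 3, 9, 2)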
tensorlayer/tensorlayer | tensorlayer/iterate.py | minibatches | def minibatches(inputs=None, targets=None, batch_size=None, allow_dynamic_batch_size=False, shuffle=False):
"""Generate a generator that input a group of example in numpy.array and
their labels, return the examples and labels by the given batch size.
Parameters
----------
inputs : numpy.array
The input features, every row is a example.
targets : numpy.array
The labels of inputs, every row is a example.
batch_size : int
The batch size.
allow_dynamic_batch_size: boolean
Allow the use of the last data batch in case the number of examples is not a multiple of batch_size, this may result in unexpected behaviour if other functions expect a fixed-sized batch-size.
shuffle : boolean
Indicating whether to use a shuffling queue, shuffle the dataset before return.
Examples
--------
>>> X = np.asarray([['a','a'], ['b','b'], ['c','c'], ['d','d'], ['e','e'], ['f','f']])
>>> y = np.asarray([0,1,2,3,4,5])
>>> for batch in tl.iterate.minibatches(inputs=X, targets=y, batch_size=2, shuffle=False):
>>> print(batch)
(array([['a', 'a'], ['b', 'b']], dtype='<U1'), array([0, 1]))
(array([['c', 'c'], ['d', 'd']], dtype='<U1'), array([2, 3]))
(array([['e', 'e'], ['f', 'f']], dtype='<U1'), array([4, 5]))
Notes
-----
If you have two inputs and one label and want to shuffle them together, e.g. X1 (1000, 100), X2 (1000, 80) and Y (1000, 1), you can stack them together (`np.hstack((X1, X2))`)
into (1000, 180) and feed that to ``inputs``. After getting a batch, you can split it back into X1 and X2 (a short sketch follows this entry).
"""
if len(inputs) != len(targets):
raise AssertionError("The length of inputs and targets should be equal")
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
# for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
# chulei: handling the case where the number of samples is not a multiple of batch_size, avoiding wasting samples
for start_idx in range(0, len(inputs), batch_size):
end_idx = start_idx + batch_size
if end_idx > len(inputs):
if allow_dynamic_batch_size:
end_idx = len(inputs)
else:
break
if shuffle:
excerpt = indices[start_idx:end_idx]
else:
excerpt = slice(start_idx, end_idx)
        if (isinstance(inputs, list) or isinstance(targets, list)) and shuffle:
# zsdonghao: for list indexing when shuffle==True
yield [inputs[i] for i in excerpt], [targets[i] for i in excerpt]
else:
            yield inputs[excerpt], targets[excerpt] | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/iterate.py#L15-L72 | valid
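
A small sketch of the Notes above, assuming tensorlayer is importable: shuffle two input arrays together by stacking them, then split each batch back into its parts.

    import numpy as np
    import tensorlayer as tl  # assumed available

    X1 = np.random.rand(1000, 100)
    X2 = np.random.rand(1000, 80)
    Y = np.random.randint(0, 2, size=(1000,))

    X = np.hstack((X1, X2))  # (1000, 180)
    for x, y in tl.iterate.minibatches(X, Y, batch_size=32, shuffle=True):
        x1, x2 = x[:, :100], x[:, 100:]  # split the batch back into its X1 and X2 parts
        # ... feed (x1, x2, y) to the network ...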
tensorlayer/tensorlayer | tensorlayer/iterate.py | seq_minibatches | def seq_minibatches(inputs, targets, batch_size, seq_length, stride=1):
"""Generate a generator that return a batch of sequence inputs and targets.
If `batch_size=100` and `seq_length=5`, one return will have 500 rows (examples).
Parameters
----------
inputs : numpy.array
The input features, every row is a example.
targets : numpy.array
The labels of inputs, every element is a example.
batch_size : int
The batch size.
seq_length : int
The sequence length.
stride : int
The stride step, default is 1.
Examples
--------
Synced sequence input and output.
>>> X = np.asarray([['a','a'], ['b','b'], ['c','c'], ['d','d'], ['e','e'], ['f','f']])
>>> y = np.asarray([0, 1, 2, 3, 4, 5])
>>> for batch in tl.iterate.seq_minibatches(inputs=X, targets=y, batch_size=2, seq_length=2, stride=1):
>>> print(batch)
(array([['a', 'a'], ['b', 'b'], ['b', 'b'], ['c', 'c']], dtype='<U1'), array([0, 1, 1, 2]))
(array([['c', 'c'], ['d', 'd'], ['d', 'd'], ['e', 'e']], dtype='<U1'), array([2, 3, 3, 4]))
Many to One
>>> return_last = True
>>> num_steps = 2
>>> X = np.asarray([['a','a'], ['b','b'], ['c','c'], ['d','d'], ['e','e'], ['f','f']])
>>> Y = np.asarray([0,1,2,3,4,5])
>>> for batch in tl.iterate.seq_minibatches(inputs=X, targets=Y, batch_size=2, seq_length=num_steps, stride=1):
>>> x, y = batch
>>> if return_last:
>>> tmp_y = y.reshape((-1, num_steps) + y.shape[1:])
>>> y = tmp_y[:, -1]
>>> print(x, y)
[['a' 'a']
['b' 'b']
['b' 'b']
['c' 'c']] [1 2]
[['c' 'c']
['d' 'd']
['d' 'd']
['e' 'e']] [3 4]
"""
if len(inputs) != len(targets):
raise AssertionError("The length of inputs and targets should be equal")
n_loads = (batch_size * stride) + (seq_length - stride)
for start_idx in range(0, len(inputs) - n_loads + 1, (batch_size * stride)):
seq_inputs = np.zeros((batch_size, seq_length) + inputs.shape[1:], dtype=inputs.dtype)
seq_targets = np.zeros((batch_size, seq_length) + targets.shape[1:], dtype=targets.dtype)
        for b_idx in range(batch_size):
start_seq_idx = start_idx + (b_idx * stride)
end_seq_idx = start_seq_idx + seq_length
seq_inputs[b_idx] = inputs[start_seq_idx:end_seq_idx]
seq_targets[b_idx] = targets[start_seq_idx:end_seq_idx]
flatten_inputs = seq_inputs.reshape((-1, ) + inputs.shape[1:])
flatten_targets = seq_targets.reshape((-1, ) + targets.shape[1:])
        yield flatten_inputs, flatten_targets | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/iterate.py#L75-L140 | valid
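
A quick sanity check of the windowing arithmetic used above, needing nothing beyond plain Python: each yield consumes batch_size * stride fresh rows plus seq_length - stride rows of look-ahead, which reproduces the two batches shown in the docstring example.

    batch_size, seq_length, stride = 2, 2, 1
    n = 6  # rows in X from the docstring example
    n_loads = (batch_size * stride) + (seq_length - stride)   # 3 rows needed per window
    n_batches = (n - n_loads) // (batch_size * stride) + 1    # 2 batches, as in the example
    print(n_loads, n_batches)  # 3 2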
tensorlayer/tensorlayer | tensorlayer/iterate.py | seq_minibatches2 | def seq_minibatches2(inputs, targets, batch_size, num_steps):
"""Generate a generator that iterates on two list of words. Yields (Returns) the source contexts and
the target context by the given batch_size and num_steps (sequence_length).
In TensorFlow's tutorial, this generates the `batch_size` pointers into the raw PTB data, and allows minibatch iteration along these pointers.
Parameters
----------
inputs : list of data
The context in list format; note that context usually be represented by splitting by space, and then convert to unique word IDs.
targets : list of data
The context in list format; note that context usually be represented by splitting by space, and then convert to unique word IDs.
batch_size : int
The batch size.
num_steps : int
The number of unrolls. i.e. sequence length
Yields
------
Pairs of the batched data, each a matrix of shape [batch_size, num_steps].
Raises
------
ValueError : if batch_size or num_steps are too high.
Examples
--------
>>> X = [i for i in range(20)]
>>> Y = [i for i in range(20,40)]
>>> for batch in tl.iterate.seq_minibatches2(X, Y, batch_size=2, num_steps=3):
... x, y = batch
... print(x, y)
[[ 0. 1. 2.]
[ 10. 11. 12.]]
[[ 20. 21. 22.]
[ 30. 31. 32.]]
[[ 3. 4. 5.]
[ 13. 14. 15.]]
[[ 23. 24. 25.]
[ 33. 34. 35.]]
[[ 6. 7. 8.]
[ 16. 17. 18.]]
[[ 26. 27. 28.]
[ 36. 37. 38.]]
Notes
-----
- Hint: if the input data are images, you can modify the source code `data = np.zeros([batch_size, batch_len])` to `data = np.zeros([batch_size, batch_len, inputs.shape[1], inputs.shape[2], inputs.shape[3]])`.
"""
if len(inputs) != len(targets):
raise AssertionError("The length of inputs and targets should be equal")
data_len = len(inputs)
batch_len = data_len // batch_size
# data = np.zeros([batch_size, batch_len])
data = np.zeros((batch_size, batch_len) + inputs.shape[1:], dtype=inputs.dtype)
data2 = np.zeros([batch_size, batch_len])
for i in range(batch_size):
data[i] = inputs[batch_len * i:batch_len * (i + 1)]
data2[i] = targets[batch_len * i:batch_len * (i + 1)]
epoch_size = (batch_len - 1) // num_steps
if epoch_size == 0:
raise ValueError("epoch_size == 0, decrease batch_size or num_steps")
for i in range(epoch_size):
x = data[:, i * num_steps:(i + 1) * num_steps]
x2 = data2[:, i * num_steps:(i + 1) * num_steps]
        yield (x, x2) | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/iterate.py#L143-L215 | valid
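
A NumPy-only walk-through of the slicing above: 20 items with batch_size=2 give batch_len=10, and each of the epoch_size=3 iterations slices num_steps=3 columns from both rows, reproducing the x batches of the docstring example.

    import numpy as np

    X = np.arange(20)
    batch_size, num_steps = 2, 3
    batch_len = len(X) // batch_size                  # 10
    data = X[:batch_size * batch_len].reshape(batch_size, batch_len)
    for i in range((batch_len - 1) // num_steps):     # epoch_size = 3
        print(data[:, i * num_steps:(i + 1) * num_steps])
    # [[ 0  1  2] [10 11 12]], [[ 3  4  5] [13 14 15]], [[ 6  7  8] [16 17 18]]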
tensorlayer/tensorlayer | tensorlayer/iterate.py | ptb_iterator | def ptb_iterator(raw_data, batch_size, num_steps):
"""Generate a generator that iterates on a list of words, see `PTB example <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_ptb_lstm_state_is_tuple.py>`__.
Yields the source contexts and the target context by the given batch_size and num_steps (sequence_length).
In TensorFlow's tutorial, this generates `batch_size` pointers into the raw
PTB data, and allows minibatch iteration along these pointers.
Parameters
----------
raw_data : a list
the context in list format; note that the context is usually
represented by splitting on spaces, with the words then converted
to unique word IDs.
batch_size : int
the batch size.
num_steps : int
the number of unrolls, i.e. the sequence length
Yields
------
Pairs of the batched data, each a matrix of shape [batch_size, num_steps].
The second element of the tuple is the same data time-shifted to the
right by one.
Raises
------
ValueError : if batch_size or num_steps are too high.
Examples
--------
>>> train_data = [i for i in range(20)]
>>> for batch in tl.iterate.ptb_iterator(train_data, batch_size=2, num_steps=3):
>>> x, y = batch
>>> print(x, y)
[[ 0 1 2] <---x 1st subset/ iteration
[10 11 12]]
[[ 1 2 3] <---y
[11 12 13]]
[[ 3 4 5] <--- 1st batch input 2nd subset/ iteration
[13 14 15]] <--- 2nd batch input
[[ 4 5 6] <--- 1st batch target
[14 15 16]] <--- 2nd batch target
[[ 6 7 8] 3rd subset/ iteration
[16 17 18]]
[[ 7 8 9]
[17 18 19]]
"""
raw_data = np.array(raw_data, dtype=np.int32)
data_len = len(raw_data)
batch_len = data_len // batch_size
data = np.zeros([batch_size, batch_len], dtype=np.int32)
for i in range(batch_size):
data[i] = raw_data[batch_len * i:batch_len * (i + 1)]
epoch_size = (batch_len - 1) // num_steps
if epoch_size == 0:
raise ValueError("epoch_size == 0, decrease batch_size or num_steps")
for i in range(epoch_size):
x = data[:, i * num_steps:(i + 1) * num_steps]
y = data[:, i * num_steps + 1:(i + 1) * num_steps + 1]
        yield (x, y) | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/iterate.py#L218-L283 | valid
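
The target batch is simply the input batch shifted right by one position within the underlying row, which is what makes this iterator suitable for next-word language modelling. A minimal check with plain NumPy:

    import numpy as np

    data = np.arange(20, dtype=np.int32).reshape(2, 10)   # the (batch_size, batch_len) layout
    num_steps, i = 3, 0
    x = data[:, i * num_steps:(i + 1) * num_steps]
    y = data[:, i * num_steps + 1:(i + 1) * num_steps + 1]
    print(x)  # [[ 0  1  2] [10 11 12]]
    print(y)  # [[ 1  2  3] [11 12 13]]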
tensorlayer/tensorlayer | tensorlayer/initializers.py | deconv2d_bilinear_upsampling_initializer | def deconv2d_bilinear_upsampling_initializer(shape):
"""Returns the initializer that can be passed to DeConv2dLayer for initializing the
weights in correspondence to channel-wise bilinear up-sampling.
Used in segmentation approaches such as [FCN](https://arxiv.org/abs/1605.06211)
Parameters
----------
shape : tuple of int
The shape of the filters, [height, width, output_channels, in_channels].
It must match the shape passed to DeConv2dLayer.
Returns
-------
``tf.constant_initializer``
A constant initializer with weights set to correspond to per channel bilinear upsampling
when passed as W_init in DeConv2dLayer
Examples
--------
- Upsampling by a factor of 2, ie e.g 100->200
>>> import tensorflow as tf
>>> import tensorlayer as tl
>>> rescale_factor = 2
>>> imsize = 128
>>> num_channels = 3
>>> filter_shape = (5, 5)
>>> filter_size = (2 * rescale_factor - rescale_factor % 2) #Corresponding bilinear filter size
>>> num_in_channels = 3
>>> num_out_channels = 3
>>> deconv_filter_shape = (filter_size, filter_size, num_out_channels, num_in_channels)
>>> x = tf.placeholder(tf.float32, (1, imsize, imsize, num_channels))
>>> net = tl.layers.InputLayer(x, name='input_layer')
>>> bilinear_init = deconv2d_bilinear_upsampling_initializer(shape=filter_shape)
>>> net = tl.layers.DeConv2dLayer(net,
... shape=filter_shape,
... output_shape=(1, imsize*rescale_factor, imsize*rescale_factor, num_out_channels),
... strides=(1, rescale_factor, rescale_factor, 1),
... W_init=bilinear_init,
... padding='SAME',
... act=None, name='g/h1/decon2d')
"""
if shape[0] != shape[1]:
raise Exception('deconv2d_bilinear_upsampling_initializer only supports symmetrical filter sizes')
if shape[3] < shape[2]:
raise Exception(
'deconv2d_bilinear_upsampling_initializer behaviour is not defined for num_in_channels < num_out_channels '
)
filter_size = shape[0]
num_out_channels = shape[2]
num_in_channels = shape[3]
# Create bilinear filter kernel as numpy array
bilinear_kernel = np.zeros([filter_size, filter_size], dtype=np.float32)
scale_factor = (filter_size + 1) // 2
if filter_size % 2 == 1:
center = scale_factor - 1
else:
center = scale_factor - 0.5
for x in range(filter_size):
for y in range(filter_size):
bilinear_kernel[x, y] = (1 - abs(x - center) / scale_factor) * (1 - abs(y - center) / scale_factor)
weights = np.zeros((filter_size, filter_size, num_out_channels, num_in_channels))
for i in range(num_out_channels):
weights[:, :, i, i] = bilinear_kernel
    # assign the numpy array to a constant_initializer and pass it to get_variable
    return tf.constant_initializer(value=weights, dtype=LayersConfig.tf_dtype) | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/initializers.py#L12-L81 | valid
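
The bilinear kernel built above, reproduced standalone in NumPy so the weights can be inspected. For filter_size=4 (the rescale-factor-2 case) each row and column follows the classic [0.25, 0.75, 0.75, 0.25] tent profile.

    import numpy as np

    filter_size = 4                        # 2 * rescale_factor - rescale_factor % 2 with rescale_factor = 2
    scale_factor = (filter_size + 1) // 2  # 2
    center = scale_factor - 1 if filter_size % 2 == 1 else scale_factor - 0.5
    kernel = np.zeros((filter_size, filter_size), dtype=np.float32)
    for x in range(filter_size):
        for y in range(filter_size):
            kernel[x, y] = (1 - abs(x - center) / scale_factor) * (1 - abs(y - center) / scale_factor)
    print(kernel[0])  # [0.0625 0.1875 0.1875 0.0625], the outer product of the tent profile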
tensorlayer/tensorlayer | tensorlayer/db.py | TensorHub.save_model | def save_model(self, network=None, model_name='model', **kwargs):
"""Save model architecture and parameters into database, timestamp will be added automatically.
Parameters
----------
network : TensorLayer layer
TensorLayer layer instance.
model_name : str
The name/key of model.
kwargs : other events
Other events, such as name, accuracy, loss, step number, etc. (optional).
Examples
---------
Save model architecture and parameters into database.
>>> db.save_model(net, accuracy=0.8, loss=2.3, name='second_model')
Load one model with parameters from database (run this in other script)
>>> net = db.find_top_model(sess=sess, accuracy=0.8, loss=2.3)
Find and load the latest model.
>>> net = db.find_top_model(sess=sess, sort=[("time", pymongo.DESCENDING)])
>>> net = db.find_top_model(sess=sess, sort=[("time", -1)])
Find and load the oldest model.
>>> net = db.find_top_model(sess=sess, sort=[("time", pymongo.ASCENDING)])
>>> net = db.find_top_model(sess=sess, sort=[("time", 1)])
Get model information
>>> net._accuracy
... 0.8
Returns
---------
boolean : True for success, False for fail.
"""
kwargs.update({'model_name': model_name})
self._fill_project_info(kwargs) # put project_name into kwargs
params = network.get_all_params()
s = time.time()
kwargs.update({'architecture': network.all_graphs, 'time': datetime.utcnow()})
try:
params_id = self.model_fs.put(self._serialization(params))
kwargs.update({'params_id': params_id, 'time': datetime.utcnow()})
self.db.Model.insert_one(kwargs)
print("[Database] Save model: SUCCESS, took: {}s".format(round(time.time() - s, 2)))
return True
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
logging.info("{} {} {} {} {}".format(exc_type, exc_obj, fname, exc_tb.tb_lineno, e))
print("[Database] Save model: FAIL")
            return False | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/db.py#L113-L169 | valid
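
A hedged end-to-end sketch of the save/restore cycle formed by save_model and the find_top_model entry below. The TensorHub constructor arguments and the tiny placeholder network are assumptions for illustration, not taken from this entry.

    import tensorflow as tf
    import tensorlayer as tl

    # connection arguments below are placeholders, adjust to your MongoDB setup
    db = tl.db.TensorHub(ip='localhost', port=27017, dbname='demo_db', project_name='tutorial')

    x = tf.placeholder(tf.float32, [None, 784], name='x')
    net = tl.layers.InputLayer(x, name='input')
    net = tl.layers.DenseLayer(net, n_units=10, name='output')

    db.save_model(net, model_name='model', accuracy=0.8, loss=2.3)

    with tf.Session() as sess:
        net2 = db.find_top_model(sess=sess, model_name='model', accuracy=0.8, loss=2.3)
        print(net2._accuracy)  # 0.8, as described in the docstring above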
tensorlayer/tensorlayer | tensorlayer/db.py | TensorHub.find_top_model | def find_top_model(self, sess, sort=None, model_name='model', **kwargs):
"""Finds and returns a model architecture and its parameters from the database which matches the requirement.
Parameters
----------
sess : Session
TensorFlow session.
sort : List of tuple
PyMongo sort argument; search for "PyMongo find one sorting" and see `collection level operations <http://api.mongodb.com/python/current/api/pymongo/collection.html>`__ for more details.
model_name : str or None
The name/key of model.
kwargs : other events
Other events, such as name, accuracy, loss, step number, etc. (optional).
Examples
---------
- see ``save_model``.
Returns
---------
network : TensorLayer layer
Note that, the returned network contains all information of the document (record), e.g. if you saved accuracy in the document, you can get the accuracy by using ``net._accuracy``.
"""
# print(kwargs) # {}
kwargs.update({'model_name': model_name})
self._fill_project_info(kwargs)
s = time.time()
d = self.db.Model.find_one(filter=kwargs, sort=sort)
_temp_file_name = '_find_one_model_ztemp_file'
if d is not None:
params_id = d['params_id']
graphs = d['architecture']
_datetime = d['time']
exists_or_mkdir(_temp_file_name, False)
with open(os.path.join(_temp_file_name, 'graph.pkl'), 'wb') as file:
pickle.dump(graphs, file, protocol=pickle.HIGHEST_PROTOCOL)
else:
print("[Database] FAIL! Cannot find model: {}".format(kwargs))
return False
try:
params = self._deserialization(self.model_fs.get(params_id).read())
np.savez(os.path.join(_temp_file_name, 'params.npz'), params=params)
network = load_graph_and_params(name=_temp_file_name, sess=sess)
del_folder(_temp_file_name)
pc = self.db.Model.find(kwargs)
print(
"[Database] Find one model SUCCESS. kwargs:{} sort:{} save time:{} took: {}s".
format(kwargs, sort, _datetime, round(time.time() - s, 2))
)
            # put all information about the model into the TL layer
for key in d:
network.__dict__.update({"_%s" % key: d[key]})
# check whether more parameters match the requirement
params_id_list = pc.distinct('params_id')
n_params = len(params_id_list)
if n_params != 1:
print(" Note that there are {} models match the kwargs".format(n_params))
return network
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
logging.info("{} {} {} {} {}".format(exc_type, exc_obj, fname, exc_tb.tb_lineno, e))
            return False | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/db.py#L171-L240 | valid
tensorlayer/tensorlayer | tensorlayer/db.py | TensorHub.delete_model | def delete_model(self, **kwargs):
"""Delete model.
Parameters
-----------
kwargs : logging information
Find items to delete; leave it empty to delete all records.
"""
self._fill_project_info(kwargs)
self.db.Model.delete_many(kwargs)
logging.info("[Database] Delete Model SUCCESS") | python | def delete_model(self, **kwargs):
"""Delete model.
Parameters
-----------
kwargs : logging information
Find items to delete, leave it empty to delete all log.
"""
self._fill_project_info(kwargs)
self.db.Model.delete_many(kwargs)
logging.info("[Database] Delete Model SUCCESS") | [
"def",
"delete_model",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_fill_project_info",
"(",
"kwargs",
")",
"self",
".",
"db",
".",
"Model",
".",
"delete_many",
"(",
"kwargs",
")",
"logging",
".",
"info",
"(",
"\"[Database] Delete Model SUCCESS\"",
")"
] | Delete model.
Parameters
-----------
kwargs : logging information
Find items to delete, leave it empty to delete all log. | [
"Delete",
"model",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/db.py#L242-L252 | valid |
tensorlayer/tensorlayer | tensorlayer/db.py | TensorHub.save_dataset | def save_dataset(self, dataset=None, dataset_name=None, **kwargs):
"""Saves one dataset into database, timestamp will be added automatically.
Parameters
----------
dataset : any type
The dataset you want to store.
dataset_name : str
The name of dataset.
kwargs : other events
Other events, such as description, author, etc. (optional).
Examples
----------
Save dataset
>>> db.save_dataset([X_train, y_train, X_test, y_test], 'mnist', description='this is a tutorial')
Get dataset
>>> dataset = db.find_top_dataset('mnist')
Returns
---------
boolean : True if the save succeeds, otherwise False.
"""
self._fill_project_info(kwargs)
if dataset_name is None:
raise Exception("dataset_name is None, please give a dataset name")
kwargs.update({'dataset_name': dataset_name})
s = time.time()
try:
dataset_id = self.dataset_fs.put(self._serialization(dataset))
kwargs.update({'dataset_id': dataset_id, 'time': datetime.utcnow()})
self.db.Dataset.insert_one(kwargs)
# print("[Database] Save params: {} SUCCESS, took: {}s".format(file_name, round(time.time()-s, 2)))
print("[Database] Save dataset: SUCCESS, took: {}s".format(round(time.time() - s, 2)))
return True
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
logging.info("{} {} {} {} {}".format(exc_type, exc_obj, fname, exc_tb.tb_lineno, e))
print("[Database] Save dataset: FAIL")
return False | python |
aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/db.py#L255-L297 | valid |
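A round-trip sketch for ``save_dataset`` (not from the record; ``db`` is the TensorHub handle from the sketch above, and ``description`` is an arbitrary user key). Any picklable object can be stored, since the value is serialized before it is written.
>>> import numpy as np
>>> X = np.random.rand(100, 8).astype(np.float32)  # synthetic demo split
>>> y = np.random.randint(0, 2, size=100)
>>> db.save_dataset((X, y), dataset_name='toy', description='synthetic demo split')
>>> loaded = db.find_top_dataset('toy')
>>> if loaded is not False:
>>>     X2, y2 = loaded
>>>     assert np.allclose(X2, X)  # the stored object comes back unchanged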
tensorlayer/tensorlayer | tensorlayer/db.py | TensorHub.find_top_dataset | def find_top_dataset(self, dataset_name=None, sort=None, **kwargs):
"""Finds and returns a dataset from the database which matches the requirement.
Parameters
----------
dataset_name : str
The name of dataset.
sort : List of tuple
PyMongo sort argument; search "PyMongo find one sorting" and see `collection level operations <http://api.mongodb.com/python/current/api/pymongo/collection.html>`__ for more details.
kwargs : other events
Other events, such as description, author, etc. (optional).
Examples
---------
Save dataset
>>> db.save_dataset([X_train, y_train, X_test, y_test], 'mnist', description='this is a tutorial')
Get dataset
>>> dataset = db.find_top_dataset('mnist')
>>> datasets = db.find_datasets('mnist')
Returns
--------
dataset : the dataset or False
Return False if nothing found.
"""
self._fill_project_info(kwargs)
if dataset_name is None:
raise Exception("dataset_name is None, please give a dataset name")
kwargs.update({'dataset_name': dataset_name})
s = time.time()
d = self.db.Dataset.find_one(filter=kwargs, sort=sort)
if d is not None:
dataset_id = d['dataset_id']
else:
print("[Database] FAIL! Cannot find dataset: {}".format(kwargs))
return False
try:
dataset = self._deserialization(self.dataset_fs.get(dataset_id).read())
pc = self.db.Dataset.find(kwargs)
print("[Database] Find one dataset SUCCESS, {} took: {}s".format(kwargs, round(time.time() - s, 2)))
# check whether more datasets match the requirement
dataset_id_list = pc.distinct('dataset_id')
n_dataset = len(dataset_id_list)
if n_dataset != 1:
print(" Note that there are {} datasets match the requirement".format(n_dataset))
return dataset
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
logging.info("{} {} {} {} {}".format(exc_type, exc_obj, fname, exc_tb.tb_lineno, e))
return False | python |
aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/db.py#L299-L356 | valid |
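When several documents share one ``dataset_name``, ``sort`` and extra keyword filters decide which document ``find_top_dataset`` returns. A sketch continuing the example above, with a hypothetical custom ``version`` key:
>>> import pymongo
>>> db.save_dataset((X, y), dataset_name='toy', version=1)
>>> db.save_dataset((X, y), dataset_name='toy', version=2)
>>> newest = db.find_top_dataset('toy', sort=[('time', pymongo.DESCENDING)])  # latest upload
>>> first = db.find_top_dataset('toy', version=1)  # filter on the custom key instead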
tensorlayer/tensorlayer | tensorlayer/db.py | TensorHub.find_datasets | def find_datasets(self, dataset_name=None, **kwargs):
"""Finds and returns all datasets from the database which matches the requirement.
In some cases, the data in a dataset can be stored separately for better management.
Parameters
----------
dataset_name : str
The name/key of dataset.
kwargs : other events
Other events, such as description, author, etc. (optional).
Returns
--------
dataset_list : list of datasets; returns False if nothing found.
"""
self._fill_project_info(kwargs)
if dataset_name is None:
raise Exception("dataset_name is None, please give a dataset name")
kwargs.update({'dataset_name': dataset_name})
s = time.time()
pc = self.db.Dataset.find(kwargs)
if pc is not None:
dataset_id_list = pc.distinct('dataset_id')
dataset_list = []
for dataset_id in dataset_id_list: # you may have multiple Buckets files
tmp = self.dataset_fs.get(dataset_id).read()
dataset_list.append(self._deserialization(tmp))
else:
print("[Database] FAIL! Cannot find any dataset: {}".format(kwargs))
return False
print("[Database] Find {} datasets SUCCESS, took: {}s".format(len(dataset_list), round(time.time() - s, 2)))
return dataset_list | python |
aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/db.py#L358-L394 | valid |
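Unlike ``find_top_dataset``, ``find_datasets`` deserializes every matching document, which suits datasets stored in several parts. Continuing the sketch above:
>>> parts = db.find_datasets('toy')
>>> if parts is not False:
>>>     print('%d stored copies of "toy"' % len(parts))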
tensorlayer/tensorlayer | tensorlayer/db.py | TensorHub.delete_datasets | def delete_datasets(self, **kwargs):
"""Delete datasets.
Parameters
-----------
kwargs : logging information
Find items to delete; leave it empty to delete all datasets.
"""
self._fill_project_info(kwargs)
self.db.Dataset.delete_many(kwargs)
logging.info("[Database] Delete Dataset SUCCESS") | python | def delete_datasets(self, **kwargs):
"""Delete datasets.
Parameters
-----------
kwargs : logging information
Find items to delete, leave it empty to delete all log.
"""
self._fill_project_info(kwargs)
self.db.Dataset.delete_many(kwargs)
logging.info("[Database] Delete Dataset SUCCESS") | [
"def",
"delete_datasets",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_fill_project_info",
"(",
"kwargs",
")",
"self",
".",
"db",
".",
"Dataset",
".",
"delete_many",
"(",
"kwargs",
")",
"logging",
".",
"info",
"(",
"\"[Database] Delete Dataset SUCCESS\"",
")"
] | Delete datasets.
Parameters
-----------
kwargs : logging information
Find items to delete, leave it empty to delete all log. | [
"Delete",
"datasets",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/db.py#L396-L408 | valid |
tensorlayer/tensorlayer | tensorlayer/db.py | TensorHub.save_training_log | def save_training_log(self, **kwargs):
"""Saves the training log, timestamp will be added automatically.
Parameters
-----------
kwargs : logging information
Events, such as accuracy, loss, step number, etc.
Examples
---------
>>> db.save_training_log(accuracy=0.33, loss=0.98)
"""
self._fill_project_info(kwargs)
kwargs.update({'time': datetime.utcnow()})
_result = self.db.TrainLog.insert_one(kwargs)
_log = self._print_dict(kwargs)
logging.info("[Database] train log: " + _log) | python | def save_training_log(self, **kwargs):
"""Saves the training log, timestamp will be added automatically.
Parameters
-----------
kwargs : logging information
Events, such as accuracy, loss, step number and etc.
Examples
---------
>>> db.save_training_log(accuracy=0.33, loss=0.98)
"""
self._fill_project_info(kwargs)
kwargs.update({'time': datetime.utcnow()})
_result = self.db.TrainLog.insert_one(kwargs)
_log = self._print_dict(kwargs)
logging.info("[Database] train log: " + _log) | [
"def",
"save_training_log",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_fill_project_info",
"(",
"kwargs",
")",
"kwargs",
".",
"update",
"(",
"{",
"'time'",
":",
"datetime",
".",
"utcnow",
"(",
")",
"}",
")",
"_result",
"=",
"self",
".",
"db",
".",
"TrainLog",
".",
"insert_one",
"(",
"kwargs",
")",
"_log",
"=",
"self",
".",
"_print_dict",
"(",
"kwargs",
")",
"logging",
".",
"info",
"(",
"\"[Database] train log: \"",
"+",
"_log",
")"
] | Saves the training log, timestamp will be added automatically.
Parameters
-----------
kwargs : logging information
Events, such as accuracy, loss, step number and etc.
Examples
---------
>>> db.save_training_log(accuracy=0.33, loss=0.98) | [
"Saves",
"the",
"training",
"log",
"timestamp",
"will",
"be",
"added",
"automatically",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/db.py#L411-L429 | valid |
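A per-step logging sketch for ``save_training_log`` (``db`` as above; the loss values are synthetic stand-ins):
>>> import random
>>> for step in range(100):
>>>     loss = 1.0 / (step + 1) + 0.01 * random.random()  # stand-in for a real training loss
>>>     if step % 10 == 0:
>>>         db.save_training_log(step=step, loss=loss)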
tensorlayer/tensorlayer | tensorlayer/db.py | TensorHub.save_validation_log | def save_validation_log(self, **kwargs):
"""Saves the validation log, timestamp will be added automatically.
Parameters
-----------
kwargs : logging information
Events, such as accuracy, loss, step number, etc.
Examples
---------
>>> db.save_validation_log(accuracy=0.33, loss=0.98)
"""
self._fill_project_info(kwargs)
kwargs.update({'time': datetime.utcnow()})
_result = self.db.ValidLog.insert_one(kwargs)
_log = self._print_dict(kwargs)
logging.info("[Database] valid log: " + _log) | python | def save_validation_log(self, **kwargs):
"""Saves the validation log, timestamp will be added automatically.
Parameters
-----------
kwargs : logging information
Events, such as accuracy, loss, step number and etc.
Examples
---------
>>> db.save_validation_log(accuracy=0.33, loss=0.98)
"""
self._fill_project_info(kwargs)
kwargs.update({'time': datetime.utcnow()})
_result = self.db.ValidLog.insert_one(kwargs)
_log = self._print_dict(kwargs)
logging.info("[Database] valid log: " + _log) | [
"def",
"save_validation_log",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_fill_project_info",
"(",
"kwargs",
")",
"kwargs",
".",
"update",
"(",
"{",
"'time'",
":",
"datetime",
".",
"utcnow",
"(",
")",
"}",
")",
"_result",
"=",
"self",
".",
"db",
".",
"ValidLog",
".",
"insert_one",
"(",
"kwargs",
")",
"_log",
"=",
"self",
".",
"_print_dict",
"(",
"kwargs",
")",
"logging",
".",
"info",
"(",
"\"[Database] valid log: \"",
"+",
"_log",
")"
] | Saves the validation log, timestamp will be added automatically.
Parameters
-----------
kwargs : logging information
Events, such as accuracy, loss, step number and etc.
Examples
---------
>>> db.save_validation_log(accuracy=0.33, loss=0.98) | [
"Saves",
"the",
"validation",
"log",
"timestamp",
"will",
"be",
"added",
"automatically",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/db.py#L431-L449 | valid |
tensorlayer/tensorlayer | tensorlayer/db.py | TensorHub.delete_training_log | def delete_training_log(self, **kwargs):
"""Deletes training log.
Parameters
-----------
kwargs : logging information
Find items to delete; leave it empty to delete all logs.
Examples
---------
Save training log
>>> db.save_training_log(accuracy=0.33)
>>> db.save_training_log(accuracy=0.44)
Delete logs that match the requirement
>>> db.delete_training_log(accuracy=0.33)
Delete all logs
>>> db.delete_training_log()
"""
self._fill_project_info(kwargs)
self.db.TrainLog.delete_many(kwargs)
logging.info("[Database] Delete TrainLog SUCCESS") | python | def delete_training_log(self, **kwargs):
"""Deletes training log.
Parameters
-----------
kwargs : logging information
Find items to delete, leave it empty to delete all log.
Examples
---------
Save training log
>>> db.save_training_log(accuracy=0.33)
>>> db.save_training_log(accuracy=0.44)
Delete logs that match the requirement
>>> db.delete_training_log(accuracy=0.33)
Delete all logs
>>> db.delete_training_log()
"""
self._fill_project_info(kwargs)
self.db.TrainLog.delete_many(kwargs)
logging.info("[Database] Delete TrainLog SUCCESS") | [
"def",
"delete_training_log",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_fill_project_info",
"(",
"kwargs",
")",
"self",
".",
"db",
".",
"TrainLog",
".",
"delete_many",
"(",
"kwargs",
")",
"logging",
".",
"info",
"(",
"\"[Database] Delete TrainLog SUCCESS\"",
")"
] | Deletes training log.
Parameters
-----------
kwargs : logging information
Find items to delete, leave it empty to delete all log.
Examples
---------
Save training log
>>> db.save_training_log(accuracy=0.33)
>>> db.save_training_log(accuracy=0.44)
Delete logs that match the requirement
>>> db.delete_training_log(accuracy=0.33)
Delete all logs
>>> db.delete_training_log() | [
"Deletes",
"training",
"log",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/db.py#L471-L493 | valid |
tensorlayer/tensorlayer | tensorlayer/db.py | TensorHub.delete_validation_log | def delete_validation_log(self, **kwargs):
"""Deletes validation log.
Parameters
-----------
kwargs : logging information
Find items to delete; leave it empty to delete all logs.
Examples
---------
- see ``save_training_log``.
"""
self._fill_project_info(kwargs)
self.db.ValidLog.delete_many(kwargs)
logging.info("[Database] Delete ValidLog SUCCESS") | python | def delete_validation_log(self, **kwargs):
"""Deletes validation log.
Parameters
-----------
kwargs : logging information
Find items to delete, leave it empty to delete all log.
Examples
---------
- see ``save_training_log``.
"""
self._fill_project_info(kwargs)
self.db.ValidLog.delete_many(kwargs)
logging.info("[Database] Delete ValidLog SUCCESS") | [
"def",
"delete_validation_log",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_fill_project_info",
"(",
"kwargs",
")",
"self",
".",
"db",
".",
"ValidLog",
".",
"delete_many",
"(",
"kwargs",
")",
"logging",
".",
"info",
"(",
"\"[Database] Delete ValidLog SUCCESS\"",
")"
] | Deletes validation log.
Parameters
-----------
kwargs : logging information
Find items to delete, leave it empty to delete all log.
Examples
---------
- see ``save_training_log``. | [
"Deletes",
"validation",
"log",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/db.py#L495-L509 | valid |
tensorlayer/tensorlayer | tensorlayer/db.py | TensorHub.create_task | def create_task(self, task_name=None, script=None, hyper_parameters=None, saved_result_keys=None, **kwargs):
"""Uploads a task to the database, timestamp will be added automatically.
Parameters
-----------
task_name : str
The task name.
script : str
File name of the python script.
hyper_parameters : dictionary
The hyperparameters passed into the script (they are injected into the script's global namespace when the task runs).
saved_result_keys : list of str
The keys of the task results to keep in the database when the task finishes.
kwargs : other parameters
User-customized parameters, such as a description or version number.
Examples
-----------
Uploads a task
>>> db.create_task(task_name='mnist', script='example/tutorial_mnist_simple.py', description='simple tutorial')
Finds and runs the latest task
>>> db.run_top_task(sess=sess, sort=[("time", pymongo.DESCENDING)])
>>> db.run_top_task(sess=sess, sort=[("time", -1)])
Finds and runs the oldest task
>>> db.run_top_task(sess=sess, sort=[("time", pymongo.ASCENDING)])
>>> db.run_top_task(sess=sess, sort=[("time", 1)])
"""
if not isinstance(task_name, str): # is None:
raise Exception("task_name should be string")
if not isinstance(script, str): # is None:
raise Exception("script should be string")
if hyper_parameters is None:
hyper_parameters = {}
if saved_result_keys is None:
saved_result_keys = []
self._fill_project_info(kwargs)
kwargs.update({'time': datetime.utcnow()})
kwargs.update({'hyper_parameters': hyper_parameters})
kwargs.update({'saved_result_keys': saved_result_keys})
_script = open(script, 'rb').read()
kwargs.update({'status': 'pending', 'script': _script, 'result': {}})
self.db.Task.insert_one(kwargs)
logging.info("[Database] Saved Task - task_name: {} script: {}".format(task_name, script)) | python | def create_task(self, task_name=None, script=None, hyper_parameters=None, saved_result_keys=None, **kwargs):
"""Uploads a task to the database, timestamp will be added automatically.
Parameters
-----------
task_name : str
The task name.
script : str
File name of the python script.
hyper_parameters : dictionary
The hyper parameters pass into the script.
saved_result_keys : list of str
The keys of the task results to keep in the database when the task finishes.
kwargs : other parameters
Users customized parameters such as description, version number.
Examples
-----------
Uploads a task
>>> db.create_task(task_name='mnist', script='example/tutorial_mnist_simple.py', description='simple tutorial')
Finds and runs the latest task
>>> db.run_top_task(sess=sess, sort=[("time", pymongo.DESCENDING)])
>>> db.run_top_task(sess=sess, sort=[("time", -1)])
Finds and runs the oldest task
>>> db.run_top_task(sess=sess, sort=[("time", pymongo.ASCENDING)])
>>> db.run_top_task(sess=sess, sort=[("time", 1)])
"""
if not isinstance(task_name, str): # is None:
raise Exception("task_name should be string")
if not isinstance(script, str): # is None:
raise Exception("script should be string")
if hyper_parameters is None:
hyper_parameters = {}
if saved_result_keys is None:
saved_result_keys = []
self._fill_project_info(kwargs)
kwargs.update({'time': datetime.utcnow()})
kwargs.update({'hyper_parameters': hyper_parameters})
kwargs.update({'saved_result_keys': saved_result_keys})
_script = open(script, 'rb').read()
kwargs.update({'status': 'pending', 'script': _script, 'result': {}})
self.db.Task.insert_one(kwargs)
logging.info("[Database] Saved Task - task_name: {} script: {}".format(task_name, script)) | [
"def",
"create_task",
"(",
"self",
",",
"task_name",
"=",
"None",
",",
"script",
"=",
"None",
",",
"hyper_parameters",
"=",
"None",
",",
"saved_result_keys",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"task_name",
",",
"str",
")",
":",
"# is None:",
"raise",
"Exception",
"(",
"\"task_name should be string\"",
")",
"if",
"not",
"isinstance",
"(",
"script",
",",
"str",
")",
":",
"# is None:",
"raise",
"Exception",
"(",
"\"script should be string\"",
")",
"if",
"hyper_parameters",
"is",
"None",
":",
"hyper_parameters",
"=",
"{",
"}",
"if",
"saved_result_keys",
"is",
"None",
":",
"saved_result_keys",
"=",
"[",
"]",
"self",
".",
"_fill_project_info",
"(",
"kwargs",
")",
"kwargs",
".",
"update",
"(",
"{",
"'time'",
":",
"datetime",
".",
"utcnow",
"(",
")",
"}",
")",
"kwargs",
".",
"update",
"(",
"{",
"'hyper_parameters'",
":",
"hyper_parameters",
"}",
")",
"kwargs",
".",
"update",
"(",
"{",
"'saved_result_keys'",
":",
"saved_result_keys",
"}",
")",
"_script",
"=",
"open",
"(",
"script",
",",
"'rb'",
")",
".",
"read",
"(",
")",
"kwargs",
".",
"update",
"(",
"{",
"'status'",
":",
"'pending'",
",",
"'script'",
":",
"_script",
",",
"'result'",
":",
"{",
"}",
"}",
")",
"self",
".",
"db",
".",
"Task",
".",
"insert_one",
"(",
"kwargs",
")",
"logging",
".",
"info",
"(",
"\"[Database] Saved Task - task_name: {} script: {}\"",
".",
"format",
"(",
"task_name",
",",
"script",
")",
")"
] | Uploads a task to the database, timestamp will be added automatically.
Parameters
-----------
task_name : str
The task name.
script : str
File name of the python script.
hyper_parameters : dictionary
The hyper parameters pass into the script.
saved_result_keys : list of str
The keys of the task results to keep in the database when the task finishes.
kwargs : other parameters
Users customized parameters such as description, version number.
Examples
-----------
Uploads a task
>>> db.create_task(task_name='mnist', script='example/tutorial_mnist_simple.py', description='simple tutorial')
Finds and runs the latest task
>>> db.run_top_task(sess=sess, sort=[("time", pymongo.DESCENDING)])
>>> db.run_top_task(sess=sess, sort=[("time", -1)])
Finds and runs the oldest task
>>> db.run_top_task(sess=sess, sort=[("time", pymongo.ASCENDING)])
>>> db.run_top_task(sess=sess, sort=[("time", 1)]) | [
"Uploads",
"a",
"task",
"to",
"the",
"database",
"timestamp",
"will",
"be",
"added",
"automatically",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/db.py#L537-L585 | valid |
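A sketch that pushes one task per learning rate. Two details are grounded in the implementation: ``run_top_task`` injects ``hyper_parameters`` into the script's global namespace, and ``saved_result_keys`` must name variables the script defines. The script path follows the docstring example; the keys themselves are hypothetical.
>>> for lr in (0.01, 0.001):
>>>     db.create_task(task_name='mnist', script='example/tutorial_mnist_simple.py',
>>>                    hyper_parameters={'learning_rate': lr, 'n_epoch': 3},
>>>                    saved_result_keys=['test_accuracy'], description='grid point lr=%s' % lr)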
tensorlayer/tensorlayer | tensorlayer/db.py | TensorHub.run_top_task | def run_top_task(self, task_name=None, sort=None, **kwargs):
"""Finds and runs a pending task that in the first of the sorting list.
Parameters
-----------
task_name : str
The task name.
sort : List of tuple
PyMongo sort argument; search "PyMongo find one sorting" and see `collection level operations <http://api.mongodb.com/python/current/api/pymongo/collection.html>`__ for more details.
kwargs : other parameters
User-customized parameters, such as a description or version number.
Examples
---------
Monitors the database and pulls tasks to run
>>> while True:
>>> print("waiting task from distributor")
>>> db.run_top_task(task_name='mnist', sort=[("time", -1)])
>>> time.sleep(1)
Returns
--------
boolean : True for success, False for fail.
"""
if not isinstance(task_name, str): # is None:
raise Exception("task_name should be string")
self._fill_project_info(kwargs)
kwargs.update({'status': 'pending'})
# find task and set status to running
task = self.db.Task.find_one_and_update(kwargs, {'$set': {'status': 'running'}}, sort=sort)
try:
# get task info e.g. hyper parameters, python script
if task is None:
logging.info("[Database] Find Task FAIL: key: {} sort: {}".format(task_name, sort))
return False
else:
logging.info("[Database] Find Task SUCCESS: key: {} sort: {}".format(task_name, sort))
_datetime = task['time']
_script = task['script']
_id = task['_id']
_hyper_parameters = task['hyper_parameters']
_saved_result_keys = task['saved_result_keys']
logging.info(" hyper parameters:")
for key in _hyper_parameters:
globals()[key] = _hyper_parameters[key]
logging.info(" {}: {}".format(key, _hyper_parameters[key]))
# run task
s = time.time()
logging.info("[Database] Start Task: key: {} sort: {} push time: {}".format(task_name, sort, _datetime))
_script = _script.decode('utf-8')
with tf.Graph().as_default(): # as graph: # clear all TF graphs
exec(_script, globals())
# set status to finished
_ = self.db.Task.find_one_and_update({'_id': _id}, {'$set': {'status': 'finished'}})
# return results
__result = {}
for _key in _saved_result_keys:
logging.info(" result: {}={} {}".format(_key, globals()[_key], type(globals()[_key])))
__result.update({"%s" % _key: globals()[_key]})
_ = self.db.Task.find_one_and_update(
{
'_id': _id
}, {'$set': {
'result': __result
}}, return_document=pymongo.ReturnDocument.AFTER
)
logging.info(
"[Database] Finished Task: task_name - {} sort: {} push time: {} took: {}s".
format(task_name, sort, _datetime,
time.time() - s)
)
return True
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
logging.info("{} {} {} {} {}".format(exc_type, exc_obj, fname, exc_tb.tb_lineno, e))
logging.info("[Database] Fail to run task")
# if fail, set status back to pending
_ = self.db.Task.find_one_and_update({'_id': _id}, {'$set': {'status': 'pending'}})
return False | python |
aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/db.py#L587-L670 | valid |
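A worker-loop sketch built from the docstring example: each call claims the oldest pending task (its status flips to 'running', then 'finished', or back to 'pending' on failure) and returns False when nothing is pending.
>>> import time
>>> import pymongo
>>> while True:
>>>     ran = db.run_top_task(task_name='mnist', sort=[('time', pymongo.ASCENDING)])
>>>     if not ran:  # queue empty or the task failed; poll again
>>>         time.sleep(1)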
tensorlayer/tensorlayer | tensorlayer/db.py | TensorHub.delete_tasks | def delete_tasks(self, **kwargs):
"""Delete tasks.
Parameters
-----------
kwargs : logging information
Find items to delete; leave it empty to delete all tasks.
Examples
---------
>>> db.delete_tasks()
"""
self._fill_project_info(kwargs)
self.db.Task.delete_many(kwargs)
logging.info("[Database] Delete Task SUCCESS") | python | def delete_tasks(self, **kwargs):
"""Delete tasks.
Parameters
-----------
kwargs : logging information
Find items to delete, leave it empty to delete all log.
Examples
---------
>>> db.delete_tasks()
"""
self._fill_project_info(kwargs)
self.db.Task.delete_many(kwargs)
logging.info("[Database] Delete Task SUCCESS") | [
"def",
"delete_tasks",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_fill_project_info",
"(",
"kwargs",
")",
"self",
".",
"db",
".",
"Task",
".",
"delete_many",
"(",
"kwargs",
")",
"logging",
".",
"info",
"(",
"\"[Database] Delete Task SUCCESS\"",
")"
] | Delete tasks.
Parameters
-----------
kwargs : logging information
Find items to delete, leave it empty to delete all log.
Examples
---------
>>> db.delete_tasks() | [
"Delete",
"tasks",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/db.py#L672-L688 | valid |
tensorlayer/tensorlayer | tensorlayer/db.py | TensorHub.check_unfinished_task | def check_unfinished_task(self, task_name=None, **kwargs):
"""Finds and runs a pending task.
Parameters
-----------
task_name : str
The task name.
kwargs : other parameters
User-customized parameters, such as a description or version number.
Examples
---------
Wait until all tasks finish in user's local console
>>> while not db.check_unfinished_task():
>>> time.sleep(1)
>>> print("all tasks finished")
>>> sess = tf.InteractiveSession()
>>> net = db.find_top_model(sess=sess, sort=[("test_accuracy", -1)])
>>> print("the best accuracy {} is from model {}".format(net._test_accuracy, net._name))
Returns
--------
boolean : True if unfinished tasks exist, False otherwise.
"""
if not isinstance(task_name, str): # is None:
raise Exception("task_name should be string")
self._fill_project_info(kwargs)
kwargs.update({'$or': [{'status': 'pending'}, {'status': 'running'}]})
# ## find task
# task = self.db.Task.find_one(kwargs)
task = self.db.Task.find(kwargs)
task_id_list = task.distinct('_id')
n_task = len(task_id_list)
if n_task == 0:
logging.info("[Database] No unfinished task - task_name: {}".format(task_name))
return False
else:
logging.info("[Database] Find {} unfinished task - task_name: {}".format(n_task, task_name))
return True | python |
aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/db.py#L690-L736 | valid |
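A coordinator-side sketch combining the pieces above: block until no 'mnist' task is pending or running, then pull the best resulting model (``test_accuracy`` is again a hypothetical saved field).
>>> import time
>>> while db.check_unfinished_task(task_name='mnist'):
>>>     time.sleep(1)
>>> print("all tasks finished")
>>> best = db.find_top_model(sess=sess, sort=[("test_accuracy", -1)])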
tensorlayer/tensorlayer | examples/text_classification/tutorial_imdb_fasttext.py | augment_with_ngrams | def augment_with_ngrams(unigrams, unigram_vocab_size, n_buckets, n=2):
"""Augment unigram features with hashed n-gram features."""
def get_ngrams(n):
return list(zip(*[unigrams[i:] for i in range(n)]))
def hash_ngram(ngram):
bytes_ = array.array('L', ngram).tobytes()
hash_ = int(hashlib.sha256(bytes_).hexdigest(), 16)
return unigram_vocab_size + hash_ % n_buckets
return unigrams + [hash_ngram(ngram) for i in range(2, n + 1) for ngram in get_ngrams(i)] | python |
aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/text_classification/tutorial_imdb_fasttext.py#L98-L109 | valid |
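A self-contained check of the hashing behaviour, assuming the ``augment_with_ngrams`` defined above is in scope: unigrams are preserved in order, and each hashed bigram id lands in the bucket range stacked on top of the unigram vocabulary.
>>> unigrams = [3, 7, 2]  # toy token ids
>>> out = augment_with_ngrams(unigrams, unigram_vocab_size=10, n_buckets=100, n=2)
>>> out[:3] == unigrams  # original ids kept
True
>>> len(out)  # plus two bigrams: (3, 7) and (7, 2)
5
>>> all(10 <= h < 110 for h in out[3:])  # hashed ids sit above the unigram vocab
True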
tensorlayer/tensorlayer | examples/text_classification/tutorial_imdb_fasttext.py | load_and_preprocess_imdb_data | def load_and_preprocess_imdb_data(n_gram=None):
"""Load IMDb data and augment with hashed n-gram features."""
X_train, y_train, X_test, y_test = tl.files.load_imdb_dataset(nb_words=VOCAB_SIZE)
if n_gram is not None:
X_train = np.array([augment_with_ngrams(x, VOCAB_SIZE, N_BUCKETS, n=n_gram) for x in X_train])
X_test = np.array([augment_with_ngrams(x, VOCAB_SIZE, N_BUCKETS, n=n_gram) for x in X_test])
return X_train, y_train, X_test, y_test | python |
aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/text_classification/tutorial_imdb_fasttext.py#L112-L120 | valid |
tensorlayer/tensorlayer | tensorlayer/visualize.py | read_image | def read_image(image, path=''):
"""Read one image.
Parameters
-----------
image : str
The image file name.
path : str
The image folder path.
Returns
-------
numpy.array
The image.
"""
return imageio.imread(os.path.join(path, image)) | python |
aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/visualize.py#L34-L50 | valid |
tensorlayer/tensorlayer | tensorlayer/visualize.py | read_images | def read_images(img_list, path='', n_threads=10, printable=True):
"""Returns all images in list by given path and name of each image file.
Parameters
-------------
img_list : list of str
The image file names.
path : str
The image folder path.
n_threads : int
The number of threads to read image.
printable : boolean
Whether to print information when reading images.
Returns
-------
list of numpy.array
The images.
"""
imgs = []
for idx in range(0, len(img_list), n_threads):
b_imgs_list = img_list[idx:idx + n_threads]
b_imgs = tl.prepro.threading_data(b_imgs_list, fn=read_image, path=path)
# tl.logging.info(b_imgs.shape)
imgs.extend(b_imgs)
if printable:
tl.logging.info('read %d from %s' % (len(imgs), path))
return imgs | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/visualize.py#L53-L81 | valid |
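A sketch of batched reading (the file names are hypothetical); the function reads the list in chunks of n_threads via tl.prepro.threading_data:

import tensorlayer as tl

file_list = ['img0.png', 'img1.png', 'img2.png', 'img3.png']
imgs = tl.vis.read_images(file_list, path='images/', n_threads=2, printable=False)
print(len(imgs))  # 4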
tensorlayer/tensorlayer | tensorlayer/visualize.py | save_image | def save_image(image, image_path='_temp.png'):
"""Save a image.
Parameters
-----------
image : numpy array
[w, h, c]
image_path : str
path
"""
try: # RGB
imageio.imwrite(image_path, image)
except Exception: # Greyscale
imageio.imwrite(image_path, image[:, :, 0]) | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/visualize.py#L84-L98 | valid |
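A sketch with synthetic data; if writing the full array fails, save_image falls back to writing only channel 0, so (h, w, 1) greyscale arrays also work:

import numpy as np
import tensorlayer as tl

img = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)  # synthetic RGB image
tl.vis.save_image(img, image_path='random_rgb.png')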
tensorlayer/tensorlayer | tensorlayer/visualize.py | save_images | def save_images(images, size, image_path='_temp.png'):
"""Save multiple images into one single image.
Parameters
-----------
images : numpy array
(batch, w, h, c)
size : list of 2 ints
row and column number.
number of images should be equal or less than size[0] * size[1]
image_path : str
save path
Examples
---------
>>> import numpy as np
>>> import tensorlayer as tl
>>> images = np.random.rand(64, 100, 100, 3)
>>> tl.visualize.save_images(images, [8, 8], 'temp.png')
"""
if len(images.shape) == 3: # Greyscale [batch, h, w] --> [batch, h, w, 1]
images = images[:, :, :, np.newaxis]
def merge(images, size):
h, w = images.shape[1], images.shape[2]
img = np.zeros((h * size[0], w * size[1], 3), dtype=images.dtype)
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w, :] = image
return img
def imsave(images, size, path):
if np.max(images) <= 1 and (-1 <= np.min(images) < 0):
images = ((images + 1) * 127.5).astype(np.uint8)
elif np.max(images) <= 1 and np.min(images) >= 0:
images = (images * 255).astype(np.uint8)
return imageio.imwrite(path, merge(images, size))
if len(images) > size[0] * size[1]:
raise AssertionError("number of images should be equal or less than size[0] * size[1] {}".format(len(images)))
return imsave(images, size, image_path) | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/visualize.py#L101-L145 | valid |
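Complementing the docstring example, a sketch that sizes the grid for a batch that is not a perfect square (synthetic data); the only constraint checked is len(images) <= size[0] * size[1]:

import numpy as np
import tensorlayer as tl

batch = np.random.rand(30, 28, 28)        # greyscale; a channel axis is added internally
n_col = 6
n_row = int(np.ceil(len(batch) / n_col))  # a 5 x 6 grid holds all 30 images
tl.vis.save_images(batch, [n_row, n_col], 'grid.png')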
tensorlayer/tensorlayer | tensorlayer/visualize.py | draw_boxes_and_labels_to_image | def draw_boxes_and_labels_to_image(
image, classes, coords, scores, classes_list, is_center=True, is_rescale=True, save_name=None
):
"""Draw bboxes and class labels on image. Return or save the image with bboxes, example in the docs of ``tl.prepro``.
Parameters
-----------
image : numpy.array
The RGB image [height, width, channel].
classes : list of int
A list of class ID (int).
coords : list of list of int/float
A list of lists of coordinates.
- Should be [x, y, x2, y2] (up-left and bottom-right format)
- If [x_center, y_center, w, h] (set is_center to True).
scores : list of float
A list of scores (float). (Optional)
classes_list : list of str
For converting class IDs to name strings on the image.
is_center : boolean
Whether the coordinates are [x_center, y_center, w, h]
- If coordinates are [x_center, y_center, w, h], set it to True to convert them to [x, y, x2, y2] (up-left and bottom-right) internally.
- If coordinates are [x1, y1, x2, y2], set it to False.
is_rescale : boolean
Whether the input coordinates are in ratio format and need rescaling to pixel units.
- If True, the input coordinates are fractions of the image width and height, and this API scales them to pixel units internally.
- If False, feed the coordinates in pixel-unit format.
save_name : None or str
The name of the image file (e.g. image.png); if None, the image is not saved.
Returns
-------
numpy.array
The saved image.
References
-----------
- OpenCV rectangle and putText.
- `scikit-image <http://scikit-image.org/docs/dev/api/skimage.draw.html#skimage.draw.rectangle>`__.
"""
if len(coords) != len(classes):
raise AssertionError("number of coordinates and classes are equal")
if len(scores) > 0 and len(scores) != len(classes):
raise AssertionError("number of scores and classes are equal")
# don't change the original image, and avoid error https://stackoverflow.com/questions/30249053/python-opencv-drawing-errors-after-manipulating-array-with-numpy
image = image.copy()
imh, imw = image.shape[0:2]
thick = int((imh + imw) // 430)
for i, _v in enumerate(coords):
if is_center:
x, y, x2, y2 = tl.prepro.obj_box_coord_centroid_to_upleft_butright(coords[i])
else:
x, y, x2, y2 = coords[i]
if is_rescale: # scale back to pixel units if the coords are fractions of the image width and height
x, y, x2, y2 = tl.prepro.obj_box_coord_scale_to_pixelunit([x, y, x2, y2], (imh, imw))
cv2.rectangle(
image,
(int(x), int(y)),
(int(x2), int(y2)), # up-left and bottom-right
[0, 255, 0],
thick
)
cv2.putText(
image,
classes_list[classes[i]] + ((" %.2f" % (scores[i])) if (len(scores) != 0) else " "),
(int(x), int(y)), # bottom left
0,
1.5e-3 * imh, # bigger = larger font
[0, 0, 255], # text color (BGR); 8-bit channels max out at 255
int(thick / 2) + 1
) # bold
if save_name is not None:
# cv2.imwrite('_my.png', image)
save_image(image, save_name)
# if len(coords) == 0:
# tl.logging.info("draw_boxes_and_labels_to_image: no bboxes exist, cannot draw !")
return image | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/visualize.py#L148-L233 | valid |
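A minimal sketch with pixel-unit, corner-format boxes (all values synthetic); note the function draws on a copy and returns it rather than modifying the input:

import numpy as np
import tensorlayer as tl

canvas = np.zeros((240, 320, 3), dtype=np.uint8)
out = tl.vis.draw_boxes_and_labels_to_image(
    canvas,
    classes=[0],
    coords=[[30, 40, 200, 180]],  # [x1, y1, x2, y2] in pixels
    scores=[0.9],
    classes_list=['cat'],
    is_center=False, is_rescale=False, save_name=None,
)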
tensorlayer/tensorlayer | tensorlayer/visualize.py | draw_mpii_pose_to_image | def draw_mpii_pose_to_image(image, poses, save_name='image.png'):
"""Draw people(s) into image using MPII dataset format as input, return or save the result image.
This is an experimental API, can be changed in the future.
Parameters
-----------
image : numpy.array
The RGB image [height, width, channel].
poses : list of dict
The people(s) annotation in MPII format, see ``tl.files.load_mpii_pose_dataset``.
save_name : None or str
The name of the image file (e.g. image.png); if None, the image is not saved.
Returns
--------
numpy.array
The saved image.
Examples
--------
>>> import pprint
>>> import tensorlayer as tl
>>> img_train_list, ann_train_list, img_test_list, ann_test_list = tl.files.load_mpii_pose_dataset()
>>> image = tl.vis.read_image(img_train_list[0])
>>> tl.vis.draw_mpii_pose_to_image(image, ann_train_list[0], 'image.png')
>>> pprint.pprint(ann_train_list[0])
References
-----------
- `MPII Keypoints and ID <http://human-pose.mpi-inf.mpg.de/#download>`__
"""
# import skimage
# don't change the original image, and avoid error https://stackoverflow.com/questions/30249053/python-opencv-drawing-errors-after-manipulating-array-with-numpy
image = image.copy()
imh, imw = image.shape[0:2]
thick = int((imh + imw) // 430)
# radius = int(image.shape[1] / 500) + 1
radius = int(thick * 1.5)
if image.max() < 1:
image = image * 255
for people in poses:
# Pose keypoints
joint_pos = people['joint_pos']
# draw sketch
# joint id (0 - r ankle, 1 - r knee, 2 - r hip, 3 - l hip, 4 - l knee,
# 5 - l ankle, 6 - pelvis, 7 - thorax, 8 - upper neck,
# 9 - head top, 10 - r wrist, 11 - r elbow, 12 - r shoulder,
# 13 - l shoulder, 14 - l elbow, 15 - l wrist)
#
# 9
# 8
# 12 ** 7 ** 13
# * * *
# 11 * 14
# * * *
# 10 2 * 6 * 3 15
# * *
# 1 4
# * *
# 0 5
lines = [
[(0, 1), [100, 255, 100]],
[(1, 2), [50, 255, 50]],
[(2, 6), [0, 255, 0]], # right leg
[(3, 4), [100, 100, 255]],
[(4, 5), [50, 50, 255]],
[(6, 3), [0, 0, 255]], # left leg
[(6, 7), [255, 255, 100]],
[(7, 8), [255, 150, 50]], # body
[(8, 9), [255, 200, 100]], # head
[(10, 11), [255, 100, 255]],
[(11, 12), [255, 50, 255]],
[(12, 8), [255, 0, 255]], # right hand
[(8, 13), [0, 255, 255]],
[(13, 14), [100, 255, 255]],
[(14, 15), [200, 255, 255]] # left hand
]
for line in lines:
start, end = line[0]
if (start in joint_pos) and (end in joint_pos):
cv2.line(
image,
(int(joint_pos[start][0]), int(joint_pos[start][1])),
(int(joint_pos[end][0]), int(joint_pos[end][1])), # start and end points of the segment
line[1],
thick
)
# rr, cc, val = skimage.draw.line_aa(int(joint_pos[start][1]), int(joint_pos[start][0]), int(joint_pos[end][1]), int(joint_pos[end][0]))
# image[rr, cc] = line[1]
# draw circles
for pos in joint_pos.items():
_, pos_loc = pos # pos_id, pos_loc
pos_loc = (int(pos_loc[0]), int(pos_loc[1]))
cv2.circle(image, center=pos_loc, radius=radius, color=(200, 200, 200), thickness=-1)
# rr, cc = skimage.draw.circle(int(pos_loc[1]), int(pos_loc[0]), radius)
# image[rr, cc] = [0, 255, 0]
# Head
head_rect = people['head_rect']
if head_rect: # if head exists
cv2.rectangle(
image,
(int(head_rect[0]), int(head_rect[1])),
(int(head_rect[2]), int(head_rect[3])), # up-left and bottom-right
[0, 180, 0],
thick
)
if save_name is not None:
# cv2.imwrite(save_name, image)
save_image(image, save_name)
return image | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/visualize.py#L236-L352 | valid |
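A hand-built annotation is enough to exercise the function; the coordinates below are hypothetical, and only the 'joint_pos' and 'head_rect' keys are read. Joint pairs are drawn only when both ends exist in joint_pos:

import numpy as np
import tensorlayer as tl

canvas = np.full((240, 320, 3), 255, dtype=np.uint8)
pose = {
    'joint_pos': {8: (160, 60), 9: (160, 20)},  # upper neck and head top
    'head_rect': [130, 10, 190, 70],            # [x1, y1, x2, y2]
}
out = tl.vis.draw_mpii_pose_to_image(canvas, [pose], save_name=None)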
tensorlayer/tensorlayer | tensorlayer/visualize.py | frame | def frame(I=None, second=5, saveable=True, name='frame', cmap=None, fig_idx=12836):
"""Display a frame. Make sure OpenAI Gym render() is disable before using it.
Parameters
----------
I : numpy.array
The image.
second : int
The display second(s) for the image(s), if saveable is False.
saveable : boolean
Save or plot the figure.
name : str
A name to save the image, if saveable is True.
cmap : None or str
'gray' for greyscale, None for default, etc.
fig_idx : int
matplotlib figure index.
Examples
--------
>>> env = gym.make("Pong-v0")
>>> observation = env.reset()
>>> tl.visualize.frame(observation)
"""
import matplotlib.pyplot as plt
if saveable is False:
plt.ion()
plt.figure(fig_idx) # show all feature images
if len(I.shape) == 3 and I.shape[-1] == 1: # (10,10,1) --> (10,10)
I = I[:, :, 0]
plt.imshow(I, cmap)
plt.title(name)
# plt.gca().xaxis.set_major_locator(plt.NullLocator()) # disable ticks
# plt.gca().yaxis.set_major_locator(plt.NullLocator())
if saveable:
plt.savefig(name + '.pdf', format='pdf')
else:
plt.draw()
plt.pause(second) | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/visualize.py#L358-L400 | valid |
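A sketch that saves a frame to PDF instead of displaying it; the array below is a synthetic stand-in for a gym observation:

import numpy as np
import tensorlayer as tl

obs = (np.random.rand(210, 160, 3) * 255).astype(np.uint8)
tl.visualize.frame(obs, saveable=True, name='observation')  # writes observation.pdf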
tensorlayer/tensorlayer | tensorlayer/visualize.py | CNN2d | def CNN2d(CNN=None, second=10, saveable=True, name='cnn', fig_idx=3119362):
"""Display a group of RGB or Greyscale CNN masks.
Parameters
----------
CNN : numpy.array
The image. e.g: 64 5x5 RGB images can be (5, 5, 3, 64).
second : int
The display second(s) for the image(s), if saveable is False.
saveable : boolean
Save or plot the figure.
name : str
A name to save the image, if saveable is True.
fig_idx : int
The matplotlib figure index.
Examples
--------
>>> tl.visualize.CNN2d(network.all_params[0].eval(), second=10, saveable=True, name='cnn1_mnist', fig_idx=2012)
"""
import matplotlib.pyplot as plt
# tl.logging.info(CNN.shape) # (5, 5, 3, 64)
# exit()
n_mask = CNN.shape[3]
n_row = CNN.shape[0]
n_col = CNN.shape[1]
n_color = CNN.shape[2]
row = int(np.sqrt(n_mask))
col = int(np.ceil(n_mask / row))
plt.ion() # active mode
fig = plt.figure(fig_idx)
count = 1
for _ir in range(1, row + 1):
for _ic in range(1, col + 1):
if count > n_mask:
break
fig.add_subplot(col, row, count)
# tl.logging.info(CNN[:,:,:,count-1].shape, n_row, n_col) # (5, 1, 32) 5 5
# exit()
# plt.imshow(
# np.reshape(CNN[count-1,:,:,:], (n_row, n_col)),
# cmap='gray', interpolation="nearest") # theano
if n_color == 1:
plt.imshow(np.reshape(CNN[:, :, :, count - 1], (n_row, n_col)), cmap='gray', interpolation="nearest")
elif n_color == 3:
plt.imshow(
np.reshape(CNN[:, :, :, count - 1], (n_row, n_col, n_color)), cmap='gray', interpolation="nearest"
)
else:
raise Exception("Unknown n_color")
plt.gca().xaxis.set_major_locator(plt.NullLocator()) # disable ticks
plt.gca().yaxis.set_major_locator(plt.NullLocator())
count = count + 1
if saveable:
plt.savefig(name + '.pdf', format='pdf')
else:
plt.draw()
plt.pause(second) | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/visualize.py#L403-L461 | valid |
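A sketch with a synthetic filter bank; the expected layout is (height, width, channels, n_masks) with channels equal to 1 or 3:

import numpy as np
import tensorlayer as tl

filters = np.random.rand(5, 5, 3, 64)  # 64 RGB kernels of size 5x5
tl.visualize.CNN2d(filters, second=1, saveable=True, name='conv1_filters')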
tensorlayer/tensorlayer | tensorlayer/visualize.py | tsne_embedding | def tsne_embedding(embeddings, reverse_dictionary, plot_only=500, second=5, saveable=False, name='tsne', fig_idx=9862):
"""Visualize the embeddings by using t-SNE.
Parameters
----------
embeddings : numpy.array
The embedding matrix.
reverse_dictionary : dictionary
id_to_word, mapping id to unique word.
plot_only : int
The number of examples to plot; the most common words are chosen.
second : int
The display second(s) for the image(s), if saveable is False.
saveable : boolean
Save or plot the figure.
name : str
A name to save the image, if saveable is True.
fig_idx : int
matplotlib figure index.
Examples
--------
>>> see 'tutorial_word2vec_basic.py'
>>> final_embeddings = normalized_embeddings.eval()
>>> tl.visualize.tsne_embedding(final_embeddings, labels, reverse_dictionary,
... plot_only=500, second=5, saveable=False, name='tsne')
"""
import matplotlib.pyplot as plt
def plot_with_labels(low_dim_embs, labels, figsize=(18, 18), second=5, saveable=True, name='tsne', fig_idx=9862):
if low_dim_embs.shape[0] < len(labels):
raise AssertionError("More labels than embeddings")
if saveable is False:
plt.ion()
plt.figure(fig_idx)
plt.figure(figsize=figsize) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom')
if saveable:
plt.savefig(name + '.pdf', format='pdf')
else:
plt.draw()
plt.pause(second)
try:
from sklearn.manifold import TSNE
from six.moves import xrange
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
# plot_only = 500
low_dim_embs = tsne.fit_transform(embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels, second=second, saveable=saveable, name=name, fig_idx=fig_idx)
except ImportError:
_err = "Please install sklearn and matplotlib to visualize embeddings."
tl.logging.error(_err)
raise ImportError(_err) | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/visualize.py#L529-L594 | valid |
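A self-contained toy run with synthetic embeddings and vocabulary; sklearn and matplotlib must be installed, otherwise the function raises ImportError:

import numpy as np
import tensorlayer as tl

embeddings = np.random.randn(1000, 128)                      # toy embedding matrix
reverse_dictionary = {i: 'word%d' % i for i in range(1000)}  # id -> word
tl.visualize.tsne_embedding(embeddings, reverse_dictionary, plot_only=100,
                            saveable=True, name='toy_tsne')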
tensorlayer/tensorlayer | tensorlayer/visualize.py | draw_weights | def draw_weights(W=None, second=10, saveable=True, shape=None, name='mnist', fig_idx=2396512):
"""Visualize every columns of the weight matrix to a group of Greyscale img.
Parameters
----------
W : numpy.array
The weight matrix
second : int
The display second(s) for the image(s), if saveable is False.
saveable : boolean
Save or plot the figure.
shape : a list with 2 int or None
The shape of the feature image; for MNIST it is [28, 28].
name : a string
A name to save the image, if saveable is True.
fig_idx : int
matplotlib figure index.
Examples
--------
>>> tl.visualize.draw_weights(network.all_params[0].eval(), second=10, saveable=True, name='weight_of_1st_layer', fig_idx=2012)
"""
if shape is None:
shape = [28, 28]
import matplotlib.pyplot as plt
if saveable is False:
plt.ion()
fig = plt.figure(fig_idx) # show all feature images
n_units = W.shape[1]
num_r = int(np.sqrt(n_units)) # number shown per row, e.g. 25 hidden units -> 5 per row
num_c = int(np.ceil(n_units / num_r))
count = int(1)
for _row in range(1, num_r + 1):
for _col in range(1, num_c + 1):
if count > n_units:
break
fig.add_subplot(num_r, num_c, count)
# ------------------------------------------------------------
# plt.imshow(np.reshape(W[:,count-1],(28,28)), cmap='gray')
# ------------------------------------------------------------
feature = W[:, count - 1] / np.sqrt((W[:, count - 1]**2).sum())
# feature[feature<0.0001] = 0 # value threshold
# if count == 1 or count == 2:
# print(np.mean(feature))
# if np.std(feature) < 0.03: # condition threshold
# feature = np.zeros_like(feature)
# if np.mean(feature) < -0.015: # condition threshold
# feature = np.zeros_like(feature)
plt.imshow(
np.reshape(feature, (shape[0], shape[1])), cmap='gray', interpolation="nearest"
) # , vmin=np.min(feature), vmax=np.max(feature))
# plt.title(name)
# ------------------------------------------------------------
# plt.imshow(np.reshape(W[:,count-1] ,(np.sqrt(size),np.sqrt(size))), cmap='gray', interpolation="nearest")
plt.gca().xaxis.set_major_locator(plt.NullLocator()) # disable ticks
plt.gca().yaxis.set_major_locator(plt.NullLocator())
count = count + 1
if saveable:
plt.savefig(name + '.pdf', format='pdf')
else:
plt.draw()
plt.pause(second) | python | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/visualize.py#L597-L661 | valid |
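A sketch matching the MNIST-style default (synthetic weights); each of the 25 columns is L2-normalised and shown as one 28 x 28 tile:

import numpy as np
import tensorlayer as tl

W = np.random.randn(784, 25)  # 28*28 inputs -> 25 hidden units
tl.visualize.draw_weights(W, second=1, saveable=True, shape=[28, 28], name='w1')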
tensorlayer/tensorlayer | examples/basic_tutorials/tutorial_cifar10_tfrecord.py | data_to_tfrecord | def data_to_tfrecord(images, labels, filename):
"""Save data into TFRecord."""
if os.path.isfile(filename):
print("%s exists" % filename)
return
print("Converting data into %s ..." % filename)
# cwd = os.getcwd()
writer = tf.python_io.TFRecordWriter(filename)
for index, img in enumerate(images):
img_raw = img.tobytes()
# Visualize an image
# tl.visualize.frame(np.asarray(img, dtype=np.uint8), second=1, saveable=False, name='frame', fig_idx=1236)
label = int(labels[index])
# print(label)
# Convert the bytes back to an image as follows:
# image = Image.frombytes('RGB', (32, 32), img_raw)
# image = np.fromstring(img_raw, np.float32)
# image = image.reshape([32, 32, 3])
# tl.visualize.frame(np.asarray(image, dtype=np.uint8), second=1, saveable=False, name='frame', fig_idx=1236)
example = tf.train.Example(
features=tf.train.Features(
feature={
"label": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
}
)
)
writer.write(example.SerializeToString()) # Serialize To String
writer.close() | python | def data_to_tfrecord(images, labels, filename):
"""Save data into TFRecord."""
if os.path.isfile(filename):
print("%s exists" % filename)
return
print("Converting data into %s ..." % filename)
# cwd = os.getcwd()
writer = tf.python_io.TFRecordWriter(filename)
for index, img in enumerate(images):
img_raw = img.tobytes()
        # Visualize an image
# tl.visualize.frame(np.asarray(img, dtype=np.uint8), second=1, saveable=False, name='frame', fig_idx=1236)
label = int(labels[index])
# print(label)
        # Convert the bytes back to an image as follows:
# image = Image.frombytes('RGB', (32, 32), img_raw)
# image = np.fromstring(img_raw, np.float32)
# image = image.reshape([32, 32, 3])
# tl.visualize.frame(np.asarray(image, dtype=np.uint8), second=1, saveable=False, name='frame', fig_idx=1236)
example = tf.train.Example(
features=tf.train.Features(
feature={
"label": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
}
)
)
writer.write(example.SerializeToString()) # Serialize To String
writer.close() | [
"def",
"data_to_tfrecord",
"(",
"images",
",",
"labels",
",",
"filename",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"print",
"(",
"\"%s exists\"",
"%",
"filename",
")",
"return",
"print",
"(",
"\"Converting data into %s ...\"",
"%",
"filename",
")",
"# cwd = os.getcwd()",
"writer",
"=",
"tf",
".",
"python_io",
".",
"TFRecordWriter",
"(",
"filename",
")",
"for",
"index",
",",
"img",
"in",
"enumerate",
"(",
"images",
")",
":",
"img_raw",
"=",
"img",
".",
"tobytes",
"(",
")",
"# Visualize a image",
"# tl.visualize.frame(np.asarray(img, dtype=np.uint8), second=1, saveable=False, name='frame', fig_idx=1236)",
"label",
"=",
"int",
"(",
"labels",
"[",
"index",
"]",
")",
"# print(label)",
"# Convert the bytes back to image as follow:",
"# image = Image.frombytes('RGB', (32, 32), img_raw)",
"# image = np.fromstring(img_raw, np.float32)",
"# image = image.reshape([32, 32, 3])",
"# tl.visualize.frame(np.asarray(image, dtype=np.uint8), second=1, saveable=False, name='frame', fig_idx=1236)",
"example",
"=",
"tf",
".",
"train",
".",
"Example",
"(",
"features",
"=",
"tf",
".",
"train",
".",
"Features",
"(",
"feature",
"=",
"{",
"\"label\"",
":",
"tf",
".",
"train",
".",
"Feature",
"(",
"int64_list",
"=",
"tf",
".",
"train",
".",
"Int64List",
"(",
"value",
"=",
"[",
"label",
"]",
")",
")",
",",
"'img_raw'",
":",
"tf",
".",
"train",
".",
"Feature",
"(",
"bytes_list",
"=",
"tf",
".",
"train",
".",
"BytesList",
"(",
"value",
"=",
"[",
"img_raw",
"]",
")",
")",
",",
"}",
")",
")",
"writer",
".",
"write",
"(",
"example",
".",
"SerializeToString",
"(",
")",
")",
"# Serialize To String",
"writer",
".",
"close",
"(",
")"
] | Save data into TFRecord. | [
"Save",
"data",
"into",
"TFRecord",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/basic_tutorials/tutorial_cifar10_tfrecord.py#L64-L92 | valid |
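A minimal usage sketch for data_to_tfrecord, assuming TensorFlow 1.x and TensorLayer's CIFAR-10 loader; the filenames are arbitrary, and the cast to float32 is there so the tf.float32 decode in read_and_decode below matches what was written.

import numpy as np
import tensorlayer as tl

X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3))

data_to_tfrecord(images=np.asarray(X_train, dtype=np.float32), labels=y_train, filename="train.cifar10")
data_to_tfrecord(images=np.asarray(X_test, dtype=np.float32), labels=y_test, filename="test.cifar10")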
tensorlayer/tensorlayer | examples/basic_tutorials/tutorial_cifar10_tfrecord.py | read_and_decode | def read_and_decode(filename, is_train=None):
"""Return tensor to read from TFRecord."""
filename_queue = tf.train.string_input_producer([filename])
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example, features={
'label': tf.FixedLenFeature([], tf.int64),
'img_raw': tf.FixedLenFeature([], tf.string),
}
)
# You can do more image distortion here for training data
img = tf.decode_raw(features['img_raw'], tf.float32)
img = tf.reshape(img, [32, 32, 3])
# img = tf.cast(img, tf.float32) #* (1. / 255) - 0.5
    if is_train == True:
# 1. Randomly crop a [height, width] section of the image.
img = tf.random_crop(img, [24, 24, 3])
# 2. Randomly flip the image horizontally.
img = tf.image.random_flip_left_right(img)
# 3. Randomly change brightness.
img = tf.image.random_brightness(img, max_delta=63)
# 4. Randomly change contrast.
img = tf.image.random_contrast(img, lower=0.2, upper=1.8)
# 5. Subtract off the mean and divide by the variance of the pixels.
img = tf.image.per_image_standardization(img)
elif is_train == False:
# 1. Crop the central [height, width] of the image.
img = tf.image.resize_image_with_crop_or_pad(img, 24, 24)
# 2. Subtract off the mean and divide by the variance of the pixels.
img = tf.image.per_image_standardization(img)
    elif is_train is None:
img = img
label = tf.cast(features['label'], tf.int32)
return img, label | python | def read_and_decode(filename, is_train=None):
"""Return tensor to read from TFRecord."""
filename_queue = tf.train.string_input_producer([filename])
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example, features={
'label': tf.FixedLenFeature([], tf.int64),
'img_raw': tf.FixedLenFeature([], tf.string),
}
)
# You can do more image distortion here for training data
img = tf.decode_raw(features['img_raw'], tf.float32)
img = tf.reshape(img, [32, 32, 3])
# img = tf.cast(img, tf.float32) #* (1. / 255) - 0.5
    if is_train == True:
# 1. Randomly crop a [height, width] section of the image.
img = tf.random_crop(img, [24, 24, 3])
# 2. Randomly flip the image horizontally.
img = tf.image.random_flip_left_right(img)
# 3. Randomly change brightness.
img = tf.image.random_brightness(img, max_delta=63)
# 4. Randomly change contrast.
img = tf.image.random_contrast(img, lower=0.2, upper=1.8)
# 5. Subtract off the mean and divide by the variance of the pixels.
img = tf.image.per_image_standardization(img)
elif is_train == False:
# 1. Crop the central [height, width] of the image.
img = tf.image.resize_image_with_crop_or_pad(img, 24, 24)
# 2. Subtract off the mean and divide by the variance of the pixels.
img = tf.image.per_image_standardization(img)
    elif is_train is None:
img = img
label = tf.cast(features['label'], tf.int32)
return img, label | [
"def",
"read_and_decode",
"(",
"filename",
",",
"is_train",
"=",
"None",
")",
":",
"filename_queue",
"=",
"tf",
".",
"train",
".",
"string_input_producer",
"(",
"[",
"filename",
"]",
")",
"reader",
"=",
"tf",
".",
"TFRecordReader",
"(",
")",
"_",
",",
"serialized_example",
"=",
"reader",
".",
"read",
"(",
"filename_queue",
")",
"features",
"=",
"tf",
".",
"parse_single_example",
"(",
"serialized_example",
",",
"features",
"=",
"{",
"'label'",
":",
"tf",
".",
"FixedLenFeature",
"(",
"[",
"]",
",",
"tf",
".",
"int64",
")",
",",
"'img_raw'",
":",
"tf",
".",
"FixedLenFeature",
"(",
"[",
"]",
",",
"tf",
".",
"string",
")",
",",
"}",
")",
"# You can do more image distortion here for training data",
"img",
"=",
"tf",
".",
"decode_raw",
"(",
"features",
"[",
"'img_raw'",
"]",
",",
"tf",
".",
"float32",
")",
"img",
"=",
"tf",
".",
"reshape",
"(",
"img",
",",
"[",
"32",
",",
"32",
",",
"3",
"]",
")",
"# img = tf.cast(img, tf.float32) #* (1. / 255) - 0.5",
"if",
"is_train",
"==",
"True",
":",
"# 1. Randomly crop a [height, width] section of the image.",
"img",
"=",
"tf",
".",
"random_crop",
"(",
"img",
",",
"[",
"24",
",",
"24",
",",
"3",
"]",
")",
"# 2. Randomly flip the image horizontally.",
"img",
"=",
"tf",
".",
"image",
".",
"random_flip_left_right",
"(",
"img",
")",
"# 3. Randomly change brightness.",
"img",
"=",
"tf",
".",
"image",
".",
"random_brightness",
"(",
"img",
",",
"max_delta",
"=",
"63",
")",
"# 4. Randomly change contrast.",
"img",
"=",
"tf",
".",
"image",
".",
"random_contrast",
"(",
"img",
",",
"lower",
"=",
"0.2",
",",
"upper",
"=",
"1.8",
")",
"# 5. Subtract off the mean and divide by the variance of the pixels.",
"img",
"=",
"tf",
".",
"image",
".",
"per_image_standardization",
"(",
"img",
")",
"elif",
"is_train",
"==",
"False",
":",
"# 1. Crop the central [height, width] of the image.",
"img",
"=",
"tf",
".",
"image",
".",
"resize_image_with_crop_or_pad",
"(",
"img",
",",
"24",
",",
"24",
")",
"# 2. Subtract off the mean and divide by the variance of the pixels.",
"img",
"=",
"tf",
".",
"image",
".",
"per_image_standardization",
"(",
"img",
")",
"elif",
"is_train",
"==",
"None",
":",
"img",
"=",
"img",
"label",
"=",
"tf",
".",
"cast",
"(",
"features",
"[",
"'label'",
"]",
",",
"tf",
".",
"int32",
")",
"return",
"img",
",",
"label"
] | Return tensor to read from TFRecord. | [
"Return",
"tensor",
"to",
"read",
"from",
"TFRecord",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/basic_tutorials/tutorial_cifar10_tfrecord.py#L95-L137 | valid |
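A sketch of consuming read_and_decode through the TF 1.x queue-runner machinery it depends on; the batch size and queue capacities are illustrative values.

import tensorflow as tf

img, label = read_and_decode("train.cifar10", is_train=True)
img_batch, label_batch = tf.train.shuffle_batch(
    [img, label], batch_size=32, capacity=2000, min_after_dequeue=1000, num_threads=4)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    images, labels = sess.run([img_batch, label_batch])
    print(images.shape, labels.shape)  # (32, 24, 24, 3) (32,)
    coord.request_stop()
    coord.join(threads)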
tensorlayer/tensorlayer | tensorlayer/layers/core.py | Layer.print_params | def print_params(self, details=True, session=None):
"""Print all info of parameters in the network"""
for i, p in enumerate(self.all_params):
if details:
try:
val = p.eval(session=session)
logging.info(
" param {:3}: {:20} {:15} {} (mean: {:<18}, median: {:<18}, std: {:<18}) ".
format(i, p.name, str(val.shape), p.dtype.name, val.mean(), np.median(val), val.std())
)
except Exception as e:
logging.info(str(e))
raise Exception(
"Hint: print params details after tl.layers.initialize_global_variables(sess) "
"or use network.print_params(False)."
)
else:
logging.info(" param {:3}: {:20} {:15} {}".format(i, p.name, str(p.get_shape()), p.dtype.name))
logging.info(" num of params: %d" % self.count_params()) | python | def print_params(self, details=True, session=None):
"""Print all info of parameters in the network"""
for i, p in enumerate(self.all_params):
if details:
try:
val = p.eval(session=session)
logging.info(
" param {:3}: {:20} {:15} {} (mean: {:<18}, median: {:<18}, std: {:<18}) ".
format(i, p.name, str(val.shape), p.dtype.name, val.mean(), np.median(val), val.std())
)
except Exception as e:
logging.info(str(e))
raise Exception(
"Hint: print params details after tl.layers.initialize_global_variables(sess) "
"or use network.print_params(False)."
)
else:
logging.info(" param {:3}: {:20} {:15} {}".format(i, p.name, str(p.get_shape()), p.dtype.name))
logging.info(" num of params: %d" % self.count_params()) | [
"def",
"print_params",
"(",
"self",
",",
"details",
"=",
"True",
",",
"session",
"=",
"None",
")",
":",
"for",
"i",
",",
"p",
"in",
"enumerate",
"(",
"self",
".",
"all_params",
")",
":",
"if",
"details",
":",
"try",
":",
"val",
"=",
"p",
".",
"eval",
"(",
"session",
"=",
"session",
")",
"logging",
".",
"info",
"(",
"\" param {:3}: {:20} {:15} {} (mean: {:<18}, median: {:<18}, std: {:<18}) \"",
".",
"format",
"(",
"i",
",",
"p",
".",
"name",
",",
"str",
"(",
"val",
".",
"shape",
")",
",",
"p",
".",
"dtype",
".",
"name",
",",
"val",
".",
"mean",
"(",
")",
",",
"np",
".",
"median",
"(",
"val",
")",
",",
"val",
".",
"std",
"(",
")",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"info",
"(",
"str",
"(",
"e",
")",
")",
"raise",
"Exception",
"(",
"\"Hint: print params details after tl.layers.initialize_global_variables(sess) \"",
"\"or use network.print_params(False).\"",
")",
"else",
":",
"logging",
".",
"info",
"(",
"\" param {:3}: {:20} {:15} {}\"",
".",
"format",
"(",
"i",
",",
"p",
".",
"name",
",",
"str",
"(",
"p",
".",
"get_shape",
"(",
")",
")",
",",
"p",
".",
"dtype",
".",
"name",
")",
")",
"logging",
".",
"info",
"(",
"\" num of params: %d\"",
"%",
"self",
".",
"count_params",
"(",
")",
")"
] | Print all info of parameters in the network | [
"Print",
"all",
"info",
"of",
"parameters",
"in",
"the",
"network"
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/core.py#L171-L189 | valid |
tensorlayer/tensorlayer | tensorlayer/layers/core.py | Layer.print_layers | def print_layers(self):
"""Print all info of layers in the network."""
for i, layer in enumerate(self.all_layers):
# logging.info(" layer %d: %s" % (i, str(layer)))
logging.info(
" layer {:3}: {:20} {:15} {}".format(i, layer.name, str(layer.get_shape()), layer.dtype.name)
) | python | def print_layers(self):
"""Print all info of layers in the network."""
for i, layer in enumerate(self.all_layers):
# logging.info(" layer %d: %s" % (i, str(layer)))
logging.info(
" layer {:3}: {:20} {:15} {}".format(i, layer.name, str(layer.get_shape()), layer.dtype.name)
) | [
"def",
"print_layers",
"(",
"self",
")",
":",
"for",
"i",
",",
"layer",
"in",
"enumerate",
"(",
"self",
".",
"all_layers",
")",
":",
"# logging.info(\" layer %d: %s\" % (i, str(layer)))",
"logging",
".",
"info",
"(",
"\" layer {:3}: {:20} {:15} {}\"",
".",
"format",
"(",
"i",
",",
"layer",
".",
"name",
",",
"str",
"(",
"layer",
".",
"get_shape",
"(",
")",
")",
",",
"layer",
".",
"dtype",
".",
"name",
")",
")"
] | Print all info of layers in the network. | [
"Print",
"all",
"info",
"of",
"layers",
"in",
"the",
"network",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/core.py#L191-L197 | valid |
tensorlayer/tensorlayer | tensorlayer/layers/core.py | Layer.count_params | def count_params(self):
"""Returns the number of parameters in the network."""
n_params = 0
for _i, p in enumerate(self.all_params):
n = 1
# for s in p.eval().shape:
for s in p.get_shape():
try:
s = int(s)
except Exception:
s = 1
if s:
n = n * s
n_params = n_params + n
return n_params | python | def count_params(self):
"""Returns the number of parameters in the network."""
n_params = 0
for _i, p in enumerate(self.all_params):
n = 1
# for s in p.eval().shape:
for s in p.get_shape():
try:
s = int(s)
except Exception:
s = 1
if s:
n = n * s
n_params = n_params + n
return n_params | [
"def",
"count_params",
"(",
"self",
")",
":",
"n_params",
"=",
"0",
"for",
"_i",
",",
"p",
"in",
"enumerate",
"(",
"self",
".",
"all_params",
")",
":",
"n",
"=",
"1",
"# for s in p.eval().shape:",
"for",
"s",
"in",
"p",
".",
"get_shape",
"(",
")",
":",
"try",
":",
"s",
"=",
"int",
"(",
"s",
")",
"except",
"Exception",
":",
"s",
"=",
"1",
"if",
"s",
":",
"n",
"=",
"n",
"*",
"s",
"n_params",
"=",
"n_params",
"+",
"n",
"return",
"n_params"
] | Returns the number of parameters in the network. | [
"Returns",
"the",
"number",
"of",
"parameters",
"in",
"the",
"network",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/core.py#L199-L213 | valid |
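The same count, condensed with numpy under the same convention as the try/except above: a dimension whose int() conversion fails (a dynamic shape) is treated as size 1. count_params_compact is an illustrative helper, not TensorLayer API.

import numpy as np

def count_params_compact(params):
    total = 0
    for p in params:
        dims = []
        for s in p.get_shape():
            try:
                dims.append(int(s))
            except (TypeError, ValueError):
                dims.append(1)  # unknown/dynamic dimension counts as 1
        total += int(np.prod(dims))  # np.prod([]) == 1, so scalar variables count as 1
    return total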
tensorlayer/tensorlayer | tensorlayer/layers/core.py | Layer.get_all_params | def get_all_params(self, session=None):
"""Return the parameters in a list of array."""
_params = []
for p in self.all_params:
if session is None:
_params.append(p.eval())
else:
_params.append(session.run(p))
return _params | python | def get_all_params(self, session=None):
"""Return the parameters in a list of array."""
_params = []
for p in self.all_params:
if session is None:
_params.append(p.eval())
else:
_params.append(session.run(p))
return _params | [
"def",
"get_all_params",
"(",
"self",
",",
"session",
"=",
"None",
")",
":",
"_params",
"=",
"[",
"]",
"for",
"p",
"in",
"self",
".",
"all_params",
":",
"if",
"session",
"is",
"None",
":",
"_params",
".",
"append",
"(",
"p",
".",
"eval",
"(",
")",
")",
"else",
":",
"_params",
".",
"append",
"(",
"session",
".",
"run",
"(",
"p",
")",
")",
"return",
"_params"
] | Return the parameters as a list of arrays. | [
"Return",
"the",
"parameters",
"in",
"a",
"list",
"of",
"array",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/core.py#L215-L223 | valid |
tensorlayer/tensorlayer | tensorlayer/layers/core.py | Layer._get_init_args | def _get_init_args(self, skip=4):
"""Get all arguments of current layer for saving the graph."""
stack = inspect.stack()
if len(stack) < skip + 1:
raise ValueError("The length of the inspection stack is shorter than the requested start position.")
args, _, _, values = inspect.getargvalues(stack[skip][0])
params = {}
for arg in args:
            # some args don't need to be saved into the graph, e.g. the input placeholder
if values[arg] is not None and arg not in ['self', 'prev_layer', 'inputs']:
val = values[arg]
# change function (e.g. act) into dictionary of module path and function name
if inspect.isfunction(val):
params[arg] = {"module_path": val.__module__, "func_name": val.__name__}
# ignore more args e.g. TF class
elif arg.endswith('init'):
continue
# for other data type, save them directly
else:
params[arg] = val
return params | python | def _get_init_args(self, skip=4):
"""Get all arguments of current layer for saving the graph."""
stack = inspect.stack()
if len(stack) < skip + 1:
raise ValueError("The length of the inspection stack is shorter than the requested start position.")
args, _, _, values = inspect.getargvalues(stack[skip][0])
params = {}
for arg in args:
            # some args don't need to be saved into the graph, e.g. the input placeholder
if values[arg] is not None and arg not in ['self', 'prev_layer', 'inputs']:
val = values[arg]
# change function (e.g. act) into dictionary of module path and function name
if inspect.isfunction(val):
params[arg] = {"module_path": val.__module__, "func_name": val.__name__}
# ignore more args e.g. TF class
elif arg.endswith('init'):
continue
# for other data type, save them directly
else:
params[arg] = val
return params | [
"def",
"_get_init_args",
"(",
"self",
",",
"skip",
"=",
"4",
")",
":",
"stack",
"=",
"inspect",
".",
"stack",
"(",
")",
"if",
"len",
"(",
"stack",
")",
"<",
"skip",
"+",
"1",
":",
"raise",
"ValueError",
"(",
"\"The length of the inspection stack is shorter than the requested start position.\"",
")",
"args",
",",
"_",
",",
"_",
",",
"values",
"=",
"inspect",
".",
"getargvalues",
"(",
"stack",
"[",
"skip",
"]",
"[",
"0",
"]",
")",
"params",
"=",
"{",
"}",
"for",
"arg",
"in",
"args",
":",
"# some args dont need to be saved into the graph. e.g. the input placeholder",
"if",
"values",
"[",
"arg",
"]",
"is",
"not",
"None",
"and",
"arg",
"not",
"in",
"[",
"'self'",
",",
"'prev_layer'",
",",
"'inputs'",
"]",
":",
"val",
"=",
"values",
"[",
"arg",
"]",
"# change function (e.g. act) into dictionary of module path and function name",
"if",
"inspect",
".",
"isfunction",
"(",
"val",
")",
":",
"params",
"[",
"arg",
"]",
"=",
"{",
"\"module_path\"",
":",
"val",
".",
"__module__",
",",
"\"func_name\"",
":",
"val",
".",
"__name__",
"}",
"# ignore more args e.g. TF class",
"elif",
"arg",
".",
"endswith",
"(",
"'init'",
")",
":",
"continue",
"# for other data type, save them directly",
"else",
":",
"params",
"[",
"arg",
"]",
"=",
"val",
"return",
"params"
] | Get all arguments of current layer for saving the graph. | [
"Get",
"all",
"arguments",
"of",
"current",
"layer",
"for",
"saving",
"the",
"graph",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/core.py#L258-L286 | valid |
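A self-contained illustration of the inspect pattern _get_init_args relies on: walk a fixed number of frames up the call stack and recover the caller's argument names and values. Both functions here are made up for the demonstration.

import inspect

def capture_caller_args(skip=1):
    frame = inspect.stack()[skip][0]
    args, _, _, values = inspect.getargvalues(frame)
    return {a: values[a] for a in args if a != 'self'}

def make_layer(n_units=64, act_name='relu'):
    return capture_caller_args(skip=1)

print(make_layer(128))  # {'n_units': 128, 'act_name': 'relu'}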
tensorlayer/tensorlayer | tensorlayer/third_party/roi_pooling/roi_pooling/roi_pooling_ops.py | roi_pooling | def roi_pooling(input, rois, pool_height, pool_width):
"""
    Returns a TensorFlow operation for computing Region of Interest pooling.
    @arg input: feature maps on which to perform the pooling operation
    @arg rois: list of regions of interest in the format (feature map index, upper left, bottom right)
    @arg pool_height: height of each pooled output section
    @arg pool_width: width of each pooled output section
"""
# TODO(maciek): ops scope
out = roi_pooling_module.roi_pooling(input, rois, pool_height=pool_height, pool_width=pool_width)
output, argmax_output = out[0], out[1]
return output | python | def roi_pooling(input, rois, pool_height, pool_width):
"""
    Returns a TensorFlow operation for computing Region of Interest pooling.
    @arg input: feature maps on which to perform the pooling operation
    @arg rois: list of regions of interest in the format (feature map index, upper left, bottom right)
    @arg pool_height: height of each pooled output section
    @arg pool_width: width of each pooled output section
"""
# TODO(maciek): ops scope
out = roi_pooling_module.roi_pooling(input, rois, pool_height=pool_height, pool_width=pool_width)
output, argmax_output = out[0], out[1]
return output | [
"def",
"roi_pooling",
"(",
"input",
",",
"rois",
",",
"pool_height",
",",
"pool_width",
")",
":",
"# TODO(maciek): ops scope",
"out",
"=",
"roi_pooling_module",
".",
"roi_pooling",
"(",
"input",
",",
"rois",
",",
"pool_height",
"=",
"pool_height",
",",
"pool_width",
"=",
"pool_width",
")",
"output",
",",
"argmax_output",
"=",
"out",
"[",
"0",
"]",
",",
"out",
"[",
"1",
"]",
"return",
"output"
] | Returns a TensorFlow operation for computing Region of Interest pooling.
@arg input: feature maps on which to perform the pooling operation
@arg rois: list of regions of interest in the format (feature map index, upper left, bottom right)
@arg pool_height: height of each pooled output section
@arg pool_width: width of each pooled output section | [
"returns",
"a",
"tensorflow",
"operation",
"for",
"computing",
"the",
"Region",
"of",
"Interest",
"Pooling"
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/third_party/roi_pooling/roi_pooling/roi_pooling_ops.py#L12-L23 | valid |
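A hypothetical usage sketch, assuming the custom roi_pooling op has been compiled and loaded; the five-column ROI layout follows the docstring (feature map index plus two corner coordinates), and all shapes are illustrative.

import tensorflow as tf

feats = tf.placeholder(tf.float32, [None, 14, 14, 256])  # conv feature maps
rois = tf.placeholder(tf.int32, [None, 5])               # (map index, x1, y1, x2, y2)
pooled = roi_pooling(feats, rois, pool_height=7, pool_width=7)
# pooled yields one fixed-size 7x7 grid per region of interest.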
tensorlayer/tensorlayer | examples/data_process/tutorial_tfrecord3.py | _int64_feature | def _int64_feature(value):
"""Wrapper for inserting an int64 Feature into a SequenceExample proto,
    e.g., an integer label.
"""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) | python | def _int64_feature(value):
"""Wrapper for inserting an int64 Feature into a SequenceExample proto,
    e.g., an integer label.
"""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) | [
"def",
"_int64_feature",
"(",
"value",
")",
":",
"return",
"tf",
".",
"train",
".",
"Feature",
"(",
"int64_list",
"=",
"tf",
".",
"train",
".",
"Int64List",
"(",
"value",
"=",
"[",
"value",
"]",
")",
")"
] | Wrapper for inserting an int64 Feature into a SequenceExample proto,
e.g., an integer label. | [
"Wrapper",
"for",
"inserting",
"an",
"int64",
"Feature",
"into",
"a",
"SequenceExample",
"proto",
"e",
".",
"g",
"An",
"integer",
"label",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/data_process/tutorial_tfrecord3.py#L26-L30 | valid |
tensorlayer/tensorlayer | examples/data_process/tutorial_tfrecord3.py | _bytes_feature | def _bytes_feature(value):
"""Wrapper for inserting a bytes Feature into a SequenceExample proto,
    e.g., an image in bytes
"""
# return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)]))
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) | python | def _bytes_feature(value):
"""Wrapper for inserting a bytes Feature into a SequenceExample proto,
    e.g., an image in bytes
"""
# return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)]))
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) | [
"def",
"_bytes_feature",
"(",
"value",
")",
":",
"# return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)]))",
"return",
"tf",
".",
"train",
".",
"Feature",
"(",
"bytes_list",
"=",
"tf",
".",
"train",
".",
"BytesList",
"(",
"value",
"=",
"[",
"value",
"]",
")",
")"
] | Wrapper for inserting a bytes Feature into a SequenceExample proto,
e.g., an image in bytes | [
"Wrapper",
"for",
"inserting",
"a",
"bytes",
"Feature",
"into",
"a",
"SequenceExample",
"proto",
"e",
".",
"g",
"an",
"image",
"in",
"byte"
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/data_process/tutorial_tfrecord3.py#L33-L38 | valid |
tensorlayer/tensorlayer | examples/data_process/tutorial_tfrecord3.py | _int64_feature_list | def _int64_feature_list(values):
"""Wrapper for inserting an int64 FeatureList into a SequenceExample proto,
    e.g., a sentence as a list of ints
"""
return tf.train.FeatureList(feature=[_int64_feature(v) for v in values]) | python | def _int64_feature_list(values):
"""Wrapper for inserting an int64 FeatureList into a SequenceExample proto,
    e.g., a sentence as a list of ints
"""
return tf.train.FeatureList(feature=[_int64_feature(v) for v in values]) | [
"def",
"_int64_feature_list",
"(",
"values",
")",
":",
"return",
"tf",
".",
"train",
".",
"FeatureList",
"(",
"feature",
"=",
"[",
"_int64_feature",
"(",
"v",
")",
"for",
"v",
"in",
"values",
"]",
")"
] | Wrapper for inserting an int64 FeatureList into a SequenceExample proto,
e.g., a sentence as a list of ints | [
"Wrapper",
"for",
"inserting",
"an",
"int64",
"FeatureList",
"into",
"a",
"SequenceExample",
"proto",
"e",
".",
"g",
"sentence",
"in",
"list",
"of",
"ints"
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/data_process/tutorial_tfrecord3.py#L41-L45 | valid |
tensorlayer/tensorlayer | examples/data_process/tutorial_tfrecord3.py | _bytes_feature_list | def _bytes_feature_list(values):
"""Wrapper for inserting a bytes FeatureList into a SequenceExample proto,
    e.g., a sentence as a list of bytes
"""
return tf.train.FeatureList(feature=[_bytes_feature(v) for v in values]) | python | def _bytes_feature_list(values):
"""Wrapper for inserting a bytes FeatureList into a SequenceExample proto,
    e.g., a sentence as a list of bytes
"""
return tf.train.FeatureList(feature=[_bytes_feature(v) for v in values]) | [
"def",
"_bytes_feature_list",
"(",
"values",
")",
":",
"return",
"tf",
".",
"train",
".",
"FeatureList",
"(",
"feature",
"=",
"[",
"_bytes_feature",
"(",
"v",
")",
"for",
"v",
"in",
"values",
"]",
")"
] | Wrapper for inserting a bytes FeatureList into a SequenceExample proto,
e.g., a sentence as a list of bytes | [
"Wrapper",
"for",
"inserting",
"a",
"bytes",
"FeatureList",
"into",
"a",
"SequenceExample",
"proto",
"e",
".",
"g",
"sentence",
"in",
"list",
"of",
"bytes"
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/data_process/tutorial_tfrecord3.py#L48-L52 | valid |
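How the four wrappers compose into a tf.train.SequenceExample: fixed-size context features plus variable-length feature lists. The payloads here are stand-ins, not real image or caption data.

import tensorflow as tf

image_bytes = b'\x00\x01\x02'   # stand-in for encoded JPEG bytes
caption_ids = [1, 42, 7, 2]     # stand-in word ids

example = tf.train.SequenceExample(
    context=tf.train.Features(feature={
        "image/data": _bytes_feature(image_bytes),
        "image/label": _int64_feature(0),
    }),
    feature_lists=tf.train.FeatureLists(feature_list={
        "image/caption_ids": _int64_feature_list(caption_ids),
    }),
)
serialized = example.SerializeToString()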
tensorlayer/tensorlayer | examples/data_process/tutorial_tfrecord3.py | distort_image | def distort_image(image, thread_id):
"""Perform random distortions on an image.
Args:
image: A float32 Tensor of shape [height, width, 3] with values in [0, 1).
thread_id: Preprocessing thread id used to select the ordering of color
distortions. There should be a multiple of 2 preprocessing threads.
    Returns:
distorted_image: A float32 Tensor of shape [height, width, 3] with values in
[0, 1].
"""
# Randomly flip horizontally.
with tf.name_scope("flip_horizontal"): # , values=[image]): # DH MOdify
# with tf.name_scope("flip_horizontal", values=[image]):
image = tf.image.random_flip_left_right(image)
# Randomly distort the colors based on thread id.
color_ordering = thread_id % 2
with tf.name_scope("distort_color"): # , values=[image]): # DH MOdify
# with tf.name_scope("distort_color", values=[image]): # DH MOdify
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.032)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.032)
# The random_* ops do not necessarily clamp.
image = tf.clip_by_value(image, 0.0, 1.0)
return image | python | def distort_image(image, thread_id):
"""Perform random distortions on an image.
Args:
image: A float32 Tensor of shape [height, width, 3] with values in [0, 1).
thread_id: Preprocessing thread id used to select the ordering of color
distortions. There should be a multiple of 2 preprocessing threads.
    Returns:
distorted_image: A float32 Tensor of shape [height, width, 3] with values in
[0, 1].
"""
# Randomly flip horizontally.
with tf.name_scope("flip_horizontal"): # , values=[image]): # DH MOdify
# with tf.name_scope("flip_horizontal", values=[image]):
image = tf.image.random_flip_left_right(image)
# Randomly distort the colors based on thread id.
color_ordering = thread_id % 2
with tf.name_scope("distort_color"): # , values=[image]): # DH MOdify
# with tf.name_scope("distort_color", values=[image]): # DH MOdify
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.032)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.032)
# The random_* ops do not necessarily clamp.
image = tf.clip_by_value(image, 0.0, 1.0)
return image | [
"def",
"distort_image",
"(",
"image",
",",
"thread_id",
")",
":",
"# Randomly flip horizontally.",
"with",
"tf",
".",
"name_scope",
"(",
"\"flip_horizontal\"",
")",
":",
"# , values=[image]): # DH MOdify",
"# with tf.name_scope(\"flip_horizontal\", values=[image]):",
"image",
"=",
"tf",
".",
"image",
".",
"random_flip_left_right",
"(",
"image",
")",
"# Randomly distort the colors based on thread id.",
"color_ordering",
"=",
"thread_id",
"%",
"2",
"with",
"tf",
".",
"name_scope",
"(",
"\"distort_color\"",
")",
":",
"# , values=[image]): # DH MOdify",
"# with tf.name_scope(\"distort_color\", values=[image]): # DH MOdify",
"if",
"color_ordering",
"==",
"0",
":",
"image",
"=",
"tf",
".",
"image",
".",
"random_brightness",
"(",
"image",
",",
"max_delta",
"=",
"32.",
"/",
"255.",
")",
"image",
"=",
"tf",
".",
"image",
".",
"random_saturation",
"(",
"image",
",",
"lower",
"=",
"0.5",
",",
"upper",
"=",
"1.5",
")",
"image",
"=",
"tf",
".",
"image",
".",
"random_hue",
"(",
"image",
",",
"max_delta",
"=",
"0.032",
")",
"image",
"=",
"tf",
".",
"image",
".",
"random_contrast",
"(",
"image",
",",
"lower",
"=",
"0.5",
",",
"upper",
"=",
"1.5",
")",
"elif",
"color_ordering",
"==",
"1",
":",
"image",
"=",
"tf",
".",
"image",
".",
"random_brightness",
"(",
"image",
",",
"max_delta",
"=",
"32.",
"/",
"255.",
")",
"image",
"=",
"tf",
".",
"image",
".",
"random_contrast",
"(",
"image",
",",
"lower",
"=",
"0.5",
",",
"upper",
"=",
"1.5",
")",
"image",
"=",
"tf",
".",
"image",
".",
"random_saturation",
"(",
"image",
",",
"lower",
"=",
"0.5",
",",
"upper",
"=",
"1.5",
")",
"image",
"=",
"tf",
".",
"image",
".",
"random_hue",
"(",
"image",
",",
"max_delta",
"=",
"0.032",
")",
"# The random_* ops do not necessarily clamp.",
"image",
"=",
"tf",
".",
"clip_by_value",
"(",
"image",
",",
"0.0",
",",
"1.0",
")",
"return",
"image"
] | Perform random distortions on an image.
Args:
image: A float32 Tensor of shape [height, width, 3] with values in [0, 1).
thread_id: Preprocessing thread id used to select the ordering of color
distortions. There should be a multiple of 2 preprocessing threads.
Returns:
distorted_image: A float32 Tensor of shape [height, width, 3] with values in
[0, 1]. | [
"Perform",
"random",
"distortions",
"on",
"an",
"image",
".",
"Args",
":",
"image",
":",
"A",
"float32",
"Tensor",
"of",
"shape",
"[",
"height",
"width",
"3",
"]",
"with",
"values",
"in",
"[",
"0",
"1",
")",
".",
"thread_id",
":",
"Preprocessing",
"thread",
"id",
"used",
"to",
"select",
"the",
"ordering",
"of",
"color",
"distortions",
".",
"There",
"should",
"be",
"a",
"multiple",
"of",
"2",
"preprocessing",
"threads",
".",
"Returns",
":",
"distorted_image",
":",
"A",
"float32",
"Tensor",
"of",
"shape",
"[",
"height",
"width",
"3",
"]",
"with",
"values",
"in",
"[",
"0",
"1",
"]",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/data_process/tutorial_tfrecord3.py#L121-L152 | valid |
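A sketch of wiring distort_image into a TF 1.x graph; the input must already be float32 in roughly [0, 1), e.g. after tf.image.convert_image_dtype, and the placeholder shape is arbitrary.

import tensorflow as tf

raw = tf.placeholder(tf.uint8, [224, 224, 3])
img = tf.image.convert_image_dtype(raw, tf.float32)  # rescales to [0, 1]
distorted = distort_image(img, thread_id=0)          # thread_id selects color ordering 0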
tensorlayer/tensorlayer | examples/data_process/tutorial_tfrecord3.py | prefetch_input_data | def prefetch_input_data(
reader, file_pattern, is_training, batch_size, values_per_shard, input_queue_capacity_factor=16,
num_reader_threads=1, shard_queue_name="filename_queue", value_queue_name="input_queue"
):
"""Prefetches string values from disk into an input queue.
In training the capacity of the queue is important because a larger queue
means better mixing of training examples between shards. The minimum number of
values kept in the queue is values_per_shard * input_queue_capacity_factor,
    where input_queue_capacity_factor should be chosen to trade off better mixing
with memory usage.
Args:
reader: Instance of tf.ReaderBase.
file_pattern: Comma-separated list of file patterns (e.g.
/tmp/train_data-?????-of-00100).
is_training: Boolean; whether prefetching for training or eval.
batch_size: Model batch size used to determine queue capacity.
values_per_shard: Approximate number of values per shard.
input_queue_capacity_factor: Minimum number of values to keep in the queue
in multiples of values_per_shard. See comments above.
num_reader_threads: Number of reader threads to fill the queue.
shard_queue_name: Name for the shards filename queue.
value_queue_name: Name for the values input queue.
Returns:
A Queue containing prefetched string values.
"""
data_files = []
for pattern in file_pattern.split(","):
data_files.extend(tf.gfile.Glob(pattern))
if not data_files:
tl.logging.fatal("Found no input files matching %s", file_pattern)
else:
tl.logging.info("Prefetching values from %d files matching %s", len(data_files), file_pattern)
if is_training:
print(" is_training == True : RandomShuffleQueue")
filename_queue = tf.train.string_input_producer(data_files, shuffle=True, capacity=16, name=shard_queue_name)
min_queue_examples = values_per_shard * input_queue_capacity_factor
capacity = min_queue_examples + 100 * batch_size
values_queue = tf.RandomShuffleQueue(
capacity=capacity, min_after_dequeue=min_queue_examples, dtypes=[tf.string],
name="random_" + value_queue_name
)
else:
print(" is_training == False : FIFOQueue")
filename_queue = tf.train.string_input_producer(data_files, shuffle=False, capacity=1, name=shard_queue_name)
capacity = values_per_shard + 3 * batch_size
values_queue = tf.FIFOQueue(capacity=capacity, dtypes=[tf.string], name="fifo_" + value_queue_name)
enqueue_ops = []
for _ in range(num_reader_threads):
_, value = reader.read(filename_queue)
enqueue_ops.append(values_queue.enqueue([value]))
tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(values_queue, enqueue_ops))
tf.summary.scalar(
"queue/%s/fraction_of_%d_full" % (values_queue.name, capacity),
tf.cast(values_queue.size(), tf.float32) * (1. / capacity)
)
return values_queue | python | def prefetch_input_data(
reader, file_pattern, is_training, batch_size, values_per_shard, input_queue_capacity_factor=16,
num_reader_threads=1, shard_queue_name="filename_queue", value_queue_name="input_queue"
):
"""Prefetches string values from disk into an input queue.
In training the capacity of the queue is important because a larger queue
means better mixing of training examples between shards. The minimum number of
values kept in the queue is values_per_shard * input_queue_capacity_factor,
    where input_queue_capacity_factor should be chosen to trade off better mixing
with memory usage.
Args:
reader: Instance of tf.ReaderBase.
file_pattern: Comma-separated list of file patterns (e.g.
/tmp/train_data-?????-of-00100).
is_training: Boolean; whether prefetching for training or eval.
batch_size: Model batch size used to determine queue capacity.
values_per_shard: Approximate number of values per shard.
input_queue_capacity_factor: Minimum number of values to keep in the queue
in multiples of values_per_shard. See comments above.
num_reader_threads: Number of reader threads to fill the queue.
shard_queue_name: Name for the shards filename queue.
value_queue_name: Name for the values input queue.
Returns:
A Queue containing prefetched string values.
"""
data_files = []
for pattern in file_pattern.split(","):
data_files.extend(tf.gfile.Glob(pattern))
if not data_files:
tl.logging.fatal("Found no input files matching %s", file_pattern)
else:
tl.logging.info("Prefetching values from %d files matching %s", len(data_files), file_pattern)
if is_training:
print(" is_training == True : RandomShuffleQueue")
filename_queue = tf.train.string_input_producer(data_files, shuffle=True, capacity=16, name=shard_queue_name)
min_queue_examples = values_per_shard * input_queue_capacity_factor
capacity = min_queue_examples + 100 * batch_size
values_queue = tf.RandomShuffleQueue(
capacity=capacity, min_after_dequeue=min_queue_examples, dtypes=[tf.string],
name="random_" + value_queue_name
)
else:
print(" is_training == False : FIFOQueue")
filename_queue = tf.train.string_input_producer(data_files, shuffle=False, capacity=1, name=shard_queue_name)
capacity = values_per_shard + 3 * batch_size
values_queue = tf.FIFOQueue(capacity=capacity, dtypes=[tf.string], name="fifo_" + value_queue_name)
enqueue_ops = []
for _ in range(num_reader_threads):
_, value = reader.read(filename_queue)
enqueue_ops.append(values_queue.enqueue([value]))
tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(values_queue, enqueue_ops))
tf.summary.scalar(
"queue/%s/fraction_of_%d_full" % (values_queue.name, capacity),
tf.cast(values_queue.size(), tf.float32) * (1. / capacity)
)
return values_queue | [
"def",
"prefetch_input_data",
"(",
"reader",
",",
"file_pattern",
",",
"is_training",
",",
"batch_size",
",",
"values_per_shard",
",",
"input_queue_capacity_factor",
"=",
"16",
",",
"num_reader_threads",
"=",
"1",
",",
"shard_queue_name",
"=",
"\"filename_queue\"",
",",
"value_queue_name",
"=",
"\"input_queue\"",
")",
":",
"data_files",
"=",
"[",
"]",
"for",
"pattern",
"in",
"file_pattern",
".",
"split",
"(",
"\",\"",
")",
":",
"data_files",
".",
"extend",
"(",
"tf",
".",
"gfile",
".",
"Glob",
"(",
"pattern",
")",
")",
"if",
"not",
"data_files",
":",
"tl",
".",
"logging",
".",
"fatal",
"(",
"\"Found no input files matching %s\"",
",",
"file_pattern",
")",
"else",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"\"Prefetching values from %d files matching %s\"",
",",
"len",
"(",
"data_files",
")",
",",
"file_pattern",
")",
"if",
"is_training",
":",
"print",
"(",
"\" is_training == True : RandomShuffleQueue\"",
")",
"filename_queue",
"=",
"tf",
".",
"train",
".",
"string_input_producer",
"(",
"data_files",
",",
"shuffle",
"=",
"True",
",",
"capacity",
"=",
"16",
",",
"name",
"=",
"shard_queue_name",
")",
"min_queue_examples",
"=",
"values_per_shard",
"*",
"input_queue_capacity_factor",
"capacity",
"=",
"min_queue_examples",
"+",
"100",
"*",
"batch_size",
"values_queue",
"=",
"tf",
".",
"RandomShuffleQueue",
"(",
"capacity",
"=",
"capacity",
",",
"min_after_dequeue",
"=",
"min_queue_examples",
",",
"dtypes",
"=",
"[",
"tf",
".",
"string",
"]",
",",
"name",
"=",
"\"random_\"",
"+",
"value_queue_name",
")",
"else",
":",
"print",
"(",
"\" is_training == False : FIFOQueue\"",
")",
"filename_queue",
"=",
"tf",
".",
"train",
".",
"string_input_producer",
"(",
"data_files",
",",
"shuffle",
"=",
"False",
",",
"capacity",
"=",
"1",
",",
"name",
"=",
"shard_queue_name",
")",
"capacity",
"=",
"values_per_shard",
"+",
"3",
"*",
"batch_size",
"values_queue",
"=",
"tf",
".",
"FIFOQueue",
"(",
"capacity",
"=",
"capacity",
",",
"dtypes",
"=",
"[",
"tf",
".",
"string",
"]",
",",
"name",
"=",
"\"fifo_\"",
"+",
"value_queue_name",
")",
"enqueue_ops",
"=",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"num_reader_threads",
")",
":",
"_",
",",
"value",
"=",
"reader",
".",
"read",
"(",
"filename_queue",
")",
"enqueue_ops",
".",
"append",
"(",
"values_queue",
".",
"enqueue",
"(",
"[",
"value",
"]",
")",
")",
"tf",
".",
"train",
".",
"queue_runner",
".",
"add_queue_runner",
"(",
"tf",
".",
"train",
".",
"queue_runner",
".",
"QueueRunner",
"(",
"values_queue",
",",
"enqueue_ops",
")",
")",
"tf",
".",
"summary",
".",
"scalar",
"(",
"\"queue/%s/fraction_of_%d_full\"",
"%",
"(",
"values_queue",
".",
"name",
",",
"capacity",
")",
",",
"tf",
".",
"cast",
"(",
"values_queue",
".",
"size",
"(",
")",
",",
"tf",
".",
"float32",
")",
"*",
"(",
"1.",
"/",
"capacity",
")",
")",
"return",
"values_queue"
] | Prefetches string values from disk into an input queue.
In training the capacity of the queue is important because a larger queue
means better mixing of training examples between shards. The minimum number of
values kept in the queue is values_per_shard * input_queue_capacity_factor,
where input_queue_capacity_factor should be chosen to trade off better mixing
with memory usage.
Args:
reader: Instance of tf.ReaderBase.
file_pattern: Comma-separated list of file patterns (e.g.
/tmp/train_data-?????-of-00100).
is_training: Boolean; whether prefetching for training or eval.
batch_size: Model batch size used to determine queue capacity.
values_per_shard: Approximate number of values per shard.
input_queue_capacity_factor: Minimum number of values to keep in the queue
in multiples of values_per_shard. See comments above.
num_reader_threads: Number of reader threads to fill the queue.
shard_queue_name: Name for the shards filename queue.
value_queue_name: Name for the values input queue.
Returns:
A Queue containing prefetched string values. | [
"Prefetches",
"string",
"values",
"from",
"disk",
"into",
"an",
"input",
"queue",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/data_process/tutorial_tfrecord3.py#L231-L293 | valid |
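A sketch of plugging prefetch_input_data into a training input pipeline; the file pattern matches the docstring's example and the shard statistics are illustrative values only.

import tensorflow as tf

reader = tf.TFRecordReader()
input_queue = prefetch_input_data(
    reader, file_pattern="/tmp/train_data-?????-of-00100", is_training=True,
    batch_size=32, values_per_shard=2300, input_queue_capacity_factor=2,
    num_reader_threads=4)
serialized_example = input_queue.dequeue()  # one serialized SequenceExample string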
tensorlayer/tensorlayer | examples/data_process/tutorial_tfrecord3.py | batch_with_dynamic_pad | def batch_with_dynamic_pad(images_and_captions, batch_size, queue_capacity, add_summaries=True):
"""Batches input images and captions.
This function splits the caption into an input sequence and a target sequence,
where the target sequence is the input sequence right-shifted by 1. Input and
target sequences are batched and padded up to the maximum length of sequences
in the batch. A mask is created to distinguish real words from padding words.
Example:
Actual captions in the batch ('-' denotes padded character):
[
        [ 1 2 3 4 5 ],
[ 1 2 3 4 - ],
[ 1 2 3 - - ],
]
input_seqs:
[
[ 1 2 3 4 ],
[ 1 2 3 - ],
[ 1 2 - - ],
]
target_seqs:
[
[ 2 3 4 5 ],
[ 2 3 4 - ],
[ 2 3 - - ],
]
mask:
[
[ 1 1 1 1 ],
[ 1 1 1 0 ],
[ 1 1 0 0 ],
]
Args:
images_and_captions: A list of pairs [image, caption], where image is a
Tensor of shape [height, width, channels] and caption is a 1-D Tensor of
any length. Each pair will be processed and added to the queue in a
separate thread.
batch_size: Batch size.
queue_capacity: Queue capacity.
add_summaries: If true, add caption length summaries.
Returns:
images: A Tensor of shape [batch_size, height, width, channels].
input_seqs: An int32 Tensor of shape [batch_size, padded_length].
target_seqs: An int32 Tensor of shape [batch_size, padded_length].
mask: An int32 0/1 Tensor of shape [batch_size, padded_length].
"""
enqueue_list = []
for image, caption in images_and_captions:
caption_length = tf.shape(caption)[0]
input_length = tf.expand_dims(tf.subtract(caption_length, 1), 0)
input_seq = tf.slice(caption, [0], input_length)
target_seq = tf.slice(caption, [1], input_length)
indicator = tf.ones(input_length, dtype=tf.int32)
enqueue_list.append([image, input_seq, target_seq, indicator])
images, input_seqs, target_seqs, mask = tf.train.batch_join(
enqueue_list, batch_size=batch_size, capacity=queue_capacity, dynamic_pad=True, name="batch_and_pad"
)
if add_summaries:
lengths = tf.add(tf.reduce_sum(mask, 1), 1)
tf.summary.scalar("caption_length/batch_min", tf.reduce_min(lengths))
tf.summary.scalar("caption_length/batch_max", tf.reduce_max(lengths))
tf.summary.scalar("caption_length/batch_mean", tf.reduce_mean(lengths))
return images, input_seqs, target_seqs, mask | python | def batch_with_dynamic_pad(images_and_captions, batch_size, queue_capacity, add_summaries=True):
"""Batches input images and captions.
This function splits the caption into an input sequence and a target sequence,
where the target sequence is the input sequence right-shifted by 1. Input and
target sequences are batched and padded up to the maximum length of sequences
in the batch. A mask is created to distinguish real words from padding words.
Example:
Actual captions in the batch ('-' denotes padded character):
[
        [ 1 2 3 4 5 ],
[ 1 2 3 4 - ],
[ 1 2 3 - - ],
]
input_seqs:
[
[ 1 2 3 4 ],
[ 1 2 3 - ],
[ 1 2 - - ],
]
target_seqs:
[
[ 2 3 4 5 ],
[ 2 3 4 - ],
[ 2 3 - - ],
]
mask:
[
[ 1 1 1 1 ],
[ 1 1 1 0 ],
[ 1 1 0 0 ],
]
Args:
images_and_captions: A list of pairs [image, caption], where image is a
Tensor of shape [height, width, channels] and caption is a 1-D Tensor of
any length. Each pair will be processed and added to the queue in a
separate thread.
batch_size: Batch size.
queue_capacity: Queue capacity.
add_summaries: If true, add caption length summaries.
Returns:
images: A Tensor of shape [batch_size, height, width, channels].
input_seqs: An int32 Tensor of shape [batch_size, padded_length].
target_seqs: An int32 Tensor of shape [batch_size, padded_length].
mask: An int32 0/1 Tensor of shape [batch_size, padded_length].
"""
enqueue_list = []
for image, caption in images_and_captions:
caption_length = tf.shape(caption)[0]
input_length = tf.expand_dims(tf.subtract(caption_length, 1), 0)
input_seq = tf.slice(caption, [0], input_length)
target_seq = tf.slice(caption, [1], input_length)
indicator = tf.ones(input_length, dtype=tf.int32)
enqueue_list.append([image, input_seq, target_seq, indicator])
images, input_seqs, target_seqs, mask = tf.train.batch_join(
enqueue_list, batch_size=batch_size, capacity=queue_capacity, dynamic_pad=True, name="batch_and_pad"
)
if add_summaries:
lengths = tf.add(tf.reduce_sum(mask, 1), 1)
tf.summary.scalar("caption_length/batch_min", tf.reduce_min(lengths))
tf.summary.scalar("caption_length/batch_max", tf.reduce_max(lengths))
tf.summary.scalar("caption_length/batch_mean", tf.reduce_mean(lengths))
return images, input_seqs, target_seqs, mask | [
"def",
"batch_with_dynamic_pad",
"(",
"images_and_captions",
",",
"batch_size",
",",
"queue_capacity",
",",
"add_summaries",
"=",
"True",
")",
":",
"enqueue_list",
"=",
"[",
"]",
"for",
"image",
",",
"caption",
"in",
"images_and_captions",
":",
"caption_length",
"=",
"tf",
".",
"shape",
"(",
"caption",
")",
"[",
"0",
"]",
"input_length",
"=",
"tf",
".",
"expand_dims",
"(",
"tf",
".",
"subtract",
"(",
"caption_length",
",",
"1",
")",
",",
"0",
")",
"input_seq",
"=",
"tf",
".",
"slice",
"(",
"caption",
",",
"[",
"0",
"]",
",",
"input_length",
")",
"target_seq",
"=",
"tf",
".",
"slice",
"(",
"caption",
",",
"[",
"1",
"]",
",",
"input_length",
")",
"indicator",
"=",
"tf",
".",
"ones",
"(",
"input_length",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"enqueue_list",
".",
"append",
"(",
"[",
"image",
",",
"input_seq",
",",
"target_seq",
",",
"indicator",
"]",
")",
"images",
",",
"input_seqs",
",",
"target_seqs",
",",
"mask",
"=",
"tf",
".",
"train",
".",
"batch_join",
"(",
"enqueue_list",
",",
"batch_size",
"=",
"batch_size",
",",
"capacity",
"=",
"queue_capacity",
",",
"dynamic_pad",
"=",
"True",
",",
"name",
"=",
"\"batch_and_pad\"",
")",
"if",
"add_summaries",
":",
"lengths",
"=",
"tf",
".",
"add",
"(",
"tf",
".",
"reduce_sum",
"(",
"mask",
",",
"1",
")",
",",
"1",
")",
"tf",
".",
"summary",
".",
"scalar",
"(",
"\"caption_length/batch_min\"",
",",
"tf",
".",
"reduce_min",
"(",
"lengths",
")",
")",
"tf",
".",
"summary",
".",
"scalar",
"(",
"\"caption_length/batch_max\"",
",",
"tf",
".",
"reduce_max",
"(",
"lengths",
")",
")",
"tf",
".",
"summary",
".",
"scalar",
"(",
"\"caption_length/batch_mean\"",
",",
"tf",
".",
"reduce_mean",
"(",
"lengths",
")",
")",
"return",
"images",
",",
"input_seqs",
",",
"target_seqs",
",",
"mask"
] | Batches input images and captions.
This function splits the caption into an input sequence and a target sequence,
where the target sequence is the input sequence right-shifted by 1. Input and
target sequences are batched and padded up to the maximum length of sequences
in the batch. A mask is created to distinguish real words from padding words.
Example:
Actual captions in the batch ('-' denotes padded character):
[
[ 1 2 3 4 5 ],
[ 1 2 3 4 - ],
[ 1 2 3 - - ],
]
input_seqs:
[
[ 1 2 3 4 ],
[ 1 2 3 - ],
[ 1 2 - - ],
]
target_seqs:
[
[ 2 3 4 5 ],
[ 2 3 4 - ],
[ 2 3 - - ],
]
mask:
[
[ 1 1 1 1 ],
[ 1 1 1 0 ],
[ 1 1 0 0 ],
]
Args:
images_and_captions: A list of pairs [image, caption], where image is a
Tensor of shape [height, width, channels] and caption is a 1-D Tensor of
any length. Each pair will be processed and added to the queue in a
separate thread.
batch_size: Batch size.
queue_capacity: Queue capacity.
add_summaries: If true, add caption length summaries.
Returns:
images: A Tensor of shape [batch_size, height, width, channels].
input_seqs: An int32 Tensor of shape [batch_size, padded_length].
target_seqs: An int32 Tensor of shape [batch_size, padded_length].
mask: An int32 0/1 Tensor of shape [batch_size, padded_length]. | [
"Batches",
"input",
"images",
"and",
"captions",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/data_process/tutorial_tfrecord3.py#L371-L443 | valid |
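The caption-to-(input, target, mask) transformation, shown on a single caption with plain numpy so the dynamic padding above is easy to verify by hand; the padded length is chosen arbitrarily.

import numpy as np

caption = np.array([1, 2, 3, 4, 5])
input_seq, target_seq = caption[:-1], caption[1:]
pad = 6 - len(input_seq)  # pad out to a batch-wide length of 6
print(np.pad(input_seq, (0, pad), 'constant'))                 # [1 2 3 4 0 0]
print(np.pad(target_seq, (0, pad), 'constant'))                # [2 3 4 5 0 0]
print(np.pad(np.ones_like(input_seq), (0, pad), 'constant'))   # [1 1 1 1 0 0]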