repo (stringclasses, 679 values) | path (stringlengths, 6 to 122) | func_name (stringlengths, 2 to 76) | original_string (stringlengths, 87 to 70.9k) | language (stringclasses, 1 value) | code (stringlengths, 87 to 70.9k) | code_tokens (sequencelengths, 20 to 6.91k) | docstring (stringlengths, 1 to 21.7k) | docstring_tokens (sequencelengths, 1 to 1.6k) | sha (stringclasses, 679 values) | url (stringlengths, 92 to 213) | partition (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|---|---|---
tensorlayer/tensorlayer | tensorlayer/layers/normalization.py | _to_channel_first_bias | def _to_channel_first_bias(b):
"""Reshape [c] to [c, 1, 1]."""
channel_size = int(b.shape[0])
new_shape = (channel_size, 1, 1)
# new_shape = [-1, 1, 1] # doesn't work with tensorRT
return tf.reshape(b, new_shape) | python | def _to_channel_first_bias(b):
"""Reshape [c] to [c, 1, 1]."""
channel_size = int(b.shape[0])
new_shape = (channel_size, 1, 1)
# new_shape = [-1, 1, 1] # doesn't work with tensorRT
return tf.reshape(b, new_shape) | [
"def",
"_to_channel_first_bias",
"(",
"b",
")",
":",
"channel_size",
"=",
"int",
"(",
"b",
".",
"shape",
"[",
"0",
"]",
")",
"new_shape",
"=",
"(",
"channel_size",
",",
"1",
",",
"1",
")",
"# new_shape = [-1, 1, 1] # doesn't work with tensorRT",
"return",
"tf",
".",
"reshape",
"(",
"b",
",",
"new_shape",
")"
] | Reshape [c] to [c, 1, 1]. | [
"Reshape",
"[",
"c",
"]",
"to",
"[",
"c",
"1",
"1",
"]",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/normalization.py#L74-L79 | valid |
tensorlayer/tensorlayer | tensorlayer/layers/normalization.py | _bias_scale | def _bias_scale(x, b, data_format):
"""The multiplication counter part of tf.nn.bias_add."""
if data_format == 'NHWC':
return x * b
elif data_format == 'NCHW':
return x * _to_channel_first_bias(b)
else:
raise ValueError('invalid data_format: %s' % data_format) | python | def _bias_scale(x, b, data_format):
"""The multiplication counter part of tf.nn.bias_add."""
if data_format == 'NHWC':
return x * b
elif data_format == 'NCHW':
return x * _to_channel_first_bias(b)
else:
raise ValueError('invalid data_format: %s' % data_format) | [
"def",
"_bias_scale",
"(",
"x",
",",
"b",
",",
"data_format",
")",
":",
"if",
"data_format",
"==",
"'NHWC'",
":",
"return",
"x",
"*",
"b",
"elif",
"data_format",
"==",
"'NCHW'",
":",
"return",
"x",
"*",
"_to_channel_first_bias",
"(",
"b",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'invalid data_format: %s'",
"%",
"data_format",
")"
] | The multiplication counterpart of tf.nn.bias_add. | [
"The",
"multiplication",
"counter",
"part",
"of",
"tf",
".",
"nn",
".",
"bias_add",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/normalization.py#L82-L89 | valid |
tensorlayer/tensorlayer | tensorlayer/layers/normalization.py | _bias_add | def _bias_add(x, b, data_format):
"""Alternative implementation of tf.nn.bias_add which is compatiable with tensorRT."""
if data_format == 'NHWC':
return tf.add(x, b)
elif data_format == 'NCHW':
return tf.add(x, _to_channel_first_bias(b))
else:
raise ValueError('invalid data_format: %s' % data_format) | python | def _bias_add(x, b, data_format):
"""Alternative implementation of tf.nn.bias_add which is compatiable with tensorRT."""
if data_format == 'NHWC':
return tf.add(x, b)
elif data_format == 'NCHW':
return tf.add(x, _to_channel_first_bias(b))
else:
raise ValueError('invalid data_format: %s' % data_format) | [
"def",
"_bias_add",
"(",
"x",
",",
"b",
",",
"data_format",
")",
":",
"if",
"data_format",
"==",
"'NHWC'",
":",
"return",
"tf",
".",
"add",
"(",
"x",
",",
"b",
")",
"elif",
"data_format",
"==",
"'NCHW'",
":",
"return",
"tf",
".",
"add",
"(",
"x",
",",
"_to_channel_first_bias",
"(",
"b",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'invalid data_format: %s'",
"%",
"data_format",
")"
] | Alternative implementation of tf.nn.bias_add which is compatible with tensorRT. | [
"Alternative",
"implementation",
"of",
"tf",
".",
"nn",
".",
"bias_add",
"which",
"is",
"compatiable",
"with",
"tensorRT",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/normalization.py#L92-L99 | valid |
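The three helpers above (`_to_channel_first_bias`, `_bias_scale`, `_bias_add`) exist because TensorRT conversion has trouble with `tf.nn.bias_add` in NCHW layouts; reshaping the per-channel bias to `[c, 1, 1]` lets plain broadcasting do the same work. A minimal NumPy sketch of that broadcasting (NumPy is used only as a stand-in for TensorFlow's identical broadcasting rules; the array values are made up):

```python
import numpy as np

# NCHW activations: (batch, channels, height, width); one bias value per channel.
x = np.arange(2 * 3 * 2 * 2, dtype=np.float32).reshape(2, 3, 2, 2)
b = np.array([10.0, 20.0, 30.0], dtype=np.float32)

b_chw = b.reshape(3, 1, 1)   # what _to_channel_first_bias does: [c] -> [c, 1, 1]
scaled = x * b_chw           # _bias_scale with data_format == 'NCHW'
shifted = x + b_chw          # _bias_add with data_format == 'NCHW'

# In NHWC the trailing channel axis broadcasts directly, so no reshape is needed;
# both layouts produce the same per-channel result.
x_nhwc = np.transpose(x, (0, 2, 3, 1))
assert np.allclose(np.transpose(shifted, (0, 2, 3, 1)), x_nhwc + b)
```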
tensorlayer/tensorlayer | tensorlayer/layers/normalization.py | batch_normalization | def batch_normalization(x, mean, variance, offset, scale, variance_epsilon, data_format, name=None):
"""Data Format aware version of tf.nn.batch_normalization."""
with ops.name_scope(name, 'batchnorm', [x, mean, variance, scale, offset]):
inv = math_ops.rsqrt(variance + variance_epsilon)
if scale is not None:
inv *= scale
a = math_ops.cast(inv, x.dtype)
b = math_ops.cast(offset - mean * inv if offset is not None else -mean * inv, x.dtype)
# Return a * x + b with customized data_format.
        # Currently TF doesn't have bias_scale, and tensorRT has a bug in converting tf.nn.bias_add
        # So we reimplemented them to make the model work with tensorRT.
# See https://github.com/tensorlayer/openpose-plus/issues/75 for more details.
df = {'channels_first': 'NCHW', 'channels_last': 'NHWC'}
return _bias_add(_bias_scale(x, a, df[data_format]), b, df[data_format]) | python | def batch_normalization(x, mean, variance, offset, scale, variance_epsilon, data_format, name=None):
"""Data Format aware version of tf.nn.batch_normalization."""
with ops.name_scope(name, 'batchnorm', [x, mean, variance, scale, offset]):
inv = math_ops.rsqrt(variance + variance_epsilon)
if scale is not None:
inv *= scale
a = math_ops.cast(inv, x.dtype)
b = math_ops.cast(offset - mean * inv if offset is not None else -mean * inv, x.dtype)
# Return a * x + b with customized data_format.
        # Currently TF doesn't have bias_scale, and tensorRT has a bug in converting tf.nn.bias_add
        # So we reimplemented them to make the model work with tensorRT.
# See https://github.com/tensorlayer/openpose-plus/issues/75 for more details.
df = {'channels_first': 'NCHW', 'channels_last': 'NHWC'}
return _bias_add(_bias_scale(x, a, df[data_format]), b, df[data_format]) | [
"def",
"batch_normalization",
"(",
"x",
",",
"mean",
",",
"variance",
",",
"offset",
",",
"scale",
",",
"variance_epsilon",
",",
"data_format",
",",
"name",
"=",
"None",
")",
":",
"with",
"ops",
".",
"name_scope",
"(",
"name",
",",
"'batchnorm'",
",",
"[",
"x",
",",
"mean",
",",
"variance",
",",
"scale",
",",
"offset",
"]",
")",
":",
"inv",
"=",
"math_ops",
".",
"rsqrt",
"(",
"variance",
"+",
"variance_epsilon",
")",
"if",
"scale",
"is",
"not",
"None",
":",
"inv",
"*=",
"scale",
"a",
"=",
"math_ops",
".",
"cast",
"(",
"inv",
",",
"x",
".",
"dtype",
")",
"b",
"=",
"math_ops",
".",
"cast",
"(",
"offset",
"-",
"mean",
"*",
"inv",
"if",
"offset",
"is",
"not",
"None",
"else",
"-",
"mean",
"*",
"inv",
",",
"x",
".",
"dtype",
")",
"# Return a * x + b with customized data_format.",
"# Currently TF doesn't have bias_scale, and tensorRT has bug in converting tf.nn.bias_add",
"# So we reimplemted them to allow make the model work with tensorRT.",
"# See https://github.com/tensorlayer/openpose-plus/issues/75 for more details.",
"df",
"=",
"{",
"'channels_first'",
":",
"'NCHW'",
",",
"'channels_last'",
":",
"'NHWC'",
"}",
"return",
"_bias_add",
"(",
"_bias_scale",
"(",
"x",
",",
"a",
",",
"df",
"[",
"data_format",
"]",
")",
",",
"b",
",",
"df",
"[",
"data_format",
"]",
")"
] | Data Format aware version of tf.nn.batch_normalization. | [
"Data",
"Format",
"aware",
"version",
"of",
"tf",
".",
"nn",
".",
"batch_normalization",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/normalization.py#L102-L117 | valid |
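At inference time the function above folds batch normalization into a single per-channel affine map `y = a * x + b`, with `a = scale * rsqrt(variance + eps)` and `b = offset - mean * a`, before handing `a` and `b` to the broadcasting helpers. A small NumPy sketch of that folding (the values are illustrative, not from the source):

```python
import numpy as np

mean  = np.array([0.1, -0.2, 0.3], dtype=np.float32)
var   = np.array([1.5,  0.9, 2.0], dtype=np.float32)
gamma = np.array([1.0,  0.5, 2.0], dtype=np.float32)   # scale
beta  = np.array([0.0,  1.0, -1.0], dtype=np.float32)  # offset
eps   = 1e-5

a = gamma / np.sqrt(var + eps)   # inv = rsqrt(variance + eps); inv *= scale
b = beta - mean * a              # offset - mean * inv

x = np.random.randn(4, 3).astype(np.float32)   # channels-last toy input
y_folded   = a * x + b
y_textbook = gamma * (x - mean) / np.sqrt(var + eps) + beta
assert np.allclose(y_folded, y_textbook, atol=1e-5)
```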
tensorlayer/tensorlayer | tensorlayer/layers/utils.py | compute_alpha | def compute_alpha(x):
"""Computing the scale parameter."""
threshold = _compute_threshold(x)
alpha1_temp1 = tf.where(tf.greater(x, threshold), x, tf.zeros_like(x, tf.float32))
alpha1_temp2 = tf.where(tf.less(x, -threshold), x, tf.zeros_like(x, tf.float32))
alpha_array = tf.add(alpha1_temp1, alpha1_temp2, name=None)
alpha_array_abs = tf.abs(alpha_array)
alpha_array_abs1 = tf.where(
tf.greater(alpha_array_abs, 0), tf.ones_like(alpha_array_abs, tf.float32),
tf.zeros_like(alpha_array_abs, tf.float32)
)
alpha_sum = tf.reduce_sum(alpha_array_abs)
n = tf.reduce_sum(alpha_array_abs1)
alpha = tf.div(alpha_sum, n)
return alpha | python | def compute_alpha(x):
"""Computing the scale parameter."""
threshold = _compute_threshold(x)
alpha1_temp1 = tf.where(tf.greater(x, threshold), x, tf.zeros_like(x, tf.float32))
alpha1_temp2 = tf.where(tf.less(x, -threshold), x, tf.zeros_like(x, tf.float32))
alpha_array = tf.add(alpha1_temp1, alpha1_temp2, name=None)
alpha_array_abs = tf.abs(alpha_array)
alpha_array_abs1 = tf.where(
tf.greater(alpha_array_abs, 0), tf.ones_like(alpha_array_abs, tf.float32),
tf.zeros_like(alpha_array_abs, tf.float32)
)
alpha_sum = tf.reduce_sum(alpha_array_abs)
n = tf.reduce_sum(alpha_array_abs1)
alpha = tf.div(alpha_sum, n)
return alpha | [
"def",
"compute_alpha",
"(",
"x",
")",
":",
"threshold",
"=",
"_compute_threshold",
"(",
"x",
")",
"alpha1_temp1",
"=",
"tf",
".",
"where",
"(",
"tf",
".",
"greater",
"(",
"x",
",",
"threshold",
")",
",",
"x",
",",
"tf",
".",
"zeros_like",
"(",
"x",
",",
"tf",
".",
"float32",
")",
")",
"alpha1_temp2",
"=",
"tf",
".",
"where",
"(",
"tf",
".",
"less",
"(",
"x",
",",
"-",
"threshold",
")",
",",
"x",
",",
"tf",
".",
"zeros_like",
"(",
"x",
",",
"tf",
".",
"float32",
")",
")",
"alpha_array",
"=",
"tf",
".",
"add",
"(",
"alpha1_temp1",
",",
"alpha1_temp2",
",",
"name",
"=",
"None",
")",
"alpha_array_abs",
"=",
"tf",
".",
"abs",
"(",
"alpha_array",
")",
"alpha_array_abs1",
"=",
"tf",
".",
"where",
"(",
"tf",
".",
"greater",
"(",
"alpha_array_abs",
",",
"0",
")",
",",
"tf",
".",
"ones_like",
"(",
"alpha_array_abs",
",",
"tf",
".",
"float32",
")",
",",
"tf",
".",
"zeros_like",
"(",
"alpha_array_abs",
",",
"tf",
".",
"float32",
")",
")",
"alpha_sum",
"=",
"tf",
".",
"reduce_sum",
"(",
"alpha_array_abs",
")",
"n",
"=",
"tf",
".",
"reduce_sum",
"(",
"alpha_array_abs1",
")",
"alpha",
"=",
"tf",
".",
"div",
"(",
"alpha_sum",
",",
"n",
")",
"return",
"alpha"
] | Computing the scale parameter. | [
"Computing",
"the",
"scale",
"parameter",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/utils.py#L47-L61 | valid |
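`compute_alpha` follows the ternary weight network (TWN) recipe referenced in `_compute_threshold` (defined in a later row): alpha is the mean magnitude of the weights whose absolute value exceeds the threshold delta, which is 0.7 times the mean absolute weight. A NumPy sketch with made-up weights:

```python
import numpy as np

w = np.array([0.9, -0.05, 0.4, -0.8, 0.02, 0.6], dtype=np.float32)

delta = 0.7 * np.mean(np.abs(w))                          # _compute_threshold
mask  = np.abs(w) > delta                                 # weights kept after thresholding
alpha = np.sum(np.abs(w[mask])) / np.count_nonzero(mask)  # compute_alpha

print(delta, alpha)   # ~0.323 and (0.9 + 0.4 + 0.8 + 0.6) / 4 = 0.675
```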
tensorlayer/tensorlayer | tensorlayer/layers/utils.py | flatten_reshape | def flatten_reshape(variable, name='flatten'):
"""Reshapes a high-dimension vector input.
[batch_size, mask_row, mask_col, n_mask] ---> [batch_size, mask_row x mask_col x n_mask]
Parameters
----------
variable : TensorFlow variable or tensor
        The variable or tensor to be flattened.
name : str
A unique layer name.
Returns
-------
Tensor
Flatten Tensor
Examples
--------
>>> import tensorflow as tf
>>> import tensorlayer as tl
>>> x = tf.placeholder(tf.float32, [None, 128, 128, 3])
>>> # Convolution Layer with 32 filters and a kernel size of 5
>>> network = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
>>> # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
>>> network = tf.layers.max_pooling2d(network, 2, 2)
>>> print(network.get_shape()[:].as_list())
>>> [None, 62, 62, 32]
>>> network = tl.layers.flatten_reshape(network)
>>> print(network.get_shape()[:].as_list()[1:])
>>> [None, 123008]
"""
dim = 1
for d in variable.get_shape()[1:].as_list():
dim *= d
return tf.reshape(variable, shape=[-1, dim], name=name) | python | def flatten_reshape(variable, name='flatten'):
"""Reshapes a high-dimension vector input.
[batch_size, mask_row, mask_col, n_mask] ---> [batch_size, mask_row x mask_col x n_mask]
Parameters
----------
variable : TensorFlow variable or tensor
        The variable or tensor to be flattened.
name : str
A unique layer name.
Returns
-------
Tensor
Flatten Tensor
Examples
--------
>>> import tensorflow as tf
>>> import tensorlayer as tl
>>> x = tf.placeholder(tf.float32, [None, 128, 128, 3])
>>> # Convolution Layer with 32 filters and a kernel size of 5
>>> network = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
>>> # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
>>> network = tf.layers.max_pooling2d(network, 2, 2)
>>> print(network.get_shape()[:].as_list())
>>> [None, 62, 62, 32]
>>> network = tl.layers.flatten_reshape(network)
>>> print(network.get_shape()[:].as_list()[1:])
>>> [None, 123008]
"""
dim = 1
for d in variable.get_shape()[1:].as_list():
dim *= d
return tf.reshape(variable, shape=[-1, dim], name=name) | [
"def",
"flatten_reshape",
"(",
"variable",
",",
"name",
"=",
"'flatten'",
")",
":",
"dim",
"=",
"1",
"for",
"d",
"in",
"variable",
".",
"get_shape",
"(",
")",
"[",
"1",
":",
"]",
".",
"as_list",
"(",
")",
":",
"dim",
"*=",
"d",
"return",
"tf",
".",
"reshape",
"(",
"variable",
",",
"shape",
"=",
"[",
"-",
"1",
",",
"dim",
"]",
",",
"name",
"=",
"name",
")"
] | Reshapes a high-dimension vector input.
[batch_size, mask_row, mask_col, n_mask] ---> [batch_size, mask_row x mask_col x n_mask]
Parameters
----------
variable : TensorFlow variable or tensor
    The variable or tensor to be flattened.
name : str
A unique layer name.
Returns
-------
Tensor
Flatten Tensor
Examples
--------
>>> import tensorflow as tf
>>> import tensorlayer as tl
>>> x = tf.placeholder(tf.float32, [None, 128, 128, 3])
>>> # Convolution Layer with 32 filters and a kernel size of 5
>>> network = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
>>> # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
>>> network = tf.layers.max_pooling2d(network, 2, 2)
>>> print(network.get_shape()[:].as_list())
>>> [None, 62, 62, 32]
>>> network = tl.layers.flatten_reshape(network)
>>> print(network.get_shape()[:].as_list()[1:])
>>> [None, 123008] | [
"Reshapes",
"a",
"high",
"-",
"dimension",
"vector",
"input",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/utils.py#L64-L99 | valid |
tensorlayer/tensorlayer | tensorlayer/layers/utils.py | get_layers_with_name | def get_layers_with_name(net, name="", verbose=False):
"""Get a list of layers' output in a network by a given name scope.
Parameters
-----------
net : :class:`Layer`
The last layer of the network.
name : str
Get the layers' output that contain this name.
verbose : boolean
If True, print information of all the layers' output
Returns
--------
list of Tensor
A list of layers' output (TensorFlow tensor)
Examples
---------
>>> import tensorlayer as tl
>>> layers = tl.layers.get_layers_with_name(net, "CNN", True)
"""
logging.info(" [*] geting layers with %s" % name)
layers = []
i = 0
for layer in net.all_layers:
# logging.info(type(layer.name))
if name in layer.name:
layers.append(layer)
if verbose:
logging.info(" got {:3}: {:15} {}".format(i, layer.name, str(layer.get_shape())))
i = i + 1
return layers | python | def get_layers_with_name(net, name="", verbose=False):
"""Get a list of layers' output in a network by a given name scope.
Parameters
-----------
net : :class:`Layer`
The last layer of the network.
name : str
Get the layers' output that contain this name.
verbose : boolean
If True, print information of all the layers' output
Returns
--------
list of Tensor
A list of layers' output (TensorFlow tensor)
Examples
---------
>>> import tensorlayer as tl
>>> layers = tl.layers.get_layers_with_name(net, "CNN", True)
"""
logging.info(" [*] geting layers with %s" % name)
layers = []
i = 0
for layer in net.all_layers:
# logging.info(type(layer.name))
if name in layer.name:
layers.append(layer)
if verbose:
logging.info(" got {:3}: {:15} {}".format(i, layer.name, str(layer.get_shape())))
i = i + 1
return layers | [
"def",
"get_layers_with_name",
"(",
"net",
",",
"name",
"=",
"\"\"",
",",
"verbose",
"=",
"False",
")",
":",
"logging",
".",
"info",
"(",
"\" [*] geting layers with %s\"",
"%",
"name",
")",
"layers",
"=",
"[",
"]",
"i",
"=",
"0",
"for",
"layer",
"in",
"net",
".",
"all_layers",
":",
"# logging.info(type(layer.name))",
"if",
"name",
"in",
"layer",
".",
"name",
":",
"layers",
".",
"append",
"(",
"layer",
")",
"if",
"verbose",
":",
"logging",
".",
"info",
"(",
"\" got {:3}: {:15} {}\"",
".",
"format",
"(",
"i",
",",
"layer",
".",
"name",
",",
"str",
"(",
"layer",
".",
"get_shape",
"(",
")",
")",
")",
")",
"i",
"=",
"i",
"+",
"1",
"return",
"layers"
] | Get a list of layers' output in a network by a given name scope.
Parameters
-----------
net : :class:`Layer`
The last layer of the network.
name : str
Get the layers' output that contain this name.
verbose : boolean
If True, print information of all the layers' output
Returns
--------
list of Tensor
A list of layers' output (TensorFlow tensor)
Examples
---------
>>> import tensorlayer as tl
>>> layers = tl.layers.get_layers_with_name(net, "CNN", True) | [
"Get",
"a",
"list",
"of",
"layers",
"output",
"in",
"a",
"network",
"by",
"a",
"given",
"name",
"scope",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/utils.py#L112-L149 | valid |
tensorlayer/tensorlayer | tensorlayer/layers/utils.py | get_variables_with_name | def get_variables_with_name(name=None, train_only=True, verbose=False):
"""Get a list of TensorFlow variables by a given name scope.
Parameters
----------
name : str
Get the variables that contain this name.
train_only : boolean
        If True, only get the trainable variables.
verbose : boolean
If True, print the information of all variables.
Returns
-------
list of Tensor
A list of TensorFlow variables
Examples
--------
>>> import tensorlayer as tl
>>> dense_vars = tl.layers.get_variables_with_name('dense', True, True)
"""
if name is None:
raise Exception("please input a name")
logging.info(" [*] geting variables with %s" % name)
# tvar = tf.trainable_variables() if train_only else tf.all_variables()
if train_only:
t_vars = tf.trainable_variables()
else:
t_vars = tf.global_variables()
d_vars = [var for var in t_vars if name in var.name]
if verbose:
for idx, v in enumerate(d_vars):
logging.info(" got {:3}: {:15} {}".format(idx, v.name, str(v.get_shape())))
return d_vars | python | def get_variables_with_name(name=None, train_only=True, verbose=False):
"""Get a list of TensorFlow variables by a given name scope.
Parameters
----------
name : str
Get the variables that contain this name.
train_only : boolean
        If True, only get the trainable variables.
verbose : boolean
If True, print the information of all variables.
Returns
-------
list of Tensor
A list of TensorFlow variables
Examples
--------
>>> import tensorlayer as tl
>>> dense_vars = tl.layers.get_variables_with_name('dense', True, True)
"""
if name is None:
raise Exception("please input a name")
logging.info(" [*] geting variables with %s" % name)
# tvar = tf.trainable_variables() if train_only else tf.all_variables()
if train_only:
t_vars = tf.trainable_variables()
else:
t_vars = tf.global_variables()
d_vars = [var for var in t_vars if name in var.name]
if verbose:
for idx, v in enumerate(d_vars):
logging.info(" got {:3}: {:15} {}".format(idx, v.name, str(v.get_shape())))
return d_vars | [
"def",
"get_variables_with_name",
"(",
"name",
"=",
"None",
",",
"train_only",
"=",
"True",
",",
"verbose",
"=",
"False",
")",
":",
"if",
"name",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"please input a name\"",
")",
"logging",
".",
"info",
"(",
"\" [*] geting variables with %s\"",
"%",
"name",
")",
"# tvar = tf.trainable_variables() if train_only else tf.all_variables()",
"if",
"train_only",
":",
"t_vars",
"=",
"tf",
".",
"trainable_variables",
"(",
")",
"else",
":",
"t_vars",
"=",
"tf",
".",
"global_variables",
"(",
")",
"d_vars",
"=",
"[",
"var",
"for",
"var",
"in",
"t_vars",
"if",
"name",
"in",
"var",
".",
"name",
"]",
"if",
"verbose",
":",
"for",
"idx",
",",
"v",
"in",
"enumerate",
"(",
"d_vars",
")",
":",
"logging",
".",
"info",
"(",
"\" got {:3}: {:15} {}\"",
".",
"format",
"(",
"idx",
",",
"v",
".",
"name",
",",
"str",
"(",
"v",
".",
"get_shape",
"(",
")",
")",
")",
")",
"return",
"d_vars"
] | Get a list of TensorFlow variables by a given name scope.
Parameters
----------
name : str
Get the variables that contain this name.
train_only : boolean
    If True, only get the trainable variables.
verbose : boolean
If True, print the information of all variables.
Returns
-------
list of Tensor
A list of TensorFlow variables
Examples
--------
>>> import tensorlayer as tl
>>> dense_vars = tl.layers.get_variables_with_name('dense', True, True) | [
"Get",
"a",
"list",
"of",
"TensorFlow",
"variables",
"by",
"a",
"given",
"name",
"scope",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/utils.py#L153-L194 | valid |
tensorlayer/tensorlayer | tensorlayer/layers/utils.py | initialize_rnn_state | def initialize_rnn_state(state, feed_dict=None):
"""Returns the initialized RNN state.
The inputs are `LSTMStateTuple` or `State` of `RNNCells`, and an optional `feed_dict`.
Parameters
----------
state : RNN state.
The TensorFlow's RNN state.
feed_dict : dictionary
Initial RNN state; if None, returns zero state.
Returns
-------
RNN state
The TensorFlow's RNN state.
"""
if isinstance(state, LSTMStateTuple):
c = state.c.eval(feed_dict=feed_dict)
h = state.h.eval(feed_dict=feed_dict)
return c, h
else:
new_state = state.eval(feed_dict=feed_dict)
return new_state | python | def initialize_rnn_state(state, feed_dict=None):
"""Returns the initialized RNN state.
The inputs are `LSTMStateTuple` or `State` of `RNNCells`, and an optional `feed_dict`.
Parameters
----------
state : RNN state.
The TensorFlow's RNN state.
feed_dict : dictionary
Initial RNN state; if None, returns zero state.
Returns
-------
RNN state
The TensorFlow's RNN state.
"""
if isinstance(state, LSTMStateTuple):
c = state.c.eval(feed_dict=feed_dict)
h = state.h.eval(feed_dict=feed_dict)
return c, h
else:
new_state = state.eval(feed_dict=feed_dict)
return new_state | [
"def",
"initialize_rnn_state",
"(",
"state",
",",
"feed_dict",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"state",
",",
"LSTMStateTuple",
")",
":",
"c",
"=",
"state",
".",
"c",
".",
"eval",
"(",
"feed_dict",
"=",
"feed_dict",
")",
"h",
"=",
"state",
".",
"h",
".",
"eval",
"(",
"feed_dict",
"=",
"feed_dict",
")",
"return",
"c",
",",
"h",
"else",
":",
"new_state",
"=",
"state",
".",
"eval",
"(",
"feed_dict",
"=",
"feed_dict",
")",
"return",
"new_state"
] | Returns the initialized RNN state.
The inputs are `LSTMStateTuple` or `State` of `RNNCells`, and an optional `feed_dict`.
Parameters
----------
state : RNN state.
The TensorFlow's RNN state.
feed_dict : dictionary
Initial RNN state; if None, returns zero state.
Returns
-------
RNN state
The TensorFlow's RNN state. | [
"Returns",
"the",
"initialized",
"RNN",
"state",
".",
"The",
"inputs",
"are",
"LSTMStateTuple",
"or",
"State",
"of",
"RNNCells",
"and",
"an",
"optional",
"feed_dict",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/utils.py#L216-L239 | valid |
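A hedged TF 1.x usage sketch for `initialize_rnn_state`: `rnn_net` is a placeholder for an RNN layer built earlier in graph mode whose `initial_state` is an `LSTMStateTuple` of tensors (the usual TensorLayer convention); the `.eval()` calls inside the function need an active default session.

```python
import tensorflow as tf

# `rnn_net` is assumed to exist from an earlier graph-mode model definition.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    c, h = initialize_rnn_state(rnn_net.initial_state)   # numpy arrays, typically zeros
    # c and h can now be fed back in as the starting state of the next rollout.
```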
tensorlayer/tensorlayer | tensorlayer/layers/utils.py | list_remove_repeat | def list_remove_repeat(x):
"""Remove the repeated items in a list, and return the processed list.
You may need it to create merged layer like Concat, Elementwise and etc.
Parameters
----------
x : list
Input
Returns
-------
list
        A list after removing its repeated items
Examples
-------
>>> l = [2, 3, 4, 2, 3]
>>> l = list_remove_repeat(l)
[2, 3, 4]
"""
y = []
for i in x:
if i not in y:
y.append(i)
return y | python | def list_remove_repeat(x):
"""Remove the repeated items in a list, and return the processed list.
You may need it to create merged layer like Concat, Elementwise and etc.
Parameters
----------
x : list
Input
Returns
-------
list
        A list after removing its repeated items
Examples
-------
>>> l = [2, 3, 4, 2, 3]
>>> l = list_remove_repeat(l)
[2, 3, 4]
"""
y = []
for i in x:
if i not in y:
y.append(i)
return y | [
"def",
"list_remove_repeat",
"(",
"x",
")",
":",
"y",
"=",
"[",
"]",
"for",
"i",
"in",
"x",
":",
"if",
"i",
"not",
"in",
"y",
":",
"y",
".",
"append",
"(",
"i",
")",
"return",
"y"
] | Remove the repeated items in a list, and return the processed list.
You may need it to create merged layer like Concat, Elementwise and etc.
Parameters
----------
x : list
Input
Returns
-------
list
    A list after removing its repeated items
Examples
-------
>>> l = [2, 3, 4, 2, 3]
>>> l = list_remove_repeat(l)
[2, 3, 4] | [
"Remove",
"the",
"repeated",
"items",
"in",
"a",
"list",
"and",
"return",
"the",
"processed",
"list",
".",
"You",
"may",
"need",
"it",
"to",
"create",
"merged",
"layer",
"like",
"Concat",
"Elementwise",
"and",
"etc",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/utils.py#L242-L268 | valid |
tensorlayer/tensorlayer | tensorlayer/layers/utils.py | merge_networks | def merge_networks(layers=None):
"""Merge all parameters, layers and dropout probabilities to a :class:`Layer`.
The output of return network is the first network in the list.
Parameters
----------
layers : list of :class:`Layer`
Merge all parameters, layers and dropout probabilities to the first layer in the list.
Returns
--------
:class:`Layer`
The network after merging all parameters, layers and dropout probabilities to the first network in the list.
Examples
---------
>>> import tensorlayer as tl
>>> n1 = ...
>>> n2 = ...
>>> n1 = tl.layers.merge_networks([n1, n2])
"""
if layers is None:
raise Exception("layers should be a list of TensorLayer's Layers.")
layer = layers[0]
all_params = []
all_layers = []
all_drop = {}
for l in layers:
all_params.extend(l.all_params)
all_layers.extend(l.all_layers)
all_drop.update(l.all_drop)
layer.all_params = list(all_params)
layer.all_layers = list(all_layers)
layer.all_drop = dict(all_drop)
layer.all_layers = list_remove_repeat(layer.all_layers)
layer.all_params = list_remove_repeat(layer.all_params)
return layer | python | def merge_networks(layers=None):
"""Merge all parameters, layers and dropout probabilities to a :class:`Layer`.
The output of return network is the first network in the list.
Parameters
----------
layers : list of :class:`Layer`
Merge all parameters, layers and dropout probabilities to the first layer in the list.
Returns
--------
:class:`Layer`
The network after merging all parameters, layers and dropout probabilities to the first network in the list.
Examples
---------
>>> import tensorlayer as tl
>>> n1 = ...
>>> n2 = ...
>>> n1 = tl.layers.merge_networks([n1, n2])
"""
if layers is None:
raise Exception("layers should be a list of TensorLayer's Layers.")
layer = layers[0]
all_params = []
all_layers = []
all_drop = {}
for l in layers:
all_params.extend(l.all_params)
all_layers.extend(l.all_layers)
all_drop.update(l.all_drop)
layer.all_params = list(all_params)
layer.all_layers = list(all_layers)
layer.all_drop = dict(all_drop)
layer.all_layers = list_remove_repeat(layer.all_layers)
layer.all_params = list_remove_repeat(layer.all_params)
return layer | [
"def",
"merge_networks",
"(",
"layers",
"=",
"None",
")",
":",
"if",
"layers",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"layers should be a list of TensorLayer's Layers.\"",
")",
"layer",
"=",
"layers",
"[",
"0",
"]",
"all_params",
"=",
"[",
"]",
"all_layers",
"=",
"[",
"]",
"all_drop",
"=",
"{",
"}",
"for",
"l",
"in",
"layers",
":",
"all_params",
".",
"extend",
"(",
"l",
".",
"all_params",
")",
"all_layers",
".",
"extend",
"(",
"l",
".",
"all_layers",
")",
"all_drop",
".",
"update",
"(",
"l",
".",
"all_drop",
")",
"layer",
".",
"all_params",
"=",
"list",
"(",
"all_params",
")",
"layer",
".",
"all_layers",
"=",
"list",
"(",
"all_layers",
")",
"layer",
".",
"all_drop",
"=",
"dict",
"(",
"all_drop",
")",
"layer",
".",
"all_layers",
"=",
"list_remove_repeat",
"(",
"layer",
".",
"all_layers",
")",
"layer",
".",
"all_params",
"=",
"list_remove_repeat",
"(",
"layer",
".",
"all_params",
")",
"return",
"layer"
] | Merge all parameters, layers and dropout probabilities to a :class:`Layer`.
The output of return network is the first network in the list.
Parameters
----------
layers : list of :class:`Layer`
Merge all parameters, layers and dropout probabilities to the first layer in the list.
Returns
--------
:class:`Layer`
The network after merging all parameters, layers and dropout probabilities to the first network in the list.
Examples
---------
>>> import tensorlayer as tl
>>> n1 = ...
>>> n2 = ...
>>> n1 = tl.layers.merge_networks([n1, n2]) | [
"Merge",
"all",
"parameters",
"layers",
"and",
"dropout",
"probabilities",
"to",
"a",
":",
"class",
":",
"Layer",
".",
"The",
"output",
"of",
"return",
"network",
"is",
"the",
"first",
"network",
"in",
"the",
"list",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/utils.py#L271-L313 | valid |
tensorlayer/tensorlayer | tensorlayer/layers/utils.py | print_all_variables | def print_all_variables(train_only=False):
"""Print information of trainable or all variables,
without ``tl.layers.initialize_global_variables(sess)``.
Parameters
----------
train_only : boolean
        Whether to print trainable variables only.
- If True, print the trainable variables.
- If False, print all variables.
"""
# tvar = tf.trainable_variables() if train_only else tf.all_variables()
if train_only:
t_vars = tf.trainable_variables()
logging.info(" [*] printing trainable variables")
else:
t_vars = tf.global_variables()
logging.info(" [*] printing global variables")
for idx, v in enumerate(t_vars):
logging.info(" var {:3}: {:15} {}".format(idx, str(v.get_shape()), v.name)) | python | def print_all_variables(train_only=False):
"""Print information of trainable or all variables,
without ``tl.layers.initialize_global_variables(sess)``.
Parameters
----------
train_only : boolean
        Whether to print trainable variables only.
- If True, print the trainable variables.
- If False, print all variables.
"""
# tvar = tf.trainable_variables() if train_only else tf.all_variables()
if train_only:
t_vars = tf.trainable_variables()
logging.info(" [*] printing trainable variables")
else:
t_vars = tf.global_variables()
logging.info(" [*] printing global variables")
for idx, v in enumerate(t_vars):
logging.info(" var {:3}: {:15} {}".format(idx, str(v.get_shape()), v.name)) | [
"def",
"print_all_variables",
"(",
"train_only",
"=",
"False",
")",
":",
"# tvar = tf.trainable_variables() if train_only else tf.all_variables()",
"if",
"train_only",
":",
"t_vars",
"=",
"tf",
".",
"trainable_variables",
"(",
")",
"logging",
".",
"info",
"(",
"\" [*] printing trainable variables\"",
")",
"else",
":",
"t_vars",
"=",
"tf",
".",
"global_variables",
"(",
")",
"logging",
".",
"info",
"(",
"\" [*] printing global variables\"",
")",
"for",
"idx",
",",
"v",
"in",
"enumerate",
"(",
"t_vars",
")",
":",
"logging",
".",
"info",
"(",
"\" var {:3}: {:15} {}\"",
".",
"format",
"(",
"idx",
",",
"str",
"(",
"v",
".",
"get_shape",
"(",
")",
")",
",",
"v",
".",
"name",
")",
")"
] | Print information of trainable or all variables,
without ``tl.layers.initialize_global_variables(sess)``.
Parameters
----------
train_only : boolean
    Whether to print trainable variables only.
- If True, print the trainable variables.
- If False, print all variables. | [
"Print",
"information",
"of",
"trainable",
"or",
"all",
"variables",
"without",
"tl",
".",
"layers",
".",
"initialize_global_variables",
"(",
"sess",
")",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/utils.py#L316-L338 | valid |
tensorlayer/tensorlayer | tensorlayer/layers/utils.py | ternary_operation | def ternary_operation(x):
"""Ternary operation use threshold computed with weights."""
g = tf.get_default_graph()
with g.gradient_override_map({"Sign": "Identity"}):
threshold = _compute_threshold(x)
x = tf.sign(tf.add(tf.sign(tf.add(x, threshold)), tf.sign(tf.add(x, -threshold))))
return x | python | def ternary_operation(x):
"""Ternary operation use threshold computed with weights."""
g = tf.get_default_graph()
with g.gradient_override_map({"Sign": "Identity"}):
threshold = _compute_threshold(x)
x = tf.sign(tf.add(tf.sign(tf.add(x, threshold)), tf.sign(tf.add(x, -threshold))))
return x | [
"def",
"ternary_operation",
"(",
"x",
")",
":",
"g",
"=",
"tf",
".",
"get_default_graph",
"(",
")",
"with",
"g",
".",
"gradient_override_map",
"(",
"{",
"\"Sign\"",
":",
"\"Identity\"",
"}",
")",
":",
"threshold",
"=",
"_compute_threshold",
"(",
"x",
")",
"x",
"=",
"tf",
".",
"sign",
"(",
"tf",
".",
"add",
"(",
"tf",
".",
"sign",
"(",
"tf",
".",
"add",
"(",
"x",
",",
"threshold",
")",
")",
",",
"tf",
".",
"sign",
"(",
"tf",
".",
"add",
"(",
"x",
",",
"-",
"threshold",
")",
")",
")",
")",
"return",
"x"
] | Ternary operation uses threshold computed with weights. | [
"Ternary",
"operation",
"use",
"threshold",
"computed",
"with",
"weights",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/utils.py#L383-L389 | valid |
tensorlayer/tensorlayer | tensorlayer/layers/utils.py | _compute_threshold | def _compute_threshold(x):
"""
ref: https://github.com/XJTUWYD/TWN
Computing the threshold.
"""
x_sum = tf.reduce_sum(tf.abs(x), reduction_indices=None, keepdims=False, name=None)
threshold = tf.div(x_sum, tf.cast(tf.size(x), tf.float32), name=None)
threshold = tf.multiply(0.7, threshold, name=None)
return threshold | python | def _compute_threshold(x):
"""
ref: https://github.com/XJTUWYD/TWN
Computing the threshold.
"""
x_sum = tf.reduce_sum(tf.abs(x), reduction_indices=None, keepdims=False, name=None)
threshold = tf.div(x_sum, tf.cast(tf.size(x), tf.float32), name=None)
threshold = tf.multiply(0.7, threshold, name=None)
return threshold | [
"def",
"_compute_threshold",
"(",
"x",
")",
":",
"x_sum",
"=",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"abs",
"(",
"x",
")",
",",
"reduction_indices",
"=",
"None",
",",
"keepdims",
"=",
"False",
",",
"name",
"=",
"None",
")",
"threshold",
"=",
"tf",
".",
"div",
"(",
"x_sum",
",",
"tf",
".",
"cast",
"(",
"tf",
".",
"size",
"(",
"x",
")",
",",
"tf",
".",
"float32",
")",
",",
"name",
"=",
"None",
")",
"threshold",
"=",
"tf",
".",
"multiply",
"(",
"0.7",
",",
"threshold",
",",
"name",
"=",
"None",
")",
"return",
"threshold"
] | ref: https://github.com/XJTUWYD/TWN
Computing the threshold. | [
"ref",
":",
"https",
":",
"//",
"github",
".",
"com",
"/",
"XJTUWYD",
"/",
"TWN",
"Computing",
"the",
"threshold",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/utils.py#L418-L426 | valid |
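Together with `ternary_operation` above, `_compute_threshold` quantizes weights to {-1, 0, +1}: `sign(sign(x + delta) + sign(x - delta))` is 0 for weights inside (-delta, delta) and +1/-1 outside. Continuing the NumPy sketch from the `compute_alpha` row (same made-up weights):

```python
import numpy as np

w = np.array([0.9, -0.05, 0.4, -0.8, 0.02, 0.6], dtype=np.float32)

delta = 0.7 * np.mean(np.abs(w))                            # _compute_threshold
tern  = np.sign(np.sign(w + delta) + np.sign(w - delta))    # ternary_operation

print(tern)   # [ 1.  0.  1. -1.  0.  1.]
# A ternarized layer then typically uses alpha * tern in place of w in the forward pass.
```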
tensorlayer/tensorlayer | examples/tutorial_work_with_onnx.py | freeze_graph | def freeze_graph(graph_path, checkpoint_path, output_path, end_node_names, is_binary_graph):
"""Reimplementation of the TensorFlow official freeze_graph function to freeze the graph and checkpoint together:
Parameters
-----------
graph_path : string
        the path where your graph file is saved.
    checkpoint_path : string
        the path where your checkpoint is saved.
    output_path : string
        the path where you want to save the output protobuf
    end_node_names : string
        the name of the end node in your graph that you want in the protobuf
    is_binary_graph : boolean
        whether your graph file is a binary graph
References
----------
- `onnx-tf exporting tutorial <https://github.com/onnx/tutorials/blob/master/tutorials/OnnxTensorflowExport.ipynb>`__
- `tensorflow freeze_graph <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph.py>`
"""
_freeze_graph(
input_graph=graph_path, input_saver='', input_binary=is_binary_graph, input_checkpoint=checkpoint_path,
output_graph=output_path, output_node_names=end_node_names, restore_op_name='save/restore_all',
filename_tensor_name='save/Const:0', clear_devices=True, initializer_nodes=None
) | python | def freeze_graph(graph_path, checkpoint_path, output_path, end_node_names, is_binary_graph):
"""Reimplementation of the TensorFlow official freeze_graph function to freeze the graph and checkpoint together:
Parameters
-----------
graph_path : string
        the path where your graph file is saved.
    checkpoint_path : string
        the path where your checkpoint is saved.
    output_path : string
        the path where you want to save the output protobuf
    end_node_names : string
        the name of the end node in your graph that you want in the protobuf
    is_binary_graph : boolean
        whether your graph file is a binary graph
References
----------
- `onnx-tf exporting tutorial <https://github.com/onnx/tutorials/blob/master/tutorials/OnnxTensorflowExport.ipynb>`__
- `tensorflow freeze_graph <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph.py>`
"""
_freeze_graph(
input_graph=graph_path, input_saver='', input_binary=is_binary_graph, input_checkpoint=checkpoint_path,
output_graph=output_path, output_node_names=end_node_names, restore_op_name='save/restore_all',
filename_tensor_name='save/Const:0', clear_devices=True, initializer_nodes=None
) | [
"def",
"freeze_graph",
"(",
"graph_path",
",",
"checkpoint_path",
",",
"output_path",
",",
"end_node_names",
",",
"is_binary_graph",
")",
":",
"_freeze_graph",
"(",
"input_graph",
"=",
"graph_path",
",",
"input_saver",
"=",
"''",
",",
"input_binary",
"=",
"is_binary_graph",
",",
"input_checkpoint",
"=",
"checkpoint_path",
",",
"output_graph",
"=",
"output_path",
",",
"output_node_names",
"=",
"end_node_names",
",",
"restore_op_name",
"=",
"'save/restore_all'",
",",
"filename_tensor_name",
"=",
"'save/Const:0'",
",",
"clear_devices",
"=",
"True",
",",
"initializer_nodes",
"=",
"None",
")"
] | Reimplementation of the TensorFlow official freeze_graph function to freeze the graph and checkpoint together:
Parameters
-----------
graph_path : string
    the path where your graph file is saved.
    checkpoint_path : string
    the path where your checkpoint is saved.
    output_path : string
    the path where you want to save the output protobuf
    end_node_names : string
    the name of the end node in your graph that you want in the protobuf
    is_binary_graph : boolean
    whether your graph file is a binary graph
References
----------
- `onnx-tf exporting tutorial <https://github.com/onnx/tutorials/blob/master/tutorials/OnnxTensorflowExport.ipynb>`__
- `tensorflow freeze_graph <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph.py>` | [
"Reimplementation",
"of",
"the",
"TensorFlow",
"official",
"freeze_graph",
"function",
"to",
"freeze",
"the",
"graph",
"and",
"checkpoint",
"together",
":"
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/tutorial_work_with_onnx.py#L251-L276 | valid |
tensorlayer/tensorlayer | examples/tutorial_work_with_onnx.py | convert_model_to_onnx | def convert_model_to_onnx(frozen_graph_path, end_node_names, onnx_output_path):
"""Reimplementation of the TensorFlow-onnx official tutorial convert the proto buff to onnx file:
Parameters
-----------
frozen_graph_path : string
        the path where your frozen graph file is saved.
    end_node_names : string
        the name of the end node in your graph that you want in the protobuf
onnx_output_path : string
the path where you want to save the onnx file.
References
-----------
- `onnx-tf exporting tutorial <https://github.com/onnx/tutorials/blob/master/tutorials/OnnxTensorflowExport.ipynb>`
"""
with tf.gfile.GFile(frozen_graph_path, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
onnx_model = tensorflow_graph_to_onnx_model(graph_def, end_node_names, opset=6)
file = open(onnx_output_path, "wb")
file.write(onnx_model.SerializeToString())
file.close() | python | def convert_model_to_onnx(frozen_graph_path, end_node_names, onnx_output_path):
"""Reimplementation of the TensorFlow-onnx official tutorial convert the proto buff to onnx file:
Parameters
-----------
frozen_graph_path : string
        the path where your frozen graph file is saved.
    end_node_names : string
        the name of the end node in your graph that you want in the protobuf
onnx_output_path : string
the path where you want to save the onnx file.
References
-----------
- `onnx-tf exporting tutorial <https://github.com/onnx/tutorials/blob/master/tutorials/OnnxTensorflowExport.ipynb>`
"""
with tf.gfile.GFile(frozen_graph_path, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
onnx_model = tensorflow_graph_to_onnx_model(graph_def, end_node_names, opset=6)
file = open(onnx_output_path, "wb")
file.write(onnx_model.SerializeToString())
file.close() | [
"def",
"convert_model_to_onnx",
"(",
"frozen_graph_path",
",",
"end_node_names",
",",
"onnx_output_path",
")",
":",
"with",
"tf",
".",
"gfile",
".",
"GFile",
"(",
"frozen_graph_path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"graph_def",
"=",
"tf",
".",
"GraphDef",
"(",
")",
"graph_def",
".",
"ParseFromString",
"(",
"f",
".",
"read",
"(",
")",
")",
"onnx_model",
"=",
"tensorflow_graph_to_onnx_model",
"(",
"graph_def",
",",
"end_node_names",
",",
"opset",
"=",
"6",
")",
"file",
"=",
"open",
"(",
"onnx_output_path",
",",
"\"wb\"",
")",
"file",
".",
"write",
"(",
"onnx_model",
".",
"SerializeToString",
"(",
")",
")",
"file",
".",
"close",
"(",
")"
] | Reimplementation of the TensorFlow-onnx official tutorial convert the proto buff to onnx file:
Parameters
-----------
frozen_graph_path : string
    the path where your frozen graph file is saved.
    end_node_names : string
    the name of the end node in your graph that you want in the protobuf
onnx_output_path : string
the path where you want to save the onnx file.
References
-----------
- `onnx-tf exporting tutorial <https://github.com/onnx/tutorials/blob/master/tutorials/OnnxTensorflowExport.ipynb>` | [
"Reimplementation",
"of",
"the",
"TensorFlow",
"-",
"onnx",
"official",
"tutorial",
"convert",
"the",
"proto",
"buff",
"to",
"onnx",
"file",
":"
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/tutorial_work_with_onnx.py#L279-L301 | valid |
tensorlayer/tensorlayer | examples/tutorial_work_with_onnx.py | convert_onnx_to_model | def convert_onnx_to_model(onnx_input_path):
"""Reimplementation of the TensorFlow-onnx official tutorial convert the onnx file to specific: model
Parameters
-----------
onnx_input_path : string
the path where you save the onnx file.
References
-----------
- `onnx-tf exporting tutorial <https://github.com/onnx/tutorials/blob/master/tutorials/OnnxTensorflowExport.ipynb>`__
"""
model = onnx.load(onnx_input_path)
tf_rep = prepare(model)
# Image Path
img = np.load("./assets/image.npz")
output = tf_rep.run(img.reshape([1, 784]))
print("The digit is classified as ", np.argmax(output)) | python | def convert_onnx_to_model(onnx_input_path):
"""Reimplementation of the TensorFlow-onnx official tutorial convert the onnx file to specific: model
Parameters
-----------
onnx_input_path : string
the path where you save the onnx file.
References
-----------
- `onnx-tf exporting tutorial <https://github.com/onnx/tutorials/blob/master/tutorials/OnnxTensorflowExport.ipynb>`__
"""
model = onnx.load(onnx_input_path)
tf_rep = prepare(model)
# Image Path
img = np.load("./assets/image.npz")
output = tf_rep.run(img.reshape([1, 784]))
print("The digit is classified as ", np.argmax(output)) | [
"def",
"convert_onnx_to_model",
"(",
"onnx_input_path",
")",
":",
"model",
"=",
"onnx",
".",
"load",
"(",
"onnx_input_path",
")",
"tf_rep",
"=",
"prepare",
"(",
"model",
")",
"# Image Path",
"img",
"=",
"np",
".",
"load",
"(",
"\"./assets/image.npz\"",
")",
"output",
"=",
"tf_rep",
".",
"run",
"(",
"img",
".",
"reshape",
"(",
"[",
"1",
",",
"784",
"]",
")",
")",
"print",
"(",
"\"The digit is classified as \"",
",",
"np",
".",
"argmax",
"(",
"output",
")",
")"
] | Reimplementation of the TensorFlow-onnx official tutorial convert the onnx file to specific: model
Parameters
-----------
onnx_input_path : string
the path where you save the onnx file.
References
-----------
- `onnx-tf exporting tutorial <https://github.com/onnx/tutorials/blob/master/tutorials/OnnxTensorflowExport.ipynb>`__ | [
"Reimplementation",
"of",
"the",
"TensorFlow",
"-",
"onnx",
"official",
"tutorial",
"convert",
"the",
"onnx",
"file",
"to",
"specific",
":",
"model"
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/tutorial_work_with_onnx.py#L304-L321 | valid |
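The three tutorial helpers above form one export pipeline: freeze the graph and checkpoint into a protobuf, convert that protobuf to ONNX, then reload the ONNX file and run inference. A hedged sketch of how they might be chained; every path and node name below is a placeholder, and a TF 1.x / onnx-tf environment is assumed:

```python
# Placeholder paths and node names; adjust them to your own training run.
graph_path = 'graph.proto'             # written earlier, e.g. with tf.train.write_graph
checkpoint_path = 'model/model.ckpt'   # written earlier with tf.train.Saver
frozen_path = 'frozen_graph.pb'
onnx_path = 'mnist.onnx'
end_node_names = 'output/bias_add'     # name of the final op you want to keep

freeze_graph(graph_path, checkpoint_path, frozen_path, end_node_names,
             is_binary_graph=True)                             # graph + weights -> frozen .pb
convert_model_to_onnx(frozen_path, end_node_names, onnx_path)  # frozen .pb -> .onnx
convert_onnx_to_model(onnx_path)                               # reload .onnx and classify a digit
```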
tensorlayer/tensorlayer | tensorlayer/decorators/utils.py | _add_deprecated_function_notice_to_docstring | def _add_deprecated_function_notice_to_docstring(doc, date, instructions):
"""Adds a deprecation notice to a docstring for deprecated functions."""
if instructions:
deprecation_message = """
.. warning::
**THIS FUNCTION IS DEPRECATED:** It will be removed after %s.
*Instructions for updating:* %s.
""" % (('in a future version' if date is None else ('after %s' % date)), instructions)
else:
deprecation_message = """
.. warning::
**THIS FUNCTION IS DEPRECATED:** It will be removed after %s.
""" % (('in a future version' if date is None else ('after %s' % date)))
main_text = [deprecation_message]
return _add_notice_to_docstring(doc, 'DEPRECATED FUNCTION', main_text) | python | def _add_deprecated_function_notice_to_docstring(doc, date, instructions):
"""Adds a deprecation notice to a docstring for deprecated functions."""
if instructions:
deprecation_message = """
.. warning::
**THIS FUNCTION IS DEPRECATED:** It will be removed after %s.
*Instructions for updating:* %s.
""" % (('in a future version' if date is None else ('after %s' % date)), instructions)
else:
deprecation_message = """
.. warning::
**THIS FUNCTION IS DEPRECATED:** It will be removed after %s.
""" % (('in a future version' if date is None else ('after %s' % date)))
main_text = [deprecation_message]
return _add_notice_to_docstring(doc, 'DEPRECATED FUNCTION', main_text) | [
"def",
"_add_deprecated_function_notice_to_docstring",
"(",
"doc",
",",
"date",
",",
"instructions",
")",
":",
"if",
"instructions",
":",
"deprecation_message",
"=",
"\"\"\"\n .. warning::\n **THIS FUNCTION IS DEPRECATED:** It will be removed after %s.\n *Instructions for updating:* %s.\n \"\"\"",
"%",
"(",
"(",
"'in a future version'",
"if",
"date",
"is",
"None",
"else",
"(",
"'after %s'",
"%",
"date",
")",
")",
",",
"instructions",
")",
"else",
":",
"deprecation_message",
"=",
"\"\"\"\n .. warning::\n **THIS FUNCTION IS DEPRECATED:** It will be removed after %s.\n \"\"\"",
"%",
"(",
"(",
"'in a future version'",
"if",
"date",
"is",
"None",
"else",
"(",
"'after %s'",
"%",
"date",
")",
")",
")",
"main_text",
"=",
"[",
"deprecation_message",
"]",
"return",
"_add_notice_to_docstring",
"(",
"doc",
",",
"'DEPRECATED FUNCTION'",
",",
"main_text",
")"
] | Adds a deprecation notice to a docstring for deprecated functions. | [
"Adds",
"a",
"deprecation",
"notice",
"to",
"a",
"docstring",
"for",
"deprecated",
"functions",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/decorators/utils.py#L41-L59 | valid |
tensorlayer/tensorlayer | tensorlayer/decorators/utils.py | _add_notice_to_docstring | def _add_notice_to_docstring(doc, no_doc_str, notice):
"""Adds a deprecation notice to a docstring."""
if not doc:
lines = [no_doc_str]
else:
lines = _normalize_docstring(doc).splitlines()
notice = [''] + notice
if len(lines) > 1:
# Make sure that we keep our distance from the main body
if lines[1].strip():
notice.append('')
lines[1:1] = notice
else:
lines += notice
return '\n'.join(lines) | python | def _add_notice_to_docstring(doc, no_doc_str, notice):
"""Adds a deprecation notice to a docstring."""
if not doc:
lines = [no_doc_str]
else:
lines = _normalize_docstring(doc).splitlines()
notice = [''] + notice
if len(lines) > 1:
# Make sure that we keep our distance from the main body
if lines[1].strip():
notice.append('')
lines[1:1] = notice
else:
lines += notice
return '\n'.join(lines) | [
"def",
"_add_notice_to_docstring",
"(",
"doc",
",",
"no_doc_str",
",",
"notice",
")",
":",
"if",
"not",
"doc",
":",
"lines",
"=",
"[",
"no_doc_str",
"]",
"else",
":",
"lines",
"=",
"_normalize_docstring",
"(",
"doc",
")",
".",
"splitlines",
"(",
")",
"notice",
"=",
"[",
"''",
"]",
"+",
"notice",
"if",
"len",
"(",
"lines",
")",
">",
"1",
":",
"# Make sure that we keep our distance from the main body",
"if",
"lines",
"[",
"1",
"]",
".",
"strip",
"(",
")",
":",
"notice",
".",
"append",
"(",
"''",
")",
"lines",
"[",
"1",
":",
"1",
"]",
"=",
"notice",
"else",
":",
"lines",
"+=",
"notice",
"return",
"'\\n'",
".",
"join",
"(",
"lines",
")"
] | Adds a deprecation notice to a docstring. | [
"Adds",
"a",
"deprecation",
"notice",
"to",
"a",
"docstring",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/decorators/utils.py#L62-L81 | valid |
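A short sketch of what the two docstring helpers above produce, assuming the module's `_normalize_docstring` helper (referenced but not shown in these rows) is available; the docstring and instructions below are hypothetical:

```python
doc = """Compute the widget score.

    Longer description of the computation.
    """

# The helper keeps the summary line and splices a Sphinx ".. warning::" block
# (built from the template above) in front of the original body.
patched = _add_deprecated_function_notice_to_docstring(
    doc, date='2025-12-31', instructions='use the new widget API'
)
print(patched)
```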
tensorlayer/tensorlayer | tensorlayer/array_ops.py | alphas | def alphas(shape, alpha_value, name=None):
"""Creates a tensor with all elements set to `alpha_value`.
This operation returns a tensor of type `dtype` with shape `shape` and all
elements set to alpha.
Parameters
----------
shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type `int32`.
The shape of the desired tensor
alpha_value: `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, int32`, `int64`
The value used to fill the resulting `Tensor`.
name: str
A name for the operation (optional).
Returns
-------
A `Tensor` with all elements set to alpha.
Examples
--------
    >>> tl.alphas([2, 3], 0.5) # [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]
"""
with ops.name_scope(name, "alphas", [shape]) as name:
alpha_tensor = convert_to_tensor(alpha_value)
alpha_dtype = dtypes.as_dtype(alpha_tensor.dtype).base_dtype
if not isinstance(shape, ops.Tensor):
try:
shape = constant_op._tensor_shape_tensor_conversion_function(tensor_shape.TensorShape(shape))
except (TypeError, ValueError):
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
if not shape._shape_tuple():
shape = reshape(shape, [-1]) # Ensure it's a vector
try:
output = constant(alpha_value, shape=shape, dtype=alpha_dtype, name=name)
except (TypeError, ValueError):
output = fill(shape, constant(alpha_value, dtype=alpha_dtype), name=name)
if output.dtype.base_dtype != alpha_dtype:
raise AssertionError("Dtypes do not corresponds: %s and %s" % (output.dtype.base_dtype, alpha_dtype))
return output | python | def alphas(shape, alpha_value, name=None):
"""Creates a tensor with all elements set to `alpha_value`.
This operation returns a tensor of type `dtype` with shape `shape` and all
elements set to alpha.
Parameters
----------
shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type `int32`.
The shape of the desired tensor
alpha_value: `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, int32`, `int64`
The value used to fill the resulting `Tensor`.
name: str
A name for the operation (optional).
Returns
-------
A `Tensor` with all elements set to alpha.
Examples
--------
    >>> tl.alphas([2, 3], 0.5) # [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]
"""
with ops.name_scope(name, "alphas", [shape]) as name:
alpha_tensor = convert_to_tensor(alpha_value)
alpha_dtype = dtypes.as_dtype(alpha_tensor.dtype).base_dtype
if not isinstance(shape, ops.Tensor):
try:
shape = constant_op._tensor_shape_tensor_conversion_function(tensor_shape.TensorShape(shape))
except (TypeError, ValueError):
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
if not shape._shape_tuple():
shape = reshape(shape, [-1]) # Ensure it's a vector
try:
output = constant(alpha_value, shape=shape, dtype=alpha_dtype, name=name)
except (TypeError, ValueError):
output = fill(shape, constant(alpha_value, dtype=alpha_dtype), name=name)
if output.dtype.base_dtype != alpha_dtype:
raise AssertionError("Dtypes do not corresponds: %s and %s" % (output.dtype.base_dtype, alpha_dtype))
return output | [
"def",
"alphas",
"(",
"shape",
",",
"alpha_value",
",",
"name",
"=",
"None",
")",
":",
"with",
"ops",
".",
"name_scope",
"(",
"name",
",",
"\"alphas\"",
",",
"[",
"shape",
"]",
")",
"as",
"name",
":",
"alpha_tensor",
"=",
"convert_to_tensor",
"(",
"alpha_value",
")",
"alpha_dtype",
"=",
"dtypes",
".",
"as_dtype",
"(",
"alpha_tensor",
".",
"dtype",
")",
".",
"base_dtype",
"if",
"not",
"isinstance",
"(",
"shape",
",",
"ops",
".",
"Tensor",
")",
":",
"try",
":",
"shape",
"=",
"constant_op",
".",
"_tensor_shape_tensor_conversion_function",
"(",
"tensor_shape",
".",
"TensorShape",
"(",
"shape",
")",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"shape",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"shape",
",",
"dtype",
"=",
"dtypes",
".",
"int32",
")",
"if",
"not",
"shape",
".",
"_shape_tuple",
"(",
")",
":",
"shape",
"=",
"reshape",
"(",
"shape",
",",
"[",
"-",
"1",
"]",
")",
"# Ensure it's a vector",
"try",
":",
"output",
"=",
"constant",
"(",
"alpha_value",
",",
"shape",
"=",
"shape",
",",
"dtype",
"=",
"alpha_dtype",
",",
"name",
"=",
"name",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"output",
"=",
"fill",
"(",
"shape",
",",
"constant",
"(",
"alpha_value",
",",
"dtype",
"=",
"alpha_dtype",
")",
",",
"name",
"=",
"name",
")",
"if",
"output",
".",
"dtype",
".",
"base_dtype",
"!=",
"alpha_dtype",
":",
"raise",
"AssertionError",
"(",
"\"Dtypes do not corresponds: %s and %s\"",
"%",
"(",
"output",
".",
"dtype",
".",
"base_dtype",
",",
"alpha_dtype",
")",
")",
"return",
"output"
] | Creates a tensor with all elements set to `alpha_value`.
This operation returns a tensor of type `dtype` with shape `shape` and all
elements set to alpha.
Parameters
----------
shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type `int32`.
The shape of the desired tensor
alpha_value: `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, int32`, `int64`
The value used to fill the resulting `Tensor`.
name: str
A name for the operation (optional).
Returns
-------
A `Tensor` with all elements set to alpha.
Examples
--------
    >>> tl.alphas([2, 3], 0.5) # [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]] | [
"Creates",
"a",
"tensor",
"with",
"all",
"elements",
"set",
"to",
"alpha_value",
".",
"This",
"operation",
"returns",
"a",
"tensor",
"of",
"type",
"dtype",
"with",
"shape",
"shape",
"and",
"all",
"elements",
"set",
"to",
"alpha",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/array_ops.py#L19-L64 | valid |
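`alphas` is essentially a shape-first constant fill; a NumPy analogue (covering `alphas_like` from the next row as well) for intuition:

```python
import numpy as np

a = np.full((2, 3), 0.5, dtype=np.float32)   # like tl.alphas([2, 3], 0.5)
t = np.array([[1, 2, 3], [4, 5, 6]])
b = np.full_like(t, 9)                       # like tl.alphas_like(t, 9)
print(a)
print(b)
```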
tensorlayer/tensorlayer | tensorlayer/array_ops.py | alphas_like | def alphas_like(tensor, alpha_value, name=None, optimize=True):
"""Creates a tensor with all elements set to `alpha_value`.
Given a single tensor (`tensor`), this operation returns a tensor of the same
type and shape as `tensor` with all elements set to `alpha_value`.
Parameters
----------
tensor: tf.Tensor
The Tensorflow Tensor that will be used as a template.
alpha_value: `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, int32`, `int64`
The value used to fill the resulting `Tensor`.
name: str
A name for the operation (optional).
optimize: bool
if true, attempt to statically determine the shape of 'tensor' and encode it as a constant.
Returns
-------
A `Tensor` with all elements set to `alpha_value`.
Examples
--------
>>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
>>> tl.alphas_like(tensor, 0.5) # [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]
"""
with ops.name_scope(name, "alphas_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
if context.in_eager_mode(): # and dtype is not None and dtype != tensor.dtype:
ret = alphas(shape_internal(tensor, optimize=optimize), alpha_value=alpha_value, name=name)
else: # if context.in_graph_mode():
# For now, variant types must be created via zeros_like; as we need to
# pass the input variant object to the proper zeros callback.
if (optimize and tensor.shape.is_fully_defined()):
# We can produce a zeros tensor independent of the value of 'tensor',
# since the shape is known statically.
ret = alphas(tensor.shape, alpha_value=alpha_value, name=name)
# elif dtype is not None and dtype != tensor.dtype and dtype != dtypes.variant:
else:
ret = alphas(shape_internal(tensor, optimize=optimize), alpha_value=alpha_value, name=name)
ret.set_shape(tensor.get_shape())
return ret | python | def alphas_like(tensor, alpha_value, name=None, optimize=True):
"""Creates a tensor with all elements set to `alpha_value`.
Given a single tensor (`tensor`), this operation returns a tensor of the same
type and shape as `tensor` with all elements set to `alpha_value`.
Parameters
----------
tensor: tf.Tensor
The Tensorflow Tensor that will be used as a template.
alpha_value: `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`
The value used to fill the resulting `Tensor`.
name: str
A name for the operation (optional).
optimize: bool
if true, attempt to statically determine the shape of 'tensor' and encode it as a constant.
Returns
-------
A `Tensor` with all elements set to `alpha_value`.
Examples
--------
>>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
>>> tl.alphas_like(tensor, 0.5) # [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]
"""
with ops.name_scope(name, "alphas_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
if context.in_eager_mode(): # and dtype is not None and dtype != tensor.dtype:
ret = alphas(shape_internal(tensor, optimize=optimize), alpha_value=alpha_value, name=name)
else: # if context.in_graph_mode():
# For now, variant types must be created via zeros_like; as we need to
# pass the input variant object to the proper zeros callback.
if (optimize and tensor.shape.is_fully_defined()):
# We can produce a zeros tensor independent of the value of 'tensor',
# since the shape is known statically.
ret = alphas(tensor.shape, alpha_value=alpha_value, name=name)
# elif dtype is not None and dtype != tensor.dtype and dtype != dtypes.variant:
else:
ret = alphas(shape_internal(tensor, optimize=optimize), alpha_value=alpha_value, name=name)
ret.set_shape(tensor.get_shape())
return ret | [
"def",
"alphas_like",
"(",
"tensor",
",",
"alpha_value",
",",
"name",
"=",
"None",
",",
"optimize",
"=",
"True",
")",
":",
"with",
"ops",
".",
"name_scope",
"(",
"name",
",",
"\"alphas_like\"",
",",
"[",
"tensor",
"]",
")",
"as",
"name",
":",
"tensor",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"tensor",
",",
"name",
"=",
"\"tensor\"",
")",
"if",
"context",
".",
"in_eager_mode",
"(",
")",
":",
"# and dtype is not None and dtype != tensor.dtype:",
"ret",
"=",
"alphas",
"(",
"shape_internal",
"(",
"tensor",
",",
"optimize",
"=",
"optimize",
")",
",",
"alpha_value",
"=",
"alpha_value",
",",
"name",
"=",
"name",
")",
"else",
":",
"# if context.in_graph_mode():",
"# For now, variant types must be created via zeros_like; as we need to",
"# pass the input variant object to the proper zeros callback.",
"if",
"(",
"optimize",
"and",
"tensor",
".",
"shape",
".",
"is_fully_defined",
"(",
")",
")",
":",
"# We can produce a zeros tensor independent of the value of 'tensor',",
"# since the shape is known statically.",
"ret",
"=",
"alphas",
"(",
"tensor",
".",
"shape",
",",
"alpha_value",
"=",
"alpha_value",
",",
"name",
"=",
"name",
")",
"# elif dtype is not None and dtype != tensor.dtype and dtype != dtypes.variant:",
"else",
":",
"ret",
"=",
"alphas",
"(",
"shape_internal",
"(",
"tensor",
",",
"optimize",
"=",
"optimize",
")",
",",
"alpha_value",
"=",
"alpha_value",
",",
"name",
"=",
"name",
")",
"ret",
".",
"set_shape",
"(",
"tensor",
".",
"get_shape",
"(",
")",
")",
"return",
"ret"
] | Creates a tensor with all elements set to `alpha_value`.
Given a single tensor (`tensor`), this operation returns a tensor of the same
type and shape as `tensor` with all elements set to `alpha_value`.
Parameters
----------
tensor: tf.Tensor
The Tensorflow Tensor that will be used as a template.
alpha_value: `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`
The value used to fill the resulting `Tensor`.
name: str
A name for the operation (optional).
optimize: bool
if true, attempt to statically determine the shape of 'tensor' and encode it as a constant.
Returns
-------
A `Tensor` with all elements set to `alpha_value`.
Examples
--------
>>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
>>> tl.alphas_like(tensor, 0.5) # [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]] | [
"Creates",
"a",
"tensor",
"with",
"all",
"elements",
"set",
"to",
"alpha_value",
".",
"Given",
"a",
"single",
"tensor",
"(",
"tensor",
")",
"this",
"operation",
"returns",
"a",
"tensor",
"of",
"the",
"same",
"type",
"and",
"shape",
"as",
"tensor",
"with",
"all",
"elements",
"set",
"to",
"alpha_value",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/array_ops.py#L67-L114 | valid |
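A minimal usage sketch for `tl.alphas_like` (an illustration, not taken from the source): the template tensor is made `float32` so its dtype matches the float fill value, and TensorFlow 1.x graph mode is assumed.

import tensorflow as tf
import tensorlayer as tl

template = tf.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.float32)
half = tl.alphas_like(template, 0.5)   # same shape as `template`, every element 0.5

with tf.Session() as sess:
    print(sess.run(half))  # -> [[0.5 0.5 0.5], [0.5 0.5 0.5]]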
tensorlayer/tensorlayer | examples/data_process/tutorial_fast_affine_transform.py | example1 | def example1():
""" Example 1: Applying transformation one-by-one is very SLOW ! """
st = time.time()
for _ in range(100): # Try 100 times and compute the averaged speed
xx = tl.prepro.rotation(image, rg=-20, is_random=False)
xx = tl.prepro.flip_axis(xx, axis=1, is_random=False)
xx = tl.prepro.shear2(xx, shear=(0., -0.2), is_random=False)
xx = tl.prepro.zoom(xx, zoom_range=1 / 0.8)
xx = tl.prepro.shift(xx, wrg=-0.1, hrg=0, is_random=False)
print("apply transforms one-by-one took %fs for each image" % ((time.time() - st) / 100))
tl.vis.save_image(xx, '_result_slow.png') | python | def example1():
""" Example 1: Applying transformation one-by-one is very SLOW ! """
st = time.time()
for _ in range(100): # Try 100 times and compute the averaged speed
xx = tl.prepro.rotation(image, rg=-20, is_random=False)
xx = tl.prepro.flip_axis(xx, axis=1, is_random=False)
xx = tl.prepro.shear2(xx, shear=(0., -0.2), is_random=False)
xx = tl.prepro.zoom(xx, zoom_range=1 / 0.8)
xx = tl.prepro.shift(xx, wrg=-0.1, hrg=0, is_random=False)
print("apply transforms one-by-one took %fs for each image" % ((time.time() - st) / 100))
tl.vis.save_image(xx, '_result_slow.png') | [
"def",
"example1",
"(",
")",
":",
"st",
"=",
"time",
".",
"time",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"100",
")",
":",
"# Try 100 times and compute the averaged speed",
"xx",
"=",
"tl",
".",
"prepro",
".",
"rotation",
"(",
"image",
",",
"rg",
"=",
"-",
"20",
",",
"is_random",
"=",
"False",
")",
"xx",
"=",
"tl",
".",
"prepro",
".",
"flip_axis",
"(",
"xx",
",",
"axis",
"=",
"1",
",",
"is_random",
"=",
"False",
")",
"xx",
"=",
"tl",
".",
"prepro",
".",
"shear2",
"(",
"xx",
",",
"shear",
"=",
"(",
"0.",
",",
"-",
"0.2",
")",
",",
"is_random",
"=",
"False",
")",
"xx",
"=",
"tl",
".",
"prepro",
".",
"zoom",
"(",
"xx",
",",
"zoom_range",
"=",
"1",
"/",
"0.8",
")",
"xx",
"=",
"tl",
".",
"prepro",
".",
"shift",
"(",
"xx",
",",
"wrg",
"=",
"-",
"0.1",
",",
"hrg",
"=",
"0",
",",
"is_random",
"=",
"False",
")",
"print",
"(",
"\"apply transforms one-by-one took %fs for each image\"",
"%",
"(",
"(",
"time",
".",
"time",
"(",
")",
"-",
"st",
")",
"/",
"100",
")",
")",
"tl",
".",
"vis",
".",
"save_image",
"(",
"xx",
",",
"'_result_slow.png'",
")"
] | Example 1: Applying transformation one-by-one is very SLOW ! | [
"Example",
"1",
":",
"Applying",
"transformation",
"one",
"-",
"by",
"-",
"one",
"is",
"very",
"SLOW",
"!"
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/data_process/tutorial_fast_affine_transform.py#L39-L49 | valid |
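A small sketch of how the one-call-per-transform pipeline above can be wrapped into a reusable function and timed; `tl.vis.read_image` and the `tiger.jpeg` sample file are assumptions carried over from the tutorial, and the loop count is illustrative.

import time
import tensorlayer as tl

image = tl.vis.read_image('tiger.jpeg')   # sample image assumed to be on disk

def augment_slow(img):
    """Apply the same fixed transforms as example1, one prepro call at a time."""
    img = tl.prepro.rotation(img, rg=-20, is_random=False)
    img = tl.prepro.flip_axis(img, axis=1, is_random=False)
    img = tl.prepro.shear2(img, shear=(0., -0.2), is_random=False)
    img = tl.prepro.zoom(img, zoom_range=1 / 0.8)
    return tl.prepro.shift(img, wrg=-0.1, hrg=0, is_random=False)

start = time.time()
for _ in range(100):
    out = augment_slow(image)
print("sequential pipeline: %fs per image" % ((time.time() - start) / 100))
tl.vis.save_image(out, '_result_slow_wrapped.png')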
tensorlayer/tensorlayer | examples/data_process/tutorial_fast_affine_transform.py | example2 | def example2():
""" Example 2: Applying all transforms in one is very FAST ! """
st = time.time()
for _ in range(100): # Repeat 100 times and compute the averaged speed
transform_matrix = create_transformation_matrix()
result = tl.prepro.affine_transform_cv2(image, transform_matrix) # Transform the image using a single operation
print("apply all transforms once took %fs for each image" % ((time.time() - st) / 100)) # usually 50x faster
tl.vis.save_image(result, '_result_fast.png') | python | def example2():
""" Example 2: Applying all transforms in one is very FAST ! """
st = time.time()
for _ in range(100): # Repeat 100 times and compute the averaged speed
transform_matrix = create_transformation_matrix()
result = tl.prepro.affine_transform_cv2(image, transform_matrix) # Transform the image using a single operation
print("apply all transforms once took %fs for each image" % ((time.time() - st) / 100)) # usually 50x faster
tl.vis.save_image(result, '_result_fast.png') | [
"def",
"example2",
"(",
")",
":",
"st",
"=",
"time",
".",
"time",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"100",
")",
":",
"# Repeat 100 times and compute the averaged speed",
"transform_matrix",
"=",
"create_transformation_matrix",
"(",
")",
"result",
"=",
"tl",
".",
"prepro",
".",
"affine_transform_cv2",
"(",
"image",
",",
"transform_matrix",
")",
"# Transform the image using a single operation",
"print",
"(",
"\"apply all transforms once took %fs for each image\"",
"%",
"(",
"(",
"time",
".",
"time",
"(",
")",
"-",
"st",
")",
"/",
"100",
")",
")",
"# usually 50x faster",
"tl",
".",
"vis",
".",
"save_image",
"(",
"result",
",",
"'_result_fast.png'",
")"
] | Example 2: Applying all transforms in one is very FAST ! | [
"Example",
"2",
":",
"Applying",
"all",
"transforms",
"in",
"one",
"is",
"very",
"FAST",
"!"
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/data_process/tutorial_fast_affine_transform.py#L52-L59 | valid |
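The tutorial's `create_transformation_matrix()` helper is not shown in these rows, so below is a rough NumPy sketch (an assumption, not the tutorial's exact code) of how several affine transforms can be composed into a single 3x3 matrix about the image centre; the result can then be handed to `tl.prepro.affine_transform_cv2` as in the example above, though the tutorial's own sign and ordering conventions may differ.

import numpy as np

def compose_affine(h, w, angle_deg=20.0, zoom=1.25, shift_frac=(0.1, 0.0)):
    """Multiply per-transform 3x3 matrices into one transform about the image centre."""
    cx, cy = w / 2.0, h / 2.0
    to_centre = np.array([[1, 0, -cx], [0, 1, -cy], [0, 0, 1]], dtype=np.float64)
    from_centre = np.array([[1, 0, cx], [0, 1, cy], [0, 0, 1]], dtype=np.float64)

    theta = np.deg2rad(angle_deg)
    rotate = np.array([[np.cos(theta), -np.sin(theta), 0],
                       [np.sin(theta), np.cos(theta), 0],
                       [0, 0, 1]])
    flip = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float64)   # horizontal flip
    scale = np.array([[zoom, 0, 0], [0, zoom, 0], [0, 0, 1]], dtype=np.float64)
    shift = np.array([[1, 0, shift_frac[0] * w], [0, 1, shift_frac[1] * h], [0, 0, 1]])

    # The right-most factor is applied to the input coordinates first.
    return from_centre @ shift @ scale @ flip @ rotate @ to_centre

# e.g. M = compose_affine(*image.shape[:2]); result = tl.prepro.affine_transform_cv2(image, M)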
tensorlayer/tensorlayer | examples/data_process/tutorial_fast_affine_transform.py | example3 | def example3():
""" Example 3: Using TF dataset API to load and process image for training """
n_data = 100
imgs_file_list = ['tiger.jpeg'] * n_data
train_targets = [np.ones(1)] * n_data
def generator():
if len(imgs_file_list) != len(train_targets):
raise RuntimeError('len(imgs_file_list) != len(train_targets)')
for _input, _target in zip(imgs_file_list, train_targets):
yield _input, _target
def _data_aug_fn(image):
transform_matrix = create_transformation_matrix()
result = tl.prepro.affine_transform_cv2(image, transform_matrix) # Transform the image using a single operation
return result
def _map_fn(image_path, target):
image = tf.read_file(image_path)
image = tf.image.decode_jpeg(image, channels=3) # Get RGB with 0~1
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = tf.py_func(_data_aug_fn, [image], [tf.float32])
# image = tf.reshape(image, (h, w, 3))
target = tf.reshape(target, ())
return image, target
n_epoch = 10
batch_size = 5
dataset = tf.data.Dataset().from_generator(generator, output_types=(tf.string, tf.int64))
dataset = dataset.shuffle(buffer_size=4096) # shuffle before loading images
dataset = dataset.repeat(n_epoch)
dataset = dataset.map(_map_fn, num_parallel_calls=multiprocessing.cpu_count())
dataset = dataset.batch(batch_size) # TODO: consider using tf.contrib.map_and_batch
dataset = dataset.prefetch(1) # prefetch 1 batch
iterator = dataset.make_one_shot_iterator()
one_element = iterator.get_next()
sess = tf.Session()
# feed `one_element` into a network, for demo, we simply get the data as follows
n_step = round(n_epoch * n_data / batch_size)
st = time.time()
for _ in range(n_step):
_images, _targets = sess.run(one_element)
print("dataset APIs took %fs for each image" % ((time.time() - st) / batch_size / n_step)) | python | def example3():
""" Example 3: Using TF dataset API to load and process image for training """
n_data = 100
imgs_file_list = ['tiger.jpeg'] * n_data
train_targets = [np.ones(1)] * n_data
def generator():
if len(imgs_file_list) != len(train_targets):
raise RuntimeError('len(imgs_file_list) != len(train_targets)')
for _input, _target in zip(imgs_file_list, train_targets):
yield _input, _target
def _data_aug_fn(image):
transform_matrix = create_transformation_matrix()
result = tl.prepro.affine_transform_cv2(image, transform_matrix) # Transform the image using a single operation
return result
def _map_fn(image_path, target):
image = tf.read_file(image_path)
image = tf.image.decode_jpeg(image, channels=3) # Get RGB with 0~1
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = tf.py_func(_data_aug_fn, [image], [tf.float32])
# image = tf.reshape(image, (h, w, 3))
target = tf.reshape(target, ())
return image, target
n_epoch = 10
batch_size = 5
dataset = tf.data.Dataset().from_generator(generator, output_types=(tf.string, tf.int64))
dataset = dataset.shuffle(buffer_size=4096) # shuffle before loading images
dataset = dataset.repeat(n_epoch)
dataset = dataset.map(_map_fn, num_parallel_calls=multiprocessing.cpu_count())
dataset = dataset.batch(batch_size) # TODO: consider using tf.contrib.map_and_batch
dataset = dataset.prefetch(1) # prefetch 1 batch
iterator = dataset.make_one_shot_iterator()
one_element = iterator.get_next()
sess = tf.Session()
# feed `one_element` into a network, for demo, we simply get the data as follows
n_step = round(n_epoch * n_data / batch_size)
st = time.time()
for _ in range(n_step):
_images, _targets = sess.run(one_element)
print("dataset APIs took %fs for each image" % ((time.time() - st) / batch_size / n_step)) | [
"def",
"example3",
"(",
")",
":",
"n_data",
"=",
"100",
"imgs_file_list",
"=",
"[",
"'tiger.jpeg'",
"]",
"*",
"n_data",
"train_targets",
"=",
"[",
"np",
".",
"ones",
"(",
"1",
")",
"]",
"*",
"n_data",
"def",
"generator",
"(",
")",
":",
"if",
"len",
"(",
"imgs_file_list",
")",
"!=",
"len",
"(",
"train_targets",
")",
":",
"raise",
"RuntimeError",
"(",
"'len(imgs_file_list) != len(train_targets)'",
")",
"for",
"_input",
",",
"_target",
"in",
"zip",
"(",
"imgs_file_list",
",",
"train_targets",
")",
":",
"yield",
"_input",
",",
"_target",
"def",
"_data_aug_fn",
"(",
"image",
")",
":",
"transform_matrix",
"=",
"create_transformation_matrix",
"(",
")",
"result",
"=",
"tl",
".",
"prepro",
".",
"affine_transform_cv2",
"(",
"image",
",",
"transform_matrix",
")",
"# Transform the image using a single operation",
"return",
"result",
"def",
"_map_fn",
"(",
"image_path",
",",
"target",
")",
":",
"image",
"=",
"tf",
".",
"read_file",
"(",
"image_path",
")",
"image",
"=",
"tf",
".",
"image",
".",
"decode_jpeg",
"(",
"image",
",",
"channels",
"=",
"3",
")",
"# Get RGB with 0~1",
"image",
"=",
"tf",
".",
"image",
".",
"convert_image_dtype",
"(",
"image",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"image",
"=",
"tf",
".",
"py_func",
"(",
"_data_aug_fn",
",",
"[",
"image",
"]",
",",
"[",
"tf",
".",
"float32",
"]",
")",
"# image = tf.reshape(image, (h, w, 3))",
"target",
"=",
"tf",
".",
"reshape",
"(",
"target",
",",
"(",
")",
")",
"return",
"image",
",",
"target",
"n_epoch",
"=",
"10",
"batch_size",
"=",
"5",
"dataset",
"=",
"tf",
".",
"data",
".",
"Dataset",
"(",
")",
".",
"from_generator",
"(",
"generator",
",",
"output_types",
"=",
"(",
"tf",
".",
"string",
",",
"tf",
".",
"int64",
")",
")",
"dataset",
"=",
"dataset",
".",
"shuffle",
"(",
"buffer_size",
"=",
"4096",
")",
"# shuffle before loading images",
"dataset",
"=",
"dataset",
".",
"repeat",
"(",
"n_epoch",
")",
"dataset",
"=",
"dataset",
".",
"map",
"(",
"_map_fn",
",",
"num_parallel_calls",
"=",
"multiprocessing",
".",
"cpu_count",
"(",
")",
")",
"dataset",
"=",
"dataset",
".",
"batch",
"(",
"batch_size",
")",
"# TODO: consider using tf.contrib.map_and_batch",
"dataset",
"=",
"dataset",
".",
"prefetch",
"(",
"1",
")",
"# prefetch 1 batch",
"iterator",
"=",
"dataset",
".",
"make_one_shot_iterator",
"(",
")",
"one_element",
"=",
"iterator",
".",
"get_next",
"(",
")",
"sess",
"=",
"tf",
".",
"Session",
"(",
")",
"# feed `one_element` into a network, for demo, we simply get the data as follows",
"n_step",
"=",
"round",
"(",
"n_epoch",
"*",
"n_data",
"/",
"batch_size",
")",
"st",
"=",
"time",
".",
"time",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"n_step",
")",
":",
"_images",
",",
"_targets",
"=",
"sess",
".",
"run",
"(",
"one_element",
")",
"print",
"(",
"\"dataset APIs took %fs for each image\"",
"%",
"(",
"(",
"time",
".",
"time",
"(",
")",
"-",
"st",
")",
"/",
"batch_size",
"/",
"n_step",
")",
")"
] | Example 3: Using TF dataset API to load and process image for training | [
"Example",
"3",
":",
"Using",
"TF",
"dataset",
"API",
"to",
"load",
"and",
"process",
"image",
"for",
"training"
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/data_process/tutorial_fast_affine_transform.py#L62-L104 | valid |
tensorlayer/tensorlayer | examples/data_process/tutorial_fast_affine_transform.py | example4 | def example4():
""" Example 4: Transforming coordinates using affine matrix. """
transform_matrix = create_transformation_matrix()
result = tl.prepro.affine_transform_cv2(image, transform_matrix) # 76 times faster
# Transform keypoint coordinates
coords = [[(50, 100), (100, 100), (100, 50), (200, 200)], [(250, 50), (200, 50), (200, 100)]]
coords_result = tl.prepro.affine_transform_keypoints(coords, transform_matrix)
def imwrite(image, coords_list, name):
coords_list_ = []
for coords in coords_list:
coords = np.array(coords, np.int32)
coords = coords.reshape((-1, 1, 2))
coords_list_.append(coords)
image = cv2.polylines(image, coords_list_, True, (0, 255, 255), 3)
cv2.imwrite(name, image[..., ::-1])
imwrite(image, coords, '_with_keypoints_origin.png')
imwrite(result, coords_result, '_with_keypoints_result.png') | python | def example4():
""" Example 4: Transforming coordinates using affine matrix. """
transform_matrix = create_transformation_matrix()
result = tl.prepro.affine_transform_cv2(image, transform_matrix) # 76 times faster
# Transform keypoint coordinates
coords = [[(50, 100), (100, 100), (100, 50), (200, 200)], [(250, 50), (200, 50), (200, 100)]]
coords_result = tl.prepro.affine_transform_keypoints(coords, transform_matrix)
def imwrite(image, coords_list, name):
coords_list_ = []
for coords in coords_list:
coords = np.array(coords, np.int32)
coords = coords.reshape((-1, 1, 2))
coords_list_.append(coords)
image = cv2.polylines(image, coords_list_, True, (0, 255, 255), 3)
cv2.imwrite(name, image[..., ::-1])
imwrite(image, coords, '_with_keypoints_origin.png')
imwrite(result, coords_result, '_with_keypoints_result.png') | [
"def",
"example4",
"(",
")",
":",
"transform_matrix",
"=",
"create_transformation_matrix",
"(",
")",
"result",
"=",
"tl",
".",
"prepro",
".",
"affine_transform_cv2",
"(",
"image",
",",
"transform_matrix",
")",
"# 76 times faster",
"# Transform keypoint coordinates",
"coords",
"=",
"[",
"[",
"(",
"50",
",",
"100",
")",
",",
"(",
"100",
",",
"100",
")",
",",
"(",
"100",
",",
"50",
")",
",",
"(",
"200",
",",
"200",
")",
"]",
",",
"[",
"(",
"250",
",",
"50",
")",
",",
"(",
"200",
",",
"50",
")",
",",
"(",
"200",
",",
"100",
")",
"]",
"]",
"coords_result",
"=",
"tl",
".",
"prepro",
".",
"affine_transform_keypoints",
"(",
"coords",
",",
"transform_matrix",
")",
"def",
"imwrite",
"(",
"image",
",",
"coords_list",
",",
"name",
")",
":",
"coords_list_",
"=",
"[",
"]",
"for",
"coords",
"in",
"coords_list",
":",
"coords",
"=",
"np",
".",
"array",
"(",
"coords",
",",
"np",
".",
"int32",
")",
"coords",
"=",
"coords",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
",",
"2",
")",
")",
"coords_list_",
".",
"append",
"(",
"coords",
")",
"image",
"=",
"cv2",
".",
"polylines",
"(",
"image",
",",
"coords_list_",
",",
"True",
",",
"(",
"0",
",",
"255",
",",
"255",
")",
",",
"3",
")",
"cv2",
".",
"imwrite",
"(",
"name",
",",
"image",
"[",
"...",
",",
":",
":",
"-",
"1",
"]",
")",
"imwrite",
"(",
"image",
",",
"coords",
",",
"'_with_keypoints_origin.png'",
")",
"imwrite",
"(",
"result",
",",
"coords_result",
",",
"'_with_keypoints_result.png'",
")"
] | Example 4: Transforming coordinates using affine matrix. | [
"Example",
"4",
":",
"Transforming",
"coordinates",
"using",
"affine",
"matrix",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/data_process/tutorial_fast_affine_transform.py#L107-L125 | valid |
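One practical detail the pipeline above glosses over (the commented-out `tf.reshape` hints at it) is that `tf.py_func` discards static shape information. Below is a hedged variant of `_map_fn` that restores it with `set_shape`; the 240x320 size and the reuse of the tutorial's `create_transformation_matrix()` helper are assumptions for illustration, and it presumes every augmented image comes out at the same fixed size.

import tensorflow as tf
import tensorlayer as tl

def _data_aug_fn(image):
    # same idea as example3: one combined affine transform per image
    matrix = create_transformation_matrix()   # helper assumed from the tutorial
    return tl.prepro.affine_transform_cv2(image, matrix)

def _map_fn_with_shape(image_path, target, h=240, w=320):   # h, w are illustrative
    image = tf.read_file(image_path)
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    image = tf.py_func(_data_aug_fn, [image], tf.float32)
    image.set_shape((h, w, 3))        # restore the static shape tf.py_func drops
    target = tf.reshape(target, ())
    return image, target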
tensorlayer/tensorlayer | examples/basic_tutorials/tutorial_cifar10_placeholder.py | distort_fn | def distort_fn(x, is_train=False):
"""
The images are processed as follows:
.. They are cropped to 24 x 24 pixels, centrally for evaluation or randomly for training.
.. They are approximately whitened to make the model insensitive to dynamic range.
For training, we additionally apply a series of random distortions to
artificially increase the data set size:
.. Randomly flip the image from left to right.
.. Randomly distort the image brightness.
"""
# print('begin',x.shape, np.min(x), np.max(x))
x = tl.prepro.crop(x, 24, 24, is_random=is_train)
# print('after crop',x.shape, np.min(x), np.max(x))
if is_train:
# x = tl.prepro.zoom(x, zoom_range=(0.9, 1.0), is_random=True)
# print('after zoom', x.shape, np.min(x), np.max(x))
x = tl.prepro.flip_axis(x, axis=1, is_random=True)
# print('after flip',x.shape, np.min(x), np.max(x))
x = tl.prepro.brightness(x, gamma=0.1, gain=1, is_random=True)
# print('after brightness',x.shape, np.min(x), np.max(x))
# tmp = np.max(x)
# x += np.random.uniform(-20, 20)
# x /= tmp
# normalize the image
x = (x - np.mean(x)) / max(np.std(x), 1e-5) # avoid division by zero
# print('after norm', x.shape, np.min(x), np.max(x), np.mean(x))
return x | python | def distort_fn(x, is_train=False):
"""
The images are processed as follows:
.. They are cropped to 24 x 24 pixels, centrally for evaluation or randomly for training.
.. They are approximately whitened to make the model insensitive to dynamic range.
For training, we additionally apply a series of random distortions to
artificially increase the data set size:
.. Randomly flip the image from left to right.
.. Randomly distort the image brightness.
"""
# print('begin',x.shape, np.min(x), np.max(x))
x = tl.prepro.crop(x, 24, 24, is_random=is_train)
# print('after crop',x.shape, np.min(x), np.max(x))
if is_train:
# x = tl.prepro.zoom(x, zoom_range=(0.9, 1.0), is_random=True)
# print('after zoom', x.shape, np.min(x), np.max(x))
x = tl.prepro.flip_axis(x, axis=1, is_random=True)
# print('after flip',x.shape, np.min(x), np.max(x))
x = tl.prepro.brightness(x, gamma=0.1, gain=1, is_random=True)
# print('after brightness',x.shape, np.min(x), np.max(x))
# tmp = np.max(x)
# x += np.random.uniform(-20, 20)
# x /= tmp
# normalize the image
x = (x - np.mean(x)) / max(np.std(x), 1e-5) # avoid division by zero
# print('after norm', x.shape, np.min(x), np.max(x), np.mean(x))
return x | [
"def",
"distort_fn",
"(",
"x",
",",
"is_train",
"=",
"False",
")",
":",
"# print('begin',x.shape, np.min(x), np.max(x))",
"x",
"=",
"tl",
".",
"prepro",
".",
"crop",
"(",
"x",
",",
"24",
",",
"24",
",",
"is_random",
"=",
"is_train",
")",
"# print('after crop',x.shape, np.min(x), np.max(x))",
"if",
"is_train",
":",
"# x = tl.prepro.zoom(x, zoom_range=(0.9, 1.0), is_random=True)",
"# print('after zoom', x.shape, np.min(x), np.max(x))",
"x",
"=",
"tl",
".",
"prepro",
".",
"flip_axis",
"(",
"x",
",",
"axis",
"=",
"1",
",",
"is_random",
"=",
"True",
")",
"# print('after flip',x.shape, np.min(x), np.max(x))",
"x",
"=",
"tl",
".",
"prepro",
".",
"brightness",
"(",
"x",
",",
"gamma",
"=",
"0.1",
",",
"gain",
"=",
"1",
",",
"is_random",
"=",
"True",
")",
"# print('after brightness',x.shape, np.min(x), np.max(x))",
"# tmp = np.max(x)",
"# x += np.random.uniform(-20, 20)",
"# x /= tmp",
"# normalize the image",
"x",
"=",
"(",
"x",
"-",
"np",
".",
"mean",
"(",
"x",
")",
")",
"/",
"max",
"(",
"np",
".",
"std",
"(",
"x",
")",
",",
"1e-5",
")",
"# avoid values divided by 0",
"# print('after norm', x.shape, np.min(x), np.max(x), np.mean(x))",
"return",
"x"
] | The images are processed as follows:
.. They are cropped to 24 x 24 pixels, centrally for evaluation or randomly for training.
.. They are approximately whitened to make the model insensitive to dynamic range.
For training, we additionally apply a series of random distortions to
artificially increase the data set size:
.. Randomly flip the image from left to right.
.. Randomly distort the image brightness. | [
"The",
"images",
"are",
"processed",
"as",
"follows",
":",
"..",
"They",
"are",
"cropped",
"to",
"24",
"x",
"24",
"pixels",
"centrally",
"for",
"evaluation",
"or",
"randomly",
"for",
"training",
".",
"..",
"They",
"are",
"approximately",
"whitened",
"to",
"make",
"the",
"model",
"insensitive",
"to",
"dynamic",
"range",
".",
"For",
"training",
"we",
"additionally",
"apply",
"a",
"series",
"of",
"random",
"distortions",
"to",
"artificially",
"increase",
"the",
"data",
"set",
"size",
":",
"..",
"Randomly",
"flip",
"the",
"image",
"from",
"left",
"to",
"right",
".",
"..",
"Randomly",
"distort",
"the",
"image",
"brightness",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/basic_tutorials/tutorial_cifar10_placeholder.py#L85-L111 | valid |
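A short usage sketch (not from the rows above) showing how `distort_fn` is typically applied to a whole minibatch with worker threads via `tl.prepro.threading_data`; the CIFAR-10 loader call, batch size, and the early `break` are illustrative assumptions, and `distort_fn` is assumed to be the function defined in the entry above.

import tensorlayer as tl

X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3))

for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size=128, shuffle=True):
    # run the per-image distortion across the batch in parallel threads
    X_batch = tl.prepro.threading_data(X_batch, fn=distort_fn, is_train=True)
    # ... feed X_batch / y_batch to the training op here ...
    break   # one batch is enough for the sketch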
tensorlayer/tensorlayer | tensorlayer/utils.py | fit | def fit(
sess, network, train_op, cost, X_train, y_train, x, y_, acc=None, batch_size=100, n_epoch=100, print_freq=5,
X_val=None, y_val=None, eval_train=True, tensorboard_dir=None, tensorboard_epoch_freq=5,
tensorboard_weight_histograms=True, tensorboard_graph_vis=True
):
"""Training a given non time-series network by the given cost function, training data, batch_size, n_epoch etc.
- MNIST example click `here <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_mnist_simple.py>`_.
- In order to control the training details, the authors HIGHLY recommend ``tl.iterate``; see the two MNIST examples `1 <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_mlp_dropout1.py>`_, `2 <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_mlp_dropout1.py>`_.
Parameters
----------
sess : Session
TensorFlow Session.
network : TensorLayer layer
the network to be trained.
train_op : TensorFlow optimizer
The optimizer for training e.g. tf.train.AdamOptimizer.
X_train : numpy.array
The input of training data
y_train : numpy.array
The target of training data
x : placeholder
For inputs.
y_ : placeholder
For targets.
acc : TensorFlow expression or None
Metric for accuracy or others. If None, would not print the information.
batch_size : int
The batch size for training and evaluating.
n_epoch : int
The number of training epochs.
print_freq : int
Print the training information every ``print_freq`` epochs.
X_val : numpy.array or None
The input of validation data. If None, would not perform validation.
y_val : numpy.array or None
The target of validation data. If None, would not perform validation.
eval_train : boolean
Whether to evaluate the model during training.
If X_val and y_val are not None, it reflects whether to evaluate the model on training data.
tensorboard_dir : string
path to log dir, if set, summary data will be stored to the tensorboard_dir/ directory for visualization with tensorboard. (default None)
Also runs `tl.layers.initialize_global_variables(sess)` internally in fit() to setup the summary nodes.
tensorboard_epoch_freq : int
How many epochs between storing tensorboard checkpoint for visualization to log/ directory (default 5).
tensorboard_weight_histograms : boolean
If True updates tensorboard data in the logs/ directory for visualization
of the weight histograms every tensorboard_epoch_freq epoch (default True).
tensorboard_graph_vis : boolean
If True stores the graph in the tensorboard summaries saved to log/ (default True).
Examples
--------
See `tutorial_mnist_simple.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_mnist_simple.py>`_
>>> tl.utils.fit(sess, network, train_op, cost, X_train, y_train, x, y_,
... acc=acc, batch_size=500, n_epoch=200, print_freq=5,
... X_val=X_val, y_val=y_val, eval_train=False)
>>> tl.utils.fit(sess, network, train_op, cost, X_train, y_train, x, y_,
... acc=acc, batch_size=500, n_epoch=200, print_freq=5,
... X_val=X_val, y_val=y_val, eval_train=False,
... tensorboard_dir='logs', tensorboard_weight_histograms=True, tensorboard_graph_vis=True)
Notes
--------
If tensorboard_dir is not None, the `global_variables_initializer` will be run inside the fit function
in order to initialize the automatically generated summary nodes used for tensorboard visualization,
thus `tf.global_variables_initializer().run()` before the `fit()` call will be undefined.
"""
if X_train.shape[0] < batch_size:
raise AssertionError("Number of training examples should be bigger than the batch size")
if tensorboard_dir is not None:
tl.logging.info("Setting up tensorboard ...")
#Set up tensorboard summaries and saver
tl.files.exists_or_mkdir(tensorboard_dir)
#Only write summaries for more recent TensorFlow versions
if hasattr(tf, 'summary') and hasattr(tf.summary, 'FileWriter'):
if tensorboard_graph_vis:
train_writer = tf.summary.FileWriter(tensorboard_dir + '/train', sess.graph)
val_writer = tf.summary.FileWriter(tensorboard_dir + '/validation', sess.graph)
else:
train_writer = tf.summary.FileWriter(tensorboard_dir + '/train')
val_writer = tf.summary.FileWriter(tensorboard_dir + '/validation')
#Set up summary nodes
if (tensorboard_weight_histograms):
for param in network.all_params:
if hasattr(tf, 'summary') and hasattr(tf.summary, 'histogram'):
tl.logging.info('Param name %s' % param.name)
tf.summary.histogram(param.name, param)
if hasattr(tf, 'summary') and hasattr(tf.summary, 'histogram'):
tf.summary.scalar('cost', cost)
merged = tf.summary.merge_all()
#Initialize all variables and summaries
tl.layers.initialize_global_variables(sess)
tl.logging.info("Finished! use `tensorboard --logdir=%s/` to start tensorboard" % tensorboard_dir)
tl.logging.info("Start training the network ...")
start_time_begin = time.time()
tensorboard_train_index, tensorboard_val_index = 0, 0
for epoch in range(n_epoch):
start_time = time.time()
loss_ep = 0
n_step = 0
for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
feed_dict = {x: X_train_a, y_: y_train_a}
feed_dict.update(network.all_drop) # enable noise layers
loss, _ = sess.run([cost, train_op], feed_dict=feed_dict)
loss_ep += loss
n_step += 1
loss_ep = loss_ep / n_step
if tensorboard_dir is not None and hasattr(tf, 'summary'):
if epoch + 1 == 1 or (epoch + 1) % tensorboard_epoch_freq == 0:
for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
dp_dict = dict_to_one(network.all_drop) # disable noise layers
feed_dict = {x: X_train_a, y_: y_train_a}
feed_dict.update(dp_dict)
result = sess.run(merged, feed_dict=feed_dict)
train_writer.add_summary(result, tensorboard_train_index)
tensorboard_train_index += 1
if (X_val is not None) and (y_val is not None):
for X_val_a, y_val_a in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=True):
dp_dict = dict_to_one(network.all_drop) # disable noise layers
feed_dict = {x: X_val_a, y_: y_val_a}
feed_dict.update(dp_dict)
result = sess.run(merged, feed_dict=feed_dict)
val_writer.add_summary(result, tensorboard_val_index)
tensorboard_val_index += 1
if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
if (X_val is not None) and (y_val is not None):
tl.logging.info("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time))
if eval_train is True:
train_loss, train_acc, n_batch = 0, 0, 0
for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
dp_dict = dict_to_one(network.all_drop) # disable noise layers
feed_dict = {x: X_train_a, y_: y_train_a}
feed_dict.update(dp_dict)
if acc is not None:
err, ac = sess.run([cost, acc], feed_dict=feed_dict)
train_acc += ac
else:
err = sess.run(cost, feed_dict=feed_dict)
train_loss += err
n_batch += 1
tl.logging.info(" train loss: %f" % (train_loss / n_batch))
if acc is not None:
tl.logging.info(" train acc: %f" % (train_acc / n_batch))
val_loss, val_acc, n_batch = 0, 0, 0
for X_val_a, y_val_a in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=True):
dp_dict = dict_to_one(network.all_drop) # disable noise layers
feed_dict = {x: X_val_a, y_: y_val_a}
feed_dict.update(dp_dict)
if acc is not None:
err, ac = sess.run([cost, acc], feed_dict=feed_dict)
val_acc += ac
else:
err = sess.run(cost, feed_dict=feed_dict)
val_loss += err
n_batch += 1
tl.logging.info(" val loss: %f" % (val_loss / n_batch))
if acc is not None:
tl.logging.info(" val acc: %f" % (val_acc / n_batch))
else:
tl.logging.info(
"Epoch %d of %d took %fs, loss %f" % (epoch + 1, n_epoch, time.time() - start_time, loss_ep)
)
tl.logging.info("Total training time: %fs" % (time.time() - start_time_begin)) | python | def fit(
sess, network, train_op, cost, X_train, y_train, x, y_, acc=None, batch_size=100, n_epoch=100, print_freq=5,
X_val=None, y_val=None, eval_train=True, tensorboard_dir=None, tensorboard_epoch_freq=5,
tensorboard_weight_histograms=True, tensorboard_graph_vis=True
):
"""Training a given non time-series network by the given cost function, training data, batch_size, n_epoch etc.
- MNIST example click `here <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_mnist_simple.py>`_.
- In order to control the training details, the authors HIGHLY recommend ``tl.iterate``; see the two MNIST examples `1 <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_mlp_dropout1.py>`_, `2 <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_mlp_dropout1.py>`_.
Parameters
----------
sess : Session
TensorFlow Session.
network : TensorLayer layer
the network to be trained.
train_op : TensorFlow optimizer
The optimizer for training e.g. tf.train.AdamOptimizer.
X_train : numpy.array
The input of training data
y_train : numpy.array
The target of training data
x : placeholder
For inputs.
y_ : placeholder
For targets.
acc : TensorFlow expression or None
Metric for accuracy or others. If None, would not print the information.
batch_size : int
The batch size for training and evaluating.
n_epoch : int
The number of training epochs.
print_freq : int
Print the training information every ``print_freq`` epochs.
X_val : numpy.array or None
The input of validation data. If None, would not perform validation.
y_val : numpy.array or None
The target of validation data. If None, would not perform validation.
eval_train : boolean
Whether to evaluate the model during training.
If X_val and y_val are not None, it reflects whether to evaluate the model on training data.
tensorboard_dir : string
path to log dir, if set, summary data will be stored to the tensorboard_dir/ directory for visualization with tensorboard. (default None)
Also runs `tl.layers.initialize_global_variables(sess)` internally in fit() to setup the summary nodes.
tensorboard_epoch_freq : int
How many epochs between storing tensorboard checkpoint for visualization to log/ directory (default 5).
tensorboard_weight_histograms : boolean
If True updates tensorboard data in the logs/ directory for visualization
of the weight histograms every tensorboard_epoch_freq epoch (default True).
tensorboard_graph_vis : boolean
If True stores the graph in the tensorboard summaries saved to log/ (default True).
Examples
--------
See `tutorial_mnist_simple.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_mnist_simple.py>`_
>>> tl.utils.fit(sess, network, train_op, cost, X_train, y_train, x, y_,
... acc=acc, batch_size=500, n_epoch=200, print_freq=5,
... X_val=X_val, y_val=y_val, eval_train=False)
>>> tl.utils.fit(sess, network, train_op, cost, X_train, y_train, x, y_,
... acc=acc, batch_size=500, n_epoch=200, print_freq=5,
... X_val=X_val, y_val=y_val, eval_train=False,
... tensorboard_dir='logs', tensorboard_weight_histograms=True, tensorboard_graph_vis=True)
Notes
--------
If tensorboard_dir is not None, the `global_variables_initializer` will be run inside the fit function
in order to initialize the automatically generated summary nodes used for tensorboard visualization,
thus `tf.global_variables_initializer().run()` before the `fit()` call will be undefined.
"""
if X_train.shape[0] < batch_size:
raise AssertionError("Number of training examples should be bigger than the batch size")
if tensorboard_dir is not None:
tl.logging.info("Setting up tensorboard ...")
#Set up tensorboard summaries and saver
tl.files.exists_or_mkdir(tensorboard_dir)
#Only write summaries for more recent TensorFlow versions
if hasattr(tf, 'summary') and hasattr(tf.summary, 'FileWriter'):
if tensorboard_graph_vis:
train_writer = tf.summary.FileWriter(tensorboard_dir + '/train', sess.graph)
val_writer = tf.summary.FileWriter(tensorboard_dir + '/validation', sess.graph)
else:
train_writer = tf.summary.FileWriter(tensorboard_dir + '/train')
val_writer = tf.summary.FileWriter(tensorboard_dir + '/validation')
#Set up summary nodes
if (tensorboard_weight_histograms):
for param in network.all_params:
if hasattr(tf, 'summary') and hasattr(tf.summary, 'histogram'):
tl.logging.info('Param name %s' % param.name)
tf.summary.histogram(param.name, param)
if hasattr(tf, 'summary') and hasattr(tf.summary, 'histogram'):
tf.summary.scalar('cost', cost)
merged = tf.summary.merge_all()
#Initialize all variables and summaries
tl.layers.initialize_global_variables(sess)
tl.logging.info("Finished! use `tensorboard --logdir=%s/` to start tensorboard" % tensorboard_dir)
tl.logging.info("Start training the network ...")
start_time_begin = time.time()
tensorboard_train_index, tensorboard_val_index = 0, 0
for epoch in range(n_epoch):
start_time = time.time()
loss_ep = 0
n_step = 0
for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
feed_dict = {x: X_train_a, y_: y_train_a}
feed_dict.update(network.all_drop) # enable noise layers
loss, _ = sess.run([cost, train_op], feed_dict=feed_dict)
loss_ep += loss
n_step += 1
loss_ep = loss_ep / n_step
if tensorboard_dir is not None and hasattr(tf, 'summary'):
if epoch + 1 == 1 or (epoch + 1) % tensorboard_epoch_freq == 0:
for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
dp_dict = dict_to_one(network.all_drop) # disable noise layers
feed_dict = {x: X_train_a, y_: y_train_a}
feed_dict.update(dp_dict)
result = sess.run(merged, feed_dict=feed_dict)
train_writer.add_summary(result, tensorboard_train_index)
tensorboard_train_index += 1
if (X_val is not None) and (y_val is not None):
for X_val_a, y_val_a in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=True):
dp_dict = dict_to_one(network.all_drop) # disable noise layers
feed_dict = {x: X_val_a, y_: y_val_a}
feed_dict.update(dp_dict)
result = sess.run(merged, feed_dict=feed_dict)
val_writer.add_summary(result, tensorboard_val_index)
tensorboard_val_index += 1
if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
if (X_val is not None) and (y_val is not None):
tl.logging.info("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time))
if eval_train is True:
train_loss, train_acc, n_batch = 0, 0, 0
for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
dp_dict = dict_to_one(network.all_drop) # disable noise layers
feed_dict = {x: X_train_a, y_: y_train_a}
feed_dict.update(dp_dict)
if acc is not None:
err, ac = sess.run([cost, acc], feed_dict=feed_dict)
train_acc += ac
else:
err = sess.run(cost, feed_dict=feed_dict)
train_loss += err
n_batch += 1
tl.logging.info(" train loss: %f" % (train_loss / n_batch))
if acc is not None:
tl.logging.info(" train acc: %f" % (train_acc / n_batch))
val_loss, val_acc, n_batch = 0, 0, 0
for X_val_a, y_val_a in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=True):
dp_dict = dict_to_one(network.all_drop) # disable noise layers
feed_dict = {x: X_val_a, y_: y_val_a}
feed_dict.update(dp_dict)
if acc is not None:
err, ac = sess.run([cost, acc], feed_dict=feed_dict)
val_acc += ac
else:
err = sess.run(cost, feed_dict=feed_dict)
val_loss += err
n_batch += 1
tl.logging.info(" val loss: %f" % (val_loss / n_batch))
if acc is not None:
tl.logging.info(" val acc: %f" % (val_acc / n_batch))
else:
tl.logging.info(
"Epoch %d of %d took %fs, loss %f" % (epoch + 1, n_epoch, time.time() - start_time, loss_ep)
)
tl.logging.info("Total training time: %fs" % (time.time() - start_time_begin)) | [
"def",
"fit",
"(",
"sess",
",",
"network",
",",
"train_op",
",",
"cost",
",",
"X_train",
",",
"y_train",
",",
"x",
",",
"y_",
",",
"acc",
"=",
"None",
",",
"batch_size",
"=",
"100",
",",
"n_epoch",
"=",
"100",
",",
"print_freq",
"=",
"5",
",",
"X_val",
"=",
"None",
",",
"y_val",
"=",
"None",
",",
"eval_train",
"=",
"True",
",",
"tensorboard_dir",
"=",
"None",
",",
"tensorboard_epoch_freq",
"=",
"5",
",",
"tensorboard_weight_histograms",
"=",
"True",
",",
"tensorboard_graph_vis",
"=",
"True",
")",
":",
"if",
"X_train",
".",
"shape",
"[",
"0",
"]",
"<",
"batch_size",
":",
"raise",
"AssertionError",
"(",
"\"Number of training examples should be bigger than the batch size\"",
")",
"if",
"tensorboard_dir",
"is",
"not",
"None",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"\"Setting up tensorboard ...\"",
")",
"#Set up tensorboard summaries and saver",
"tl",
".",
"files",
".",
"exists_or_mkdir",
"(",
"tensorboard_dir",
")",
"#Only write summaries for more recent TensorFlow versions",
"if",
"hasattr",
"(",
"tf",
",",
"'summary'",
")",
"and",
"hasattr",
"(",
"tf",
".",
"summary",
",",
"'FileWriter'",
")",
":",
"if",
"tensorboard_graph_vis",
":",
"train_writer",
"=",
"tf",
".",
"summary",
".",
"FileWriter",
"(",
"tensorboard_dir",
"+",
"'/train'",
",",
"sess",
".",
"graph",
")",
"val_writer",
"=",
"tf",
".",
"summary",
".",
"FileWriter",
"(",
"tensorboard_dir",
"+",
"'/validation'",
",",
"sess",
".",
"graph",
")",
"else",
":",
"train_writer",
"=",
"tf",
".",
"summary",
".",
"FileWriter",
"(",
"tensorboard_dir",
"+",
"'/train'",
")",
"val_writer",
"=",
"tf",
".",
"summary",
".",
"FileWriter",
"(",
"tensorboard_dir",
"+",
"'/validation'",
")",
"#Set up summary nodes",
"if",
"(",
"tensorboard_weight_histograms",
")",
":",
"for",
"param",
"in",
"network",
".",
"all_params",
":",
"if",
"hasattr",
"(",
"tf",
",",
"'summary'",
")",
"and",
"hasattr",
"(",
"tf",
".",
"summary",
",",
"'histogram'",
")",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"'Param name %s'",
"%",
"param",
".",
"name",
")",
"tf",
".",
"summary",
".",
"histogram",
"(",
"param",
".",
"name",
",",
"param",
")",
"if",
"hasattr",
"(",
"tf",
",",
"'summary'",
")",
"and",
"hasattr",
"(",
"tf",
".",
"summary",
",",
"'histogram'",
")",
":",
"tf",
".",
"summary",
".",
"scalar",
"(",
"'cost'",
",",
"cost",
")",
"merged",
"=",
"tf",
".",
"summary",
".",
"merge_all",
"(",
")",
"#Initalize all variables and summaries",
"tl",
".",
"layers",
".",
"initialize_global_variables",
"(",
"sess",
")",
"tl",
".",
"logging",
".",
"info",
"(",
"\"Finished! use `tensorboard --logdir=%s/` to start tensorboard\"",
"%",
"tensorboard_dir",
")",
"tl",
".",
"logging",
".",
"info",
"(",
"\"Start training the network ...\"",
")",
"start_time_begin",
"=",
"time",
".",
"time",
"(",
")",
"tensorboard_train_index",
",",
"tensorboard_val_index",
"=",
"0",
",",
"0",
"for",
"epoch",
"in",
"range",
"(",
"n_epoch",
")",
":",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"loss_ep",
"=",
"0",
"n_step",
"=",
"0",
"for",
"X_train_a",
",",
"y_train_a",
"in",
"tl",
".",
"iterate",
".",
"minibatches",
"(",
"X_train",
",",
"y_train",
",",
"batch_size",
",",
"shuffle",
"=",
"True",
")",
":",
"feed_dict",
"=",
"{",
"x",
":",
"X_train_a",
",",
"y_",
":",
"y_train_a",
"}",
"feed_dict",
".",
"update",
"(",
"network",
".",
"all_drop",
")",
"# enable noise layers",
"loss",
",",
"_",
"=",
"sess",
".",
"run",
"(",
"[",
"cost",
",",
"train_op",
"]",
",",
"feed_dict",
"=",
"feed_dict",
")",
"loss_ep",
"+=",
"loss",
"n_step",
"+=",
"1",
"loss_ep",
"=",
"loss_ep",
"/",
"n_step",
"if",
"tensorboard_dir",
"is",
"not",
"None",
"and",
"hasattr",
"(",
"tf",
",",
"'summary'",
")",
":",
"if",
"epoch",
"+",
"1",
"==",
"1",
"or",
"(",
"epoch",
"+",
"1",
")",
"%",
"tensorboard_epoch_freq",
"==",
"0",
":",
"for",
"X_train_a",
",",
"y_train_a",
"in",
"tl",
".",
"iterate",
".",
"minibatches",
"(",
"X_train",
",",
"y_train",
",",
"batch_size",
",",
"shuffle",
"=",
"True",
")",
":",
"dp_dict",
"=",
"dict_to_one",
"(",
"network",
".",
"all_drop",
")",
"# disable noise layers",
"feed_dict",
"=",
"{",
"x",
":",
"X_train_a",
",",
"y_",
":",
"y_train_a",
"}",
"feed_dict",
".",
"update",
"(",
"dp_dict",
")",
"result",
"=",
"sess",
".",
"run",
"(",
"merged",
",",
"feed_dict",
"=",
"feed_dict",
")",
"train_writer",
".",
"add_summary",
"(",
"result",
",",
"tensorboard_train_index",
")",
"tensorboard_train_index",
"+=",
"1",
"if",
"(",
"X_val",
"is",
"not",
"None",
")",
"and",
"(",
"y_val",
"is",
"not",
"None",
")",
":",
"for",
"X_val_a",
",",
"y_val_a",
"in",
"tl",
".",
"iterate",
".",
"minibatches",
"(",
"X_val",
",",
"y_val",
",",
"batch_size",
",",
"shuffle",
"=",
"True",
")",
":",
"dp_dict",
"=",
"dict_to_one",
"(",
"network",
".",
"all_drop",
")",
"# disable noise layers",
"feed_dict",
"=",
"{",
"x",
":",
"X_val_a",
",",
"y_",
":",
"y_val_a",
"}",
"feed_dict",
".",
"update",
"(",
"dp_dict",
")",
"result",
"=",
"sess",
".",
"run",
"(",
"merged",
",",
"feed_dict",
"=",
"feed_dict",
")",
"val_writer",
".",
"add_summary",
"(",
"result",
",",
"tensorboard_val_index",
")",
"tensorboard_val_index",
"+=",
"1",
"if",
"epoch",
"+",
"1",
"==",
"1",
"or",
"(",
"epoch",
"+",
"1",
")",
"%",
"print_freq",
"==",
"0",
":",
"if",
"(",
"X_val",
"is",
"not",
"None",
")",
"and",
"(",
"y_val",
"is",
"not",
"None",
")",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"\"Epoch %d of %d took %fs\"",
"%",
"(",
"epoch",
"+",
"1",
",",
"n_epoch",
",",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
")",
")",
"if",
"eval_train",
"is",
"True",
":",
"train_loss",
",",
"train_acc",
",",
"n_batch",
"=",
"0",
",",
"0",
",",
"0",
"for",
"X_train_a",
",",
"y_train_a",
"in",
"tl",
".",
"iterate",
".",
"minibatches",
"(",
"X_train",
",",
"y_train",
",",
"batch_size",
",",
"shuffle",
"=",
"True",
")",
":",
"dp_dict",
"=",
"dict_to_one",
"(",
"network",
".",
"all_drop",
")",
"# disable noise layers",
"feed_dict",
"=",
"{",
"x",
":",
"X_train_a",
",",
"y_",
":",
"y_train_a",
"}",
"feed_dict",
".",
"update",
"(",
"dp_dict",
")",
"if",
"acc",
"is",
"not",
"None",
":",
"err",
",",
"ac",
"=",
"sess",
".",
"run",
"(",
"[",
"cost",
",",
"acc",
"]",
",",
"feed_dict",
"=",
"feed_dict",
")",
"train_acc",
"+=",
"ac",
"else",
":",
"err",
"=",
"sess",
".",
"run",
"(",
"cost",
",",
"feed_dict",
"=",
"feed_dict",
")",
"train_loss",
"+=",
"err",
"n_batch",
"+=",
"1",
"tl",
".",
"logging",
".",
"info",
"(",
"\" train loss: %f\"",
"%",
"(",
"train_loss",
"/",
"n_batch",
")",
")",
"if",
"acc",
"is",
"not",
"None",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"\" train acc: %f\"",
"%",
"(",
"train_acc",
"/",
"n_batch",
")",
")",
"val_loss",
",",
"val_acc",
",",
"n_batch",
"=",
"0",
",",
"0",
",",
"0",
"for",
"X_val_a",
",",
"y_val_a",
"in",
"tl",
".",
"iterate",
".",
"minibatches",
"(",
"X_val",
",",
"y_val",
",",
"batch_size",
",",
"shuffle",
"=",
"True",
")",
":",
"dp_dict",
"=",
"dict_to_one",
"(",
"network",
".",
"all_drop",
")",
"# disable noise layers",
"feed_dict",
"=",
"{",
"x",
":",
"X_val_a",
",",
"y_",
":",
"y_val_a",
"}",
"feed_dict",
".",
"update",
"(",
"dp_dict",
")",
"if",
"acc",
"is",
"not",
"None",
":",
"err",
",",
"ac",
"=",
"sess",
".",
"run",
"(",
"[",
"cost",
",",
"acc",
"]",
",",
"feed_dict",
"=",
"feed_dict",
")",
"val_acc",
"+=",
"ac",
"else",
":",
"err",
"=",
"sess",
".",
"run",
"(",
"cost",
",",
"feed_dict",
"=",
"feed_dict",
")",
"val_loss",
"+=",
"err",
"n_batch",
"+=",
"1",
"tl",
".",
"logging",
".",
"info",
"(",
"\" val loss: %f\"",
"%",
"(",
"val_loss",
"/",
"n_batch",
")",
")",
"if",
"acc",
"is",
"not",
"None",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"\" val acc: %f\"",
"%",
"(",
"val_acc",
"/",
"n_batch",
")",
")",
"else",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"\"Epoch %d of %d took %fs, loss %f\"",
"%",
"(",
"epoch",
"+",
"1",
",",
"n_epoch",
",",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
",",
"loss_ep",
")",
")",
"tl",
".",
"logging",
".",
"info",
"(",
"\"Total training time: %fs\"",
"%",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start_time_begin",
")",
")"
] | Training a given non time-series network by the given cost function, training data, batch_size, n_epoch etc.
- MNIST example click `here <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_mnist_simple.py>`_.
- In order to control the training details, the authors HIGHLY recommend ``tl.iterate``; see the two MNIST examples `1 <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_mlp_dropout1.py>`_, `2 <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_mlp_dropout1.py>`_.
Parameters
----------
sess : Session
TensorFlow Session.
network : TensorLayer layer
the network to be trained.
train_op : TensorFlow optimizer
The optimizer for training e.g. tf.train.AdamOptimizer.
X_train : numpy.array
The input of training data
y_train : numpy.array
The target of training data
x : placeholder
For inputs.
y_ : placeholder
For targets.
acc : TensorFlow expression or None
Metric for accuracy or others. If None, would not print the information.
batch_size : int
The batch size for training and evaluating.
n_epoch : int
The number of training epochs.
print_freq : int
Print the training information every ``print_freq`` epochs.
X_val : numpy.array or None
The input of validation data. If None, would not perform validation.
y_val : numpy.array or None
The target of validation data. If None, would not perform validation.
eval_train : boolean
Whether to evaluate the model during training.
If X_val and y_val are not None, it reflects whether to evaluate the model on training data.
tensorboard_dir : string
path to log dir, if set, summary data will be stored to the tensorboard_dir/ directory for visualization with tensorboard. (default None)
Also runs `tl.layers.initialize_global_variables(sess)` internally in fit() to setup the summary nodes.
tensorboard_epoch_freq : int
How many epochs between storing tensorboard checkpoint for visualization to log/ directory (default 5).
tensorboard_weight_histograms : boolean
If True updates tensorboard data in the logs/ directory for visualization
of the weight histograms every tensorboard_epoch_freq epoch (default True).
tensorboard_graph_vis : boolean
If True stores the graph in the tensorboard summaries saved to log/ (default True).
Examples
--------
See `tutorial_mnist_simple.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_mnist_simple.py>`_
>>> tl.utils.fit(sess, network, train_op, cost, X_train, y_train, x, y_,
... acc=acc, batch_size=500, n_epoch=200, print_freq=5,
... X_val=X_val, y_val=y_val, eval_train=False)
>>> tl.utils.fit(sess, network, train_op, cost, X_train, y_train, x, y_,
... acc=acc, batch_size=500, n_epoch=200, print_freq=5,
... X_val=X_val, y_val=y_val, eval_train=False,
... tensorboard_dir='logs', tensorboard_weight_histograms=True, tensorboard_graph_vis=True)
Notes
--------
If tensorboard_dir is not None, the `global_variables_initializer` will be run inside the fit function
in order to initialize the automatically generated summary nodes used for tensorboard visualization,
thus `tf.global_variables_initializer().run()` before the `fit()` call will be undefined. | [
"Training",
"a",
"given",
"non",
"time",
"-",
"series",
"network",
"by",
"the",
"given",
"cost",
"function",
"training",
"data",
"batch_size",
"n_epoch",
"etc",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/utils.py#L42-L219 | valid |
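To make the many parameters of `fit` concrete, here is a hedged end-to-end sketch in the spirit of the `tutorial_mnist_simple.py` example the docstring points to; the MLP layer sizes, learning rate, and the MNIST loader call are illustrative assumptions, and TensorFlow 1.x graph mode is assumed.

import tensorflow as tf
import tensorlayer as tl

X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784))

x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
y_ = tf.placeholder(tf.int64, shape=[None], name='y_')

net = tl.layers.InputLayer(x, name='input')
net = tl.layers.DenseLayer(net, n_units=800, act=tf.nn.relu, name='relu1')
net = tl.layers.DenseLayer(net, n_units=10, act=tf.identity, name='output')

y = net.outputs
cost = tl.cost.cross_entropy(y, y_, name='xentropy')
acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(y, 1), y_), tf.float32))
train_op = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost, var_list=net.all_params)

sess = tf.Session()
tl.layers.initialize_global_variables(sess)
tl.utils.fit(
    sess, net, train_op, cost, X_train, y_train, x, y_,
    acc=acc, batch_size=500, n_epoch=200, print_freq=5,
    X_val=X_val, y_val=y_val, eval_train=False
)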
tensorlayer/tensorlayer | tensorlayer/utils.py | predict | def predict(sess, network, X, x, y_op, batch_size=None):
"""
Return the predict results of given non time-series network.
Parameters
----------
sess : Session
TensorFlow Session.
network : TensorLayer layer
The network.
X : numpy.array
The inputs.
x : placeholder
For inputs.
y_op : placeholder
The argmax expression of softmax outputs.
batch_size : int or None
The batch size for prediction; when the dataset is large, we should use minibatches for prediction,
and if the dataset is small, we can set it to None.
Examples
--------
See `tutorial_mnist_simple.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_mnist_simple.py>`_
>>> y = network.outputs
>>> y_op = tf.argmax(tf.nn.softmax(y), 1)
>>> print(tl.utils.predict(sess, network, X_test, x, y_op))
"""
if batch_size is None:
dp_dict = dict_to_one(network.all_drop) # disable noise layers
feed_dict = {
x: X,
}
feed_dict.update(dp_dict)
return sess.run(y_op, feed_dict=feed_dict)
else:
result = None
for X_a, _ in tl.iterate.minibatches(X, X, batch_size, shuffle=False):
dp_dict = dict_to_one(network.all_drop)
feed_dict = {
x: X_a,
}
feed_dict.update(dp_dict)
result_a = sess.run(y_op, feed_dict=feed_dict)
if result is None:
result = result_a
else:
result = np.concatenate((result, result_a))
if result is None:
if len(X) % batch_size != 0:
dp_dict = dict_to_one(network.all_drop)
feed_dict = {
x: X[-(len(X) % batch_size):, :],
}
feed_dict.update(dp_dict)
result_a = sess.run(y_op, feed_dict=feed_dict)
result = result_a
else:
if len(X) != len(result) and len(X) % batch_size != 0:
dp_dict = dict_to_one(network.all_drop)
feed_dict = {
x: X[-(len(X) % batch_size):, :],
}
feed_dict.update(dp_dict)
result_a = sess.run(y_op, feed_dict=feed_dict)
result = np.concatenate((result, result_a))
return result | python | def predict(sess, network, X, x, y_op, batch_size=None):
"""
Return the predict results of given non time-series network.
Parameters
----------
sess : Session
TensorFlow Session.
network : TensorLayer layer
The network.
X : numpy.array
The inputs.
x : placeholder
For inputs.
y_op : placeholder
The argmax expression of softmax outputs.
batch_size : int or None
The batch size for prediction; when the dataset is large, we should use minibatches for prediction,
and if the dataset is small, we can set it to None.
Examples
--------
See `tutorial_mnist_simple.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_mnist_simple.py>`_
>>> y = network.outputs
>>> y_op = tf.argmax(tf.nn.softmax(y), 1)
>>> print(tl.utils.predict(sess, network, X_test, x, y_op))
"""
if batch_size is None:
dp_dict = dict_to_one(network.all_drop) # disable noise layers
feed_dict = {
x: X,
}
feed_dict.update(dp_dict)
return sess.run(y_op, feed_dict=feed_dict)
else:
result = None
for X_a, _ in tl.iterate.minibatches(X, X, batch_size, shuffle=False):
dp_dict = dict_to_one(network.all_drop)
feed_dict = {
x: X_a,
}
feed_dict.update(dp_dict)
result_a = sess.run(y_op, feed_dict=feed_dict)
if result is None:
result = result_a
else:
result = np.concatenate((result, result_a))
if result is None:
if len(X) % batch_size != 0:
dp_dict = dict_to_one(network.all_drop)
feed_dict = {
x: X[-(len(X) % batch_size):, :],
}
feed_dict.update(dp_dict)
result_a = sess.run(y_op, feed_dict=feed_dict)
result = result_a
else:
if len(X) != len(result) and len(X) % batch_size != 0:
dp_dict = dict_to_one(network.all_drop)
feed_dict = {
x: X[-(len(X) % batch_size):, :],
}
feed_dict.update(dp_dict)
result_a = sess.run(y_op, feed_dict=feed_dict)
result = np.concatenate((result, result_a))
return result | [
"def",
"predict",
"(",
"sess",
",",
"network",
",",
"X",
",",
"x",
",",
"y_op",
",",
"batch_size",
"=",
"None",
")",
":",
"if",
"batch_size",
"is",
"None",
":",
"dp_dict",
"=",
"dict_to_one",
"(",
"network",
".",
"all_drop",
")",
"# disable noise layers",
"feed_dict",
"=",
"{",
"x",
":",
"X",
",",
"}",
"feed_dict",
".",
"update",
"(",
"dp_dict",
")",
"return",
"sess",
".",
"run",
"(",
"y_op",
",",
"feed_dict",
"=",
"feed_dict",
")",
"else",
":",
"result",
"=",
"None",
"for",
"X_a",
",",
"_",
"in",
"tl",
".",
"iterate",
".",
"minibatches",
"(",
"X",
",",
"X",
",",
"batch_size",
",",
"shuffle",
"=",
"False",
")",
":",
"dp_dict",
"=",
"dict_to_one",
"(",
"network",
".",
"all_drop",
")",
"feed_dict",
"=",
"{",
"x",
":",
"X_a",
",",
"}",
"feed_dict",
".",
"update",
"(",
"dp_dict",
")",
"result_a",
"=",
"sess",
".",
"run",
"(",
"y_op",
",",
"feed_dict",
"=",
"feed_dict",
")",
"if",
"result",
"is",
"None",
":",
"result",
"=",
"result_a",
"else",
":",
"result",
"=",
"np",
".",
"concatenate",
"(",
"(",
"result",
",",
"result_a",
")",
")",
"if",
"result",
"is",
"None",
":",
"if",
"len",
"(",
"X",
")",
"%",
"batch_size",
"!=",
"0",
":",
"dp_dict",
"=",
"dict_to_one",
"(",
"network",
".",
"all_drop",
")",
"feed_dict",
"=",
"{",
"x",
":",
"X",
"[",
"-",
"(",
"len",
"(",
"X",
")",
"%",
"batch_size",
")",
":",
",",
":",
"]",
",",
"}",
"feed_dict",
".",
"update",
"(",
"dp_dict",
")",
"result_a",
"=",
"sess",
".",
"run",
"(",
"y_op",
",",
"feed_dict",
"=",
"feed_dict",
")",
"result",
"=",
"result_a",
"else",
":",
"if",
"len",
"(",
"X",
")",
"!=",
"len",
"(",
"result",
")",
"and",
"len",
"(",
"X",
")",
"%",
"batch_size",
"!=",
"0",
":",
"dp_dict",
"=",
"dict_to_one",
"(",
"network",
".",
"all_drop",
")",
"feed_dict",
"=",
"{",
"x",
":",
"X",
"[",
"-",
"(",
"len",
"(",
"X",
")",
"%",
"batch_size",
")",
":",
",",
":",
"]",
",",
"}",
"feed_dict",
".",
"update",
"(",
"dp_dict",
")",
"result_a",
"=",
"sess",
".",
"run",
"(",
"y_op",
",",
"feed_dict",
"=",
"feed_dict",
")",
"result",
"=",
"np",
".",
"concatenate",
"(",
"(",
"result",
",",
"result_a",
")",
")",
"return",
"result"
] | Return the predict results of given non time-series network.
Parameters
----------
sess : Session
TensorFlow Session.
network : TensorLayer layer
The network.
X : numpy.array
The inputs.
x : placeholder
For inputs.
y_op : placeholder
The argmax expression of softmax outputs.
batch_size : int or None
The batch size for prediction; when the dataset is large, we should use minibatches for prediction,
and when the dataset is small, we can set it to None.
Examples
--------
See `tutorial_mnist_simple.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_mnist_simple.py>`_
>>> y = network.outputs
>>> y_op = tf.argmax(tf.nn.softmax(y), 1)
>>> print(tl.utils.predict(sess, network, X_test, x, y_op)) | [
"Return",
"the",
"predict",
"results",
"of",
"given",
"non",
"time",
"-",
"series",
"network",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/utils.py#L289-L356 | valid |
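A minimal usage sketch for the `predict` helper above, pieced together from its docstring example; the trained network, input placeholder, session and test array are assumed to already exist, and the batch size of 64 is arbitrary.
# Sketch only: assumes a trained TensorLayer `network`, its input placeholder `x`,
# an active `sess`, and a 2-D test array `X_test` (e.g. MNIST features).
import tensorflow as tf
import tensorlayer as tl
y = network.outputs                          # network logits
y_op = tf.argmax(tf.nn.softmax(y), 1)        # predicted class index per sample
# Small dataset: feed everything in one run (batch_size=None).
pred_all = tl.utils.predict(sess, network, X_test, x, y_op)
# Large dataset: predict in minibatches; the remainder batch is appended
# internally, so len(pred_batched) == len(X_test).
pred_batched = tl.utils.predict(sess, network, X_test, x, y_op, batch_size=64)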
tensorlayer/tensorlayer | tensorlayer/utils.py | evaluation | def evaluation(y_test=None, y_predict=None, n_classes=None):
"""
Input the predicted results, target results and
the number of classes; return the confusion matrix, F1-score of each class,
accuracy and macro F1-score.
Parameters
----------
y_test : list
The target results
y_predict : list
The predicted results
n_classes : int
The number of classes
Examples
--------
>>> c_mat, f1, acc, f1_macro = tl.utils.evaluation(y_test, y_predict, n_classes)
"""
c_mat = confusion_matrix(y_test, y_predict, labels=[x for x in range(n_classes)])
f1 = f1_score(y_test, y_predict, average=None, labels=[x for x in range(n_classes)])
f1_macro = f1_score(y_test, y_predict, average='macro')
acc = accuracy_score(y_test, y_predict)
tl.logging.info('confusion matrix: \n%s' % c_mat)
tl.logging.info('f1-score : %s' % f1)
tl.logging.info('f1-score(macro) : %f' % f1_macro) # same output with > f1_score(y_true, y_pred, average='macro')
tl.logging.info('accuracy-score : %f' % acc)
return c_mat, f1, acc, f1_macro | python | def evaluation(y_test=None, y_predict=None, n_classes=None):
"""
Input the predicted results, target results and
the number of classes; return the confusion matrix, F1-score of each class,
accuracy and macro F1-score.
Parameters
----------
y_test : list
The target results
y_predict : list
The predicted results
n_classes : int
The number of classes
Examples
--------
>>> c_mat, f1, acc, f1_macro = tl.utils.evaluation(y_test, y_predict, n_classes)
"""
c_mat = confusion_matrix(y_test, y_predict, labels=[x for x in range(n_classes)])
f1 = f1_score(y_test, y_predict, average=None, labels=[x for x in range(n_classes)])
f1_macro = f1_score(y_test, y_predict, average='macro')
acc = accuracy_score(y_test, y_predict)
tl.logging.info('confusion matrix: \n%s' % c_mat)
tl.logging.info('f1-score : %s' % f1)
tl.logging.info('f1-score(macro) : %f' % f1_macro) # same output with > f1_score(y_true, y_pred, average='macro')
tl.logging.info('accuracy-score : %f' % acc)
return c_mat, f1, acc, f1_macro | [
"def",
"evaluation",
"(",
"y_test",
"=",
"None",
",",
"y_predict",
"=",
"None",
",",
"n_classes",
"=",
"None",
")",
":",
"c_mat",
"=",
"confusion_matrix",
"(",
"y_test",
",",
"y_predict",
",",
"labels",
"=",
"[",
"x",
"for",
"x",
"in",
"range",
"(",
"n_classes",
")",
"]",
")",
"f1",
"=",
"f1_score",
"(",
"y_test",
",",
"y_predict",
",",
"average",
"=",
"None",
",",
"labels",
"=",
"[",
"x",
"for",
"x",
"in",
"range",
"(",
"n_classes",
")",
"]",
")",
"f1_macro",
"=",
"f1_score",
"(",
"y_test",
",",
"y_predict",
",",
"average",
"=",
"'macro'",
")",
"acc",
"=",
"accuracy_score",
"(",
"y_test",
",",
"y_predict",
")",
"tl",
".",
"logging",
".",
"info",
"(",
"'confusion matrix: \\n%s'",
"%",
"c_mat",
")",
"tl",
".",
"logging",
".",
"info",
"(",
"'f1-score : %s'",
"%",
"f1",
")",
"tl",
".",
"logging",
".",
"info",
"(",
"'f1-score(macro) : %f'",
"%",
"f1_macro",
")",
"# same output with > f1_score(y_true, y_pred, average='macro')",
"tl",
".",
"logging",
".",
"info",
"(",
"'accuracy-score : %f'",
"%",
"acc",
")",
"return",
"c_mat",
",",
"f1",
",",
"acc",
",",
"f1_macro"
] | Input the predicted results, target results and
the number of classes; return the confusion matrix, F1-score of each class,
accuracy and macro F1-score.
Parameters
----------
y_test : list
The target results
y_predict : list
The predicted results
n_classes : int
The number of classes
Examples
--------
>>> c_mat, f1, acc, f1_macro = tl.utils.evaluation(y_test, y_predict, n_classes) | [
"Input",
"the",
"predicted",
"results",
"targets",
"results",
"and",
"the",
"number",
"of",
"class",
"return",
"the",
"confusion",
"matrix",
"F1",
"-",
"score",
"of",
"each",
"class",
"accuracy",
"and",
"macro",
"F1",
"-",
"score",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/utils.py#L360-L388 | valid |
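A hedged follow-on sketch that feeds the output of `predict` into `evaluation`; it reuses the assumed objects from the previous sketch plus integer ground-truth labels `y_test`, and the 10 classes are only an example value.
import tensorlayer as tl
# `sess`, `network`, `X_test`, `x`, `y_op` as in the predict sketch above.
y_pred = tl.utils.predict(sess, network, X_test, x, y_op)
# Returns the confusion matrix, per-class F1, accuracy and macro F1.
c_mat, f1, acc, f1_macro = tl.utils.evaluation(y_test, y_pred, n_classes=10)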
tensorlayer/tensorlayer | tensorlayer/utils.py | class_balancing_oversample | def class_balancing_oversample(X_train=None, y_train=None, printable=True):
"""Input the features and labels, return the features and labels after oversampling.
Parameters
----------
X_train : numpy.array
The inputs.
y_train : numpy.array
The targets.
Examples
--------
One X
>>> X_train, y_train = class_balancing_oversample(X_train, y_train, printable=True)
Two X
>>> X, y = tl.utils.class_balancing_oversample(X_train=np.hstack((X1, X2)), y_train=y, printable=False)
>>> X1 = X[:, 0:5]
>>> X2 = X[:, 5:]
"""
# ======== Classes balancing
if printable:
tl.logging.info("Classes balancing for training examples...")
c = Counter(y_train)
if printable:
tl.logging.info('the occurrence number of each stage: %s' % c.most_common())
tl.logging.info('the least stage is Label %s have %s instances' % c.most_common()[-1])
tl.logging.info('the most stage is Label %s have %s instances' % c.most_common(1)[0])
most_num = c.most_common(1)[0][1]
if printable:
tl.logging.info('most num is %d, all classes tend to be this num' % most_num)
locations = {}
number = {}
for lab, num in c.most_common(): # find the index from y_train
number[lab] = num
locations[lab] = np.where(np.array(y_train) == lab)[0]
if printable:
tl.logging.info('convert list(np.array) to dict format')
X = {} # convert list to dict
for lab, num in number.items():
X[lab] = X_train[locations[lab]]
# oversampling
if printable:
tl.logging.info('start oversampling')
for key in X:
temp = X[key]
while True:
if len(X[key]) >= most_num:
break
X[key] = np.vstack((X[key], temp))
if printable:
tl.logging.info('first features of label 0 > %d' % len(X[0][0]))
tl.logging.info('the occurrence num of each stage after oversampling')
for key in X:
tl.logging.info("%s %d" % (key, len(X[key])))
if printable:
tl.logging.info('make each stage have same num of instances')
for key in X:
X[key] = X[key][0:most_num, :]
tl.logging.info("%s %d" % (key, len(X[key])))
# convert dict to list
if printable:
tl.logging.info('convert from dict to list format')
y_train = []
X_train = np.empty(shape=(0, len(X[0][0])))
for key in X:
X_train = np.vstack((X_train, X[key]))
y_train.extend([key for i in range(len(X[key]))])
# tl.logging.info(len(X_train), len(y_train))
c = Counter(y_train)
if printable:
tl.logging.info('the occurrence number of each stage after oversampling: %s' % c.most_common())
# ================ End of Classes balancing
return X_train, y_train | python | def class_balancing_oversample(X_train=None, y_train=None, printable=True):
"""Input the features and labels, return the features and labels after oversampling.
Parameters
----------
X_train : numpy.array
The inputs.
y_train : numpy.array
The targets.
Examples
--------
One X
>>> X_train, y_train = class_balancing_oversample(X_train, y_train, printable=True)
Two X
>>> X, y = tl.utils.class_balancing_oversample(X_train=np.hstack((X1, X2)), y_train=y, printable=False)
>>> X1 = X[:, 0:5]
>>> X2 = X[:, 5:]
"""
# ======== Classes balancing
if printable:
tl.logging.info("Classes balancing for training examples...")
c = Counter(y_train)
if printable:
tl.logging.info('the occurrence number of each stage: %s' % c.most_common())
tl.logging.info('the least stage is Label %s have %s instances' % c.most_common()[-1])
tl.logging.info('the most stage is Label %s have %s instances' % c.most_common(1)[0])
most_num = c.most_common(1)[0][1]
if printable:
tl.logging.info('most num is %d, all classes tend to be this num' % most_num)
locations = {}
number = {}
for lab, num in c.most_common(): # find the index from y_train
number[lab] = num
locations[lab] = np.where(np.array(y_train) == lab)[0]
if printable:
tl.logging.info('convert list(np.array) to dict format')
X = {} # convert list to dict
for lab, num in number.items():
X[lab] = X_train[locations[lab]]
# oversampling
if printable:
tl.logging.info('start oversampling')
for key in X:
temp = X[key]
while True:
if len(X[key]) >= most_num:
break
X[key] = np.vstack((X[key], temp))
if printable:
tl.logging.info('first features of label 0 > %d' % len(X[0][0]))
tl.logging.info('the occurrence num of each stage after oversampling')
for key in X:
tl.logging.info("%s %d" % (key, len(X[key])))
if printable:
tl.logging.info('make each stage have same num of instances')
for key in X:
X[key] = X[key][0:most_num, :]
tl.logging.info("%s %d" % (key, len(X[key])))
# convert dict to list
if printable:
tl.logging.info('convert from dict to list format')
y_train = []
X_train = np.empty(shape=(0, len(X[0][0])))
for key in X:
X_train = np.vstack((X_train, X[key]))
y_train.extend([key for i in range(len(X[key]))])
# tl.logging.info(len(X_train), len(y_train))
c = Counter(y_train)
if printable:
tl.logging.info('the occurrence number of each stage after oversampling: %s' % c.most_common())
# ================ End of Classes balancing
return X_train, y_train | [
"def",
"class_balancing_oversample",
"(",
"X_train",
"=",
"None",
",",
"y_train",
"=",
"None",
",",
"printable",
"=",
"True",
")",
":",
"# ======== Classes balancing",
"if",
"printable",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"\"Classes balancing for training examples...\"",
")",
"c",
"=",
"Counter",
"(",
"y_train",
")",
"if",
"printable",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"'the occurrence number of each stage: %s'",
"%",
"c",
".",
"most_common",
"(",
")",
")",
"tl",
".",
"logging",
".",
"info",
"(",
"'the least stage is Label %s have %s instances'",
"%",
"c",
".",
"most_common",
"(",
")",
"[",
"-",
"1",
"]",
")",
"tl",
".",
"logging",
".",
"info",
"(",
"'the most stage is Label %s have %s instances'",
"%",
"c",
".",
"most_common",
"(",
"1",
")",
"[",
"0",
"]",
")",
"most_num",
"=",
"c",
".",
"most_common",
"(",
"1",
")",
"[",
"0",
"]",
"[",
"1",
"]",
"if",
"printable",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"'most num is %d, all classes tend to be this num'",
"%",
"most_num",
")",
"locations",
"=",
"{",
"}",
"number",
"=",
"{",
"}",
"for",
"lab",
",",
"num",
"in",
"c",
".",
"most_common",
"(",
")",
":",
"# find the index from y_train",
"number",
"[",
"lab",
"]",
"=",
"num",
"locations",
"[",
"lab",
"]",
"=",
"np",
".",
"where",
"(",
"np",
".",
"array",
"(",
"y_train",
")",
"==",
"lab",
")",
"[",
"0",
"]",
"if",
"printable",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"'convert list(np.array) to dict format'",
")",
"X",
"=",
"{",
"}",
"# convert list to dict",
"for",
"lab",
",",
"num",
"in",
"number",
".",
"items",
"(",
")",
":",
"X",
"[",
"lab",
"]",
"=",
"X_train",
"[",
"locations",
"[",
"lab",
"]",
"]",
"# oversampling",
"if",
"printable",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"'start oversampling'",
")",
"for",
"key",
"in",
"X",
":",
"temp",
"=",
"X",
"[",
"key",
"]",
"while",
"True",
":",
"if",
"len",
"(",
"X",
"[",
"key",
"]",
")",
">=",
"most_num",
":",
"break",
"X",
"[",
"key",
"]",
"=",
"np",
".",
"vstack",
"(",
"(",
"X",
"[",
"key",
"]",
",",
"temp",
")",
")",
"if",
"printable",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"'first features of label 0 > %d'",
"%",
"len",
"(",
"X",
"[",
"0",
"]",
"[",
"0",
"]",
")",
")",
"tl",
".",
"logging",
".",
"info",
"(",
"'the occurrence num of each stage after oversampling'",
")",
"for",
"key",
"in",
"X",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"\"%s %d\"",
"%",
"(",
"key",
",",
"len",
"(",
"X",
"[",
"key",
"]",
")",
")",
")",
"if",
"printable",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"'make each stage have same num of instances'",
")",
"for",
"key",
"in",
"X",
":",
"X",
"[",
"key",
"]",
"=",
"X",
"[",
"key",
"]",
"[",
"0",
":",
"most_num",
",",
":",
"]",
"tl",
".",
"logging",
".",
"info",
"(",
"\"%s %d\"",
"%",
"(",
"key",
",",
"len",
"(",
"X",
"[",
"key",
"]",
")",
")",
")",
"# convert dict to list",
"if",
"printable",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"'convert from dict to list format'",
")",
"y_train",
"=",
"[",
"]",
"X_train",
"=",
"np",
".",
"empty",
"(",
"shape",
"=",
"(",
"0",
",",
"len",
"(",
"X",
"[",
"0",
"]",
"[",
"0",
"]",
")",
")",
")",
"for",
"key",
"in",
"X",
":",
"X_train",
"=",
"np",
".",
"vstack",
"(",
"(",
"X_train",
",",
"X",
"[",
"key",
"]",
")",
")",
"y_train",
".",
"extend",
"(",
"[",
"key",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"X",
"[",
"key",
"]",
")",
")",
"]",
")",
"# tl.logging.info(len(X_train), len(y_train))",
"c",
"=",
"Counter",
"(",
"y_train",
")",
"if",
"printable",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"'the occurrence number of each stage after oversampling: %s'",
"%",
"c",
".",
"most_common",
"(",
")",
")",
"# ================ End of Classes balancing",
"return",
"X_train",
",",
"y_train"
] | Input the features and labels, return the features and labels after oversampling.
Parameters
----------
X_train : numpy.array
The inputs.
y_train : numpy.array
The targets.
Examples
--------
One X
>>> X_train, y_train = class_balancing_oversample(X_train, y_train, printable=True)
Two X
>>> X, y = tl.utils.class_balancing_oversample(X_train=np.hstack((X1, X2)), y_train=y, printable=False)
>>> X1 = X[:, 0:5]
>>> X2 = X[:, 5:] | [
"Input",
"the",
"features",
"and",
"labels",
"return",
"the",
"features",
"and",
"labels",
"after",
"oversampling",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/utils.py#L427-L511 | valid |
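A small self-contained sketch of `class_balancing_oversample` on made-up data, to make the resampling behaviour concrete; the feature values and class counts are arbitrary.
import numpy as np
import tensorlayer as tl
# Toy imbalanced set: six samples of class 0, two of class 1, four features each.
X_train = np.random.rand(8, 4)
y_train = np.array([0, 0, 0, 0, 0, 0, 1, 1])
X_bal, y_bal = tl.utils.class_balancing_oversample(X_train, y_train, printable=False)
# Class-1 rows are repeated until both classes have six samples, so
# X_bal.shape == (12, 4) and y_bal holds six 0s and six 1s.
print(X_bal.shape, sorted(y_bal))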
tensorlayer/tensorlayer | tensorlayer/utils.py | get_random_int | def get_random_int(min_v=0, max_v=10, number=5, seed=None):
"""Return a list of random integer by the given range and quantity.
Parameters
-----------
min_v : number
The minimum value.
max_v : number
The maximum value.
number : int
Number of value.
seed : int or None
The seed for random.
Examples
---------
>>> r = get_random_int(min_v=0, max_v=10, number=5)
[10, 2, 3, 3, 7]
"""
rnd = random.Random()
if seed:
rnd = random.Random(seed)
# return [random.randint(min,max) for p in range(0, number)]
return [rnd.randint(min_v, max_v) for p in range(0, number)] | python | def get_random_int(min_v=0, max_v=10, number=5, seed=None):
"""Return a list of random integer by the given range and quantity.
Parameters
-----------
min_v : number
The minimum value.
max_v : number
The maximum value.
number : int
Number of value.
seed : int or None
The seed for random.
Examples
---------
>>> r = get_random_int(min_v=0, max_v=10, number=5)
[10, 2, 3, 3, 7]
"""
rnd = random.Random()
if seed:
rnd = random.Random(seed)
# return [random.randint(min,max) for p in range(0, number)]
return [rnd.randint(min_v, max_v) for p in range(0, number)] | [
"def",
"get_random_int",
"(",
"min_v",
"=",
"0",
",",
"max_v",
"=",
"10",
",",
"number",
"=",
"5",
",",
"seed",
"=",
"None",
")",
":",
"rnd",
"=",
"random",
".",
"Random",
"(",
")",
"if",
"seed",
":",
"rnd",
"=",
"random",
".",
"Random",
"(",
"seed",
")",
"# return [random.randint(min,max) for p in range(0, number)]",
"return",
"[",
"rnd",
".",
"randint",
"(",
"min_v",
",",
"max_v",
")",
"for",
"p",
"in",
"range",
"(",
"0",
",",
"number",
")",
"]"
] | Return a list of random integers within the given range and of the given quantity.
Parameters
-----------
min_v : number
The minimum value.
max_v : number
The maximum value.
number : int
Number of value.
seed : int or None
The seed for random.
Examples
---------
>>> r = get_random_int(min_v=0, max_v=10, number=5)
[10, 2, 3, 3, 7] | [
"Return",
"a",
"list",
"of",
"random",
"integer",
"by",
"the",
"given",
"range",
"and",
"quantity",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/utils.py#L515-L539 | valid |
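A short sketch of the `seed` argument of `get_random_int`, which the docstring example above does not show; the seed value 42 is arbitrary, and the helper is assumed to be exposed as `tl.utils.get_random_int` like the other functions in this file.
import tensorlayer as tl
# With a fixed seed the helper rebuilds the generator on each call,
# so repeated calls return the same list (useful for reproducible tests).
a = tl.utils.get_random_int(min_v=0, max_v=10, number=5, seed=42)
b = tl.utils.get_random_int(min_v=0, max_v=10, number=5, seed=42)
assert a == b
# Without a seed, successive calls will generally differ.
c = tl.utils.get_random_int(min_v=0, max_v=10, number=5)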
tensorlayer/tensorlayer | tensorlayer/utils.py | list_string_to_dict | def list_string_to_dict(string):
"""Inputs ``['a', 'b', 'c']``, returns ``{'a': 0, 'b': 1, 'c': 2}``."""
dictionary = {}
for idx, c in enumerate(string):
dictionary.update({c: idx})
return dictionary | python | def list_string_to_dict(string):
"""Inputs ``['a', 'b', 'c']``, returns ``{'a': 0, 'b': 1, 'c': 2}``."""
dictionary = {}
for idx, c in enumerate(string):
dictionary.update({c: idx})
return dictionary | [
"def",
"list_string_to_dict",
"(",
"string",
")",
":",
"dictionary",
"=",
"{",
"}",
"for",
"idx",
",",
"c",
"in",
"enumerate",
"(",
"string",
")",
":",
"dictionary",
".",
"update",
"(",
"{",
"c",
":",
"idx",
"}",
")",
"return",
"dictionary"
] | Inputs ``['a', 'b', 'c']``, returns ``{'a': 0, 'b': 1, 'c': 2}``. | [
"Inputs",
"[",
"a",
"b",
"c",
"]",
"returns",
"{",
"a",
":",
"0",
"b",
":",
"1",
"c",
":",
"2",
"}",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/utils.py#L542-L547 | valid |
tensorlayer/tensorlayer | tensorlayer/utils.py | exit_tensorflow | def exit_tensorflow(sess=None, port=6006):
"""Close TensorFlow session, TensorBoard and Nvidia-process if available.
Parameters
----------
sess : Session
TensorFlow Session.
port : int
TensorBoard port you want to close, `6006` as default.
"""
text = "[TL] Close tensorboard and nvidia-process if available"
text2 = "[TL] Close tensorboard and nvidia-process not yet supported by this function (tl.ops.exit_tf) on "
if sess is not None:
sess.close()
if _platform == "linux" or _platform == "linux2":
tl.logging.info('linux: %s' % text)
os.system('nvidia-smi')
os.system('fuser ' + str(port) + '/tcp -k') # kill tensorboard 6006
os.system("nvidia-smi | grep python |awk '{print $3}'|xargs kill") # kill all nvidia-smi python process
_exit()
elif _platform == "darwin":
tl.logging.info('OS X: %s' % text)
subprocess.Popen(
"lsof -i tcp:" + str(port) + " | grep -v PID | awk '{print $2}' | xargs kill", shell=True
) # kill tensorboard
elif _platform == "win32":
raise NotImplementedError("this function is not supported on the Windows platform")
else:
tl.logging.info(text2 + _platform) | python | def exit_tensorflow(sess=None, port=6006):
"""Close TensorFlow session, TensorBoard and Nvidia-process if available.
Parameters
----------
sess : Session
TensorFlow Session.
port : int
TensorBoard port you want to close, `6006` as default.
"""
text = "[TL] Close tensorboard and nvidia-process if available"
text2 = "[TL] Close tensorboard and nvidia-process not yet supported by this function (tl.ops.exit_tf) on "
if sess is not None:
sess.close()
if _platform == "linux" or _platform == "linux2":
tl.logging.info('linux: %s' % text)
os.system('nvidia-smi')
os.system('fuser ' + str(port) + '/tcp -k') # kill tensorboard 6006
os.system("nvidia-smi | grep python |awk '{print $3}'|xargs kill") # kill all nvidia-smi python process
_exit()
elif _platform == "darwin":
tl.logging.info('OS X: %s' % text)
subprocess.Popen(
"lsof -i tcp:" + str(port) + " | grep -v PID | awk '{print $2}' | xargs kill", shell=True
) # kill tensorboard
elif _platform == "win32":
raise NotImplementedError("this function is not supported on the Windows platform")
else:
tl.logging.info(text2 + _platform) | [
"def",
"exit_tensorflow",
"(",
"sess",
"=",
"None",
",",
"port",
"=",
"6006",
")",
":",
"text",
"=",
"\"[TL] Close tensorboard and nvidia-process if available\"",
"text2",
"=",
"\"[TL] Close tensorboard and nvidia-process not yet supported by this function (tl.ops.exit_tf) on \"",
"if",
"sess",
"is",
"not",
"None",
":",
"sess",
".",
"close",
"(",
")",
"if",
"_platform",
"==",
"\"linux\"",
"or",
"_platform",
"==",
"\"linux2\"",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"'linux: %s'",
"%",
"text",
")",
"os",
".",
"system",
"(",
"'nvidia-smi'",
")",
"os",
".",
"system",
"(",
"'fuser '",
"+",
"port",
"+",
"'/tcp -k'",
")",
"# kill tensorboard 6006",
"os",
".",
"system",
"(",
"\"nvidia-smi | grep python |awk '{print $3}'|xargs kill\"",
")",
"# kill all nvidia-smi python process",
"_exit",
"(",
")",
"elif",
"_platform",
"==",
"\"darwin\"",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"'OS X: %s'",
"%",
"text",
")",
"subprocess",
".",
"Popen",
"(",
"\"lsof -i tcp:\"",
"+",
"str",
"(",
"port",
")",
"+",
"\" | grep -v PID | awk '{print $2}' | xargs kill\"",
",",
"shell",
"=",
"True",
")",
"# kill tensorboard",
"elif",
"_platform",
"==",
"\"win32\"",
":",
"raise",
"NotImplementedError",
"(",
"\"this function is not supported on the Windows platform\"",
")",
"else",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"text2",
"+",
"_platform",
")"
] | Close TensorFlow session, TensorBoard and Nvidia-process if available.
Parameters
----------
sess : Session
TensorFlow Session.
port : int
TensorBoard port you want to close, `6006` as default. | [
"Close",
"TensorFlow",
"session",
"TensorBoard",
"and",
"Nvidia",
"-",
"process",
"if",
"available",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/utils.py#L550-L583 | valid |
tensorlayer/tensorlayer | tensorlayer/utils.py | open_tensorboard | def open_tensorboard(log_dir='/tmp/tensorflow', port=6006):
"""Open Tensorboard.
Parameters
----------
log_dir : str
Directory where your tensorboard logs are saved
port : int
TensorBoard port you want to open, 6006 is tensorboard default
"""
text = "[TL] Open tensorboard, go to localhost:" + str(port) + " to access"
text2 = " not yet supported by this function (tl.ops.open_tb)"
if not tl.files.exists_or_mkdir(log_dir, verbose=False):
tl.logging.info("[TL] Log reportory was created at %s" % log_dir)
if _platform == "linux" or _platform == "linux2":
raise NotImplementedError()
elif _platform == "darwin":
tl.logging.info('OS X: %s' % text)
subprocess.Popen(
sys.prefix + " | python -m tensorflow.tensorboard --logdir=" + log_dir + " --port=" + str(port), shell=True
) # open tensorboard in localhost:6006/ or whatever port you chose
elif _platform == "win32":
raise NotImplementedError("this function is not supported on the Windows platform")
else:
tl.logging.info(_platform + text2) | python | def open_tensorboard(log_dir='/tmp/tensorflow', port=6006):
"""Open Tensorboard.
Parameters
----------
log_dir : str
Directory where your tensorboard logs are saved
port : int
TensorBoard port you want to open, 6006 is tensorboard default
"""
text = "[TL] Open tensorboard, go to localhost:" + str(port) + " to access"
text2 = " not yet supported by this function (tl.ops.open_tb)"
if not tl.files.exists_or_mkdir(log_dir, verbose=False):
tl.logging.info("[TL] Log reportory was created at %s" % log_dir)
if _platform == "linux" or _platform == "linux2":
raise NotImplementedError()
elif _platform == "darwin":
tl.logging.info('OS X: %s' % text)
subprocess.Popen(
sys.prefix + " | python -m tensorflow.tensorboard --logdir=" + log_dir + " --port=" + str(port), shell=True
) # open tensorboard in localhost:6006/ or whatever port you chose
elif _platform == "win32":
raise NotImplementedError("this function is not supported on the Windows platform")
else:
tl.logging.info(_platform + text2) | [
"def",
"open_tensorboard",
"(",
"log_dir",
"=",
"'/tmp/tensorflow'",
",",
"port",
"=",
"6006",
")",
":",
"text",
"=",
"\"[TL] Open tensorboard, go to localhost:\"",
"+",
"str",
"(",
"port",
")",
"+",
"\" to access\"",
"text2",
"=",
"\" not yet supported by this function (tl.ops.open_tb)\"",
"if",
"not",
"tl",
".",
"files",
".",
"exists_or_mkdir",
"(",
"log_dir",
",",
"verbose",
"=",
"False",
")",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"\"[TL] Log reportory was created at %s\"",
"%",
"log_dir",
")",
"if",
"_platform",
"==",
"\"linux\"",
"or",
"_platform",
"==",
"\"linux2\"",
":",
"raise",
"NotImplementedError",
"(",
")",
"elif",
"_platform",
"==",
"\"darwin\"",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"'OS X: %s'",
"%",
"text",
")",
"subprocess",
".",
"Popen",
"(",
"sys",
".",
"prefix",
"+",
"\" | python -m tensorflow.tensorboard --logdir=\"",
"+",
"log_dir",
"+",
"\" --port=\"",
"+",
"str",
"(",
"port",
")",
",",
"shell",
"=",
"True",
")",
"# open tensorboard in localhost:6006/ or whatever port you chose",
"elif",
"_platform",
"==",
"\"win32\"",
":",
"raise",
"NotImplementedError",
"(",
"\"this function is not supported on the Windows platform\"",
")",
"else",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"_platform",
"+",
"text2",
")"
] | Open Tensorboard.
Parameters
----------
log_dir : str
Directory where your tensorboard logs are saved
port : int
TensorBoard port you want to open, 6006 is tensorboard default | [
"Open",
"Tensorboard",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/utils.py#L586-L613 | valid |
tensorlayer/tensorlayer | tensorlayer/utils.py | clear_all_placeholder_variables | def clear_all_placeholder_variables(printable=True):
"""Clears all the placeholder variables of keep prob,
including keeping probabilities of all dropout, denoising, dropconnect etc.
Parameters
----------
printable : boolean
If True, print all deleted variables.
"""
tl.logging.info('clear all .....................................')
gl = globals().copy()
for var in gl:
if var[0] == '_': continue
if 'func' in str(globals()[var]): continue
if 'module' in str(globals()[var]): continue
if 'class' in str(globals()[var]): continue
if printable:
tl.logging.info(" clear_all ------- %s" % str(globals()[var]))
del globals()[var] | python | def clear_all_placeholder_variables(printable=True):
"""Clears all the placeholder variables of keep prob,
including keeping probabilities of all dropout, denoising, dropconnect etc.
Parameters
----------
printable : boolean
If True, print all deleted variables.
"""
tl.logging.info('clear all .....................................')
gl = globals().copy()
for var in gl:
if var[0] == '_': continue
if 'func' in str(globals()[var]): continue
if 'module' in str(globals()[var]): continue
if 'class' in str(globals()[var]): continue
if printable:
tl.logging.info(" clear_all ------- %s" % str(globals()[var]))
del globals()[var] | [
"def",
"clear_all_placeholder_variables",
"(",
"printable",
"=",
"True",
")",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"'clear all .....................................'",
")",
"gl",
"=",
"globals",
"(",
")",
".",
"copy",
"(",
")",
"for",
"var",
"in",
"gl",
":",
"if",
"var",
"[",
"0",
"]",
"==",
"'_'",
":",
"continue",
"if",
"'func'",
"in",
"str",
"(",
"globals",
"(",
")",
"[",
"var",
"]",
")",
":",
"continue",
"if",
"'module'",
"in",
"str",
"(",
"globals",
"(",
")",
"[",
"var",
"]",
")",
":",
"continue",
"if",
"'class'",
"in",
"str",
"(",
"globals",
"(",
")",
"[",
"var",
"]",
")",
":",
"continue",
"if",
"printable",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"\" clear_all ------- %s\"",
"%",
"str",
"(",
"globals",
"(",
")",
"[",
"var",
"]",
")",
")",
"del",
"globals",
"(",
")",
"[",
"var",
"]"
] | Clears all the placeholder variables of keep prob,
including keeping probabilities of all dropout, denoising, dropconnect etc.
Parameters
----------
printable : boolean
If True, print all deleted variables. | [
"Clears",
"all",
"the",
"placeholder",
"variables",
"of",
"keep",
"prob",
"including",
"keeping",
"probabilities",
"of",
"all",
"dropout",
"denoising",
"dropconnect",
"etc",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/utils.py#L616-L637 | valid |
tensorlayer/tensorlayer | tensorlayer/utils.py | set_gpu_fraction | def set_gpu_fraction(gpu_fraction=0.3):
"""Set the GPU memory fraction for the application.
Parameters
----------
gpu_fraction : float
Fraction of GPU memory, (0 ~ 1]
References
----------
- `TensorFlow using GPU <https://www.tensorflow.org/versions/r0.9/how_tos/using_gpu/index.html>`__
"""
tl.logging.info("[TL]: GPU MEM Fraction %f" % gpu_fraction)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
return sess | python | def set_gpu_fraction(gpu_fraction=0.3):
"""Set the GPU memory fraction for the application.
Parameters
----------
gpu_fraction : float
Fraction of GPU memory, (0 ~ 1]
References
----------
- `TensorFlow using GPU <https://www.tensorflow.org/versions/r0.9/how_tos/using_gpu/index.html>`__
"""
tl.logging.info("[TL]: GPU MEM Fraction %f" % gpu_fraction)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
return sess | [
"def",
"set_gpu_fraction",
"(",
"gpu_fraction",
"=",
"0.3",
")",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"\"[TL]: GPU MEM Fraction %f\"",
"%",
"gpu_fraction",
")",
"gpu_options",
"=",
"tf",
".",
"GPUOptions",
"(",
"per_process_gpu_memory_fraction",
"=",
"gpu_fraction",
")",
"sess",
"=",
"tf",
".",
"Session",
"(",
"config",
"=",
"tf",
".",
"ConfigProto",
"(",
"gpu_options",
"=",
"gpu_options",
")",
")",
"return",
"sess"
] | Set the GPU memory fraction for the application.
Parameters
----------
gpu_fraction : float
Fraction of GPU memory, (0 ~ 1]
References
----------
- `TensorFlow using GPU <https://www.tensorflow.org/versions/r0.9/how_tos/using_gpu/index.html>`__ | [
"Set",
"the",
"GPU",
"memory",
"fraction",
"for",
"the",
"application",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/utils.py#L640-L656 | valid |
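A minimal sketch of `set_gpu_fraction`; the 0.5 fraction is only an example value, and the helper is assumed to be exposed as `tl.utils.set_gpu_fraction` like the other functions in this file.
import tensorlayer as tl
# Reserve roughly half of the GPU memory for this process and reuse the
# returned session for the rest of the program.
sess = tl.utils.set_gpu_fraction(gpu_fraction=0.5)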
tensorlayer/tensorlayer | tensorlayer/nlp.py | generate_skip_gram_batch | def generate_skip_gram_batch(data, batch_size, num_skips, skip_window, data_index=0):
"""Generate a training batch for the Skip-Gram model.
See `Word2Vec example <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_word2vec_basic.py>`__.
Parameters
----------
data : list of data
To present context, usually a list of integers.
batch_size : int
Batch size to return.
num_skips : int
How many times to reuse an input to generate a label.
skip_window : int
How many words to consider left and right.
data_index : int
Index of the context location. This code use `data_index` to instead of yield like ``tl.iterate``.
Returns
-------
batch : list of data
Inputs.
labels : list of data
Labels
data_index : int
Index of the context location.
Examples
--------
Setting num_skips=2, skip_window=1, use the right and left words.
In the same way, num_skips=4, skip_window=2 means use the nearby 4 words.
>>> data = [1,2,3,4,5,6,7,8,9,10,11]
>>> batch, labels, data_index = tl.nlp.generate_skip_gram_batch(data=data, batch_size=8, num_skips=2, skip_window=1, data_index=0)
>>> print(batch)
[2 2 3 3 4 4 5 5]
>>> print(labels)
[[3]
[1]
[4]
[2]
[5]
[3]
[4]
[6]]
"""
# global data_index # you can put data_index outside the function, then
# modify the global data_index in the function without return it.
# note: without using yield, this code use data_index to instead.
if batch_size % num_skips != 0:
raise Exception("batch_size should be able to be divided by num_skips.")
if num_skips > 2 * skip_window:
raise Exception("num_skips <= 2 * skip_window")
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return batch, labels, data_index | python | def generate_skip_gram_batch(data, batch_size, num_skips, skip_window, data_index=0):
"""Generate a training batch for the Skip-Gram model.
See `Word2Vec example <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_word2vec_basic.py>`__.
Parameters
----------
data : list of data
To present context, usually a list of integers.
batch_size : int
Batch size to return.
num_skips : int
How many times to reuse an input to generate a label.
skip_window : int
How many words to consider left and right.
data_index : int
Index of the context location. This code use `data_index` to instead of yield like ``tl.iterate``.
Returns
-------
batch : list of data
Inputs.
labels : list of data
Labels
data_index : int
Index of the context location.
Examples
--------
Setting num_skips=2, skip_window=1, use the right and left words.
In the same way, num_skips=4, skip_window=2 means use the nearby 4 words.
>>> data = [1,2,3,4,5,6,7,8,9,10,11]
>>> batch, labels, data_index = tl.nlp.generate_skip_gram_batch(data=data, batch_size=8, num_skips=2, skip_window=1, data_index=0)
>>> print(batch)
[2 2 3 3 4 4 5 5]
>>> print(labels)
[[3]
[1]
[4]
[2]
[5]
[3]
[4]
[6]]
"""
# global data_index # you can put data_index outside the function, then
# modify the global data_index in the function without return it.
# note: without using yield, this code use data_index to instead.
if batch_size % num_skips != 0:
raise Exception("batch_size should be able to be divided by num_skips.")
if num_skips > 2 * skip_window:
raise Exception("num_skips <= 2 * skip_window")
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return batch, labels, data_index | [
"def",
"generate_skip_gram_batch",
"(",
"data",
",",
"batch_size",
",",
"num_skips",
",",
"skip_window",
",",
"data_index",
"=",
"0",
")",
":",
"# global data_index # you can put data_index outside the function, then",
"# modify the global data_index in the function without return it.",
"# note: without using yield, this code use data_index to instead.",
"if",
"batch_size",
"%",
"num_skips",
"!=",
"0",
":",
"raise",
"Exception",
"(",
"\"batch_size should be able to be divided by num_skips.\"",
")",
"if",
"num_skips",
">",
"2",
"*",
"skip_window",
":",
"raise",
"Exception",
"(",
"\"num_skips <= 2 * skip_window\"",
")",
"batch",
"=",
"np",
".",
"ndarray",
"(",
"shape",
"=",
"(",
"batch_size",
")",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"labels",
"=",
"np",
".",
"ndarray",
"(",
"shape",
"=",
"(",
"batch_size",
",",
"1",
")",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"span",
"=",
"2",
"*",
"skip_window",
"+",
"1",
"# [ skip_window target skip_window ]",
"buffer",
"=",
"collections",
".",
"deque",
"(",
"maxlen",
"=",
"span",
")",
"for",
"_",
"in",
"range",
"(",
"span",
")",
":",
"buffer",
".",
"append",
"(",
"data",
"[",
"data_index",
"]",
")",
"data_index",
"=",
"(",
"data_index",
"+",
"1",
")",
"%",
"len",
"(",
"data",
")",
"for",
"i",
"in",
"range",
"(",
"batch_size",
"//",
"num_skips",
")",
":",
"target",
"=",
"skip_window",
"# target label at the center of the buffer",
"targets_to_avoid",
"=",
"[",
"skip_window",
"]",
"for",
"j",
"in",
"range",
"(",
"num_skips",
")",
":",
"while",
"target",
"in",
"targets_to_avoid",
":",
"target",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"span",
"-",
"1",
")",
"targets_to_avoid",
".",
"append",
"(",
"target",
")",
"batch",
"[",
"i",
"*",
"num_skips",
"+",
"j",
"]",
"=",
"buffer",
"[",
"skip_window",
"]",
"labels",
"[",
"i",
"*",
"num_skips",
"+",
"j",
",",
"0",
"]",
"=",
"buffer",
"[",
"target",
"]",
"buffer",
".",
"append",
"(",
"data",
"[",
"data_index",
"]",
")",
"data_index",
"=",
"(",
"data_index",
"+",
"1",
")",
"%",
"len",
"(",
"data",
")",
"return",
"batch",
",",
"labels",
",",
"data_index"
] | Generate a training batch for the Skip-Gram model.
See `Word2Vec example <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_word2vec_basic.py>`__.
Parameters
----------
data : list of data
To present context, usually a list of integers.
batch_size : int
Batch size to return.
num_skips : int
How many times to reuse an input to generate a label.
skip_window : int
How many words to consider left and right.
data_index : int
Index of the context location. This code use `data_index` to instead of yield like ``tl.iterate``.
Returns
-------
batch : list of data
Inputs.
labels : list of data
Labels
data_index : int
Index of the context location.
Examples
--------
Setting num_skips=2, skip_window=1, use the right and left words.
In the same way, num_skips=4, skip_window=2 means use the nearby 4 words.
>>> data = [1,2,3,4,5,6,7,8,9,10,11]
>>> batch, labels, data_index = tl.nlp.generate_skip_gram_batch(data=data, batch_size=8, num_skips=2, skip_window=1, data_index=0)
>>> print(batch)
[2 2 3 3 4 4 5 5]
>>> print(labels)
[[3]
[1]
[4]
[2]
[5]
[3]
[4]
[6]] | [
"Generate",
"a",
"training",
"batch",
"for",
"the",
"Skip",
"-",
"Gram",
"model",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/nlp.py#L52-L125 | valid |
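A sketch of streaming several Skip-Gram batches from one corpus by passing the returned `data_index` back into the next call; the toy corpus is the one from the docstring example, and three steps are arbitrary.
import tensorlayer as tl
data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]   # toy corpus of word ids
data_index = 0
for step in range(3):
    # Each call yields 8 (center, context) pairs and the advanced cursor,
    # so the next call continues where this one stopped.
    batch, labels, data_index = tl.nlp.generate_skip_gram_batch(
        data=data, batch_size=8, num_skips=2, skip_window=1, data_index=data_index
    )
    print(step, batch, labels.flatten())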
tensorlayer/tensorlayer | tensorlayer/nlp.py | sample | def sample(a=None, temperature=1.0):
"""Sample an index from a probability array.
Parameters
----------
a : list of float
List of probabilities.
temperature : float or None
The higher the more uniform. When a = [0.1, 0.2, 0.7],
- temperature = 0.7, the distribution will be sharpen [0.05048273, 0.13588945, 0.81362782]
- temperature = 1.0, the distribution will be the same [0.1, 0.2, 0.7]
- temperature = 1.5, the distribution will be filtered [0.16008435, 0.25411807, 0.58579758]
- If None, it will be ``np.argmax(a)``
Notes
------
- No matter what is the temperature and input list, the sum of all probabilities will be one. Even if input list = [1, 100, 200], the sum of all probabilities will still be one.
- For large vocabulary size, choice a higher temperature or ``tl.nlp.sample_top`` to avoid error.
"""
if a is None:
raise Exception("a : list of float")
b = np.copy(a)
try:
if temperature == 1:
return np.argmax(np.random.multinomial(1, a, 1))
if temperature is None:
return np.argmax(a)
else:
a = np.log(a) / temperature
a = np.exp(a) / np.sum(np.exp(a))
return np.argmax(np.random.multinomial(1, a, 1))
except Exception:
# np.set_printoptions(threshold=np.nan)
# tl.logging.info(a)
# tl.logging.info(np.sum(a))
# tl.logging.info(np.max(a))
# tl.logging.info(np.min(a))
# exit()
message = "For large vocabulary_size, choice a higher temperature\
to avoid log error. Hint : use ``sample_top``. "
warnings.warn(message, Warning)
# tl.logging.info(a)
# tl.logging.info(b)
return np.argmax(np.random.multinomial(1, b, 1)) | python | def sample(a=None, temperature=1.0):
"""Sample an index from a probability array.
Parameters
----------
a : list of float
List of probabilities.
temperature : float or None
The higher the more uniform. When a = [0.1, 0.2, 0.7],
- temperature = 0.7, the distribution will be sharpen [0.05048273, 0.13588945, 0.81362782]
- temperature = 1.0, the distribution will be the same [0.1, 0.2, 0.7]
- temperature = 1.5, the distribution will be filtered [0.16008435, 0.25411807, 0.58579758]
- If None, it will be ``np.argmax(a)``
Notes
------
- No matter what the temperature and input list are, the sum of all probabilities will be one. Even if the input list is [1, 100, 200], the sum of all probabilities will still be one.
- For a large vocabulary size, choose a higher temperature or ``tl.nlp.sample_top`` to avoid errors.
"""
if a is None:
raise Exception("a : list of float")
b = np.copy(a)
try:
if temperature == 1:
return np.argmax(np.random.multinomial(1, a, 1))
if temperature is None:
return np.argmax(a)
else:
a = np.log(a) / temperature
a = np.exp(a) / np.sum(np.exp(a))
return np.argmax(np.random.multinomial(1, a, 1))
except Exception:
# np.set_printoptions(threshold=np.nan)
# tl.logging.info(a)
# tl.logging.info(np.sum(a))
# tl.logging.info(np.max(a))
# tl.logging.info(np.min(a))
# exit()
message = "For large vocabulary_size, choice a higher temperature\
to avoid log error. Hint : use ``sample_top``. "
warnings.warn(message, Warning)
# tl.logging.info(a)
# tl.logging.info(b)
return np.argmax(np.random.multinomial(1, b, 1)) | [
"def",
"sample",
"(",
"a",
"=",
"None",
",",
"temperature",
"=",
"1.0",
")",
":",
"if",
"a",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"a : list of float\"",
")",
"b",
"=",
"np",
".",
"copy",
"(",
"a",
")",
"try",
":",
"if",
"temperature",
"==",
"1",
":",
"return",
"np",
".",
"argmax",
"(",
"np",
".",
"random",
".",
"multinomial",
"(",
"1",
",",
"a",
",",
"1",
")",
")",
"if",
"temperature",
"is",
"None",
":",
"return",
"np",
".",
"argmax",
"(",
"a",
")",
"else",
":",
"a",
"=",
"np",
".",
"log",
"(",
"a",
")",
"/",
"temperature",
"a",
"=",
"np",
".",
"exp",
"(",
"a",
")",
"/",
"np",
".",
"sum",
"(",
"np",
".",
"exp",
"(",
"a",
")",
")",
"return",
"np",
".",
"argmax",
"(",
"np",
".",
"random",
".",
"multinomial",
"(",
"1",
",",
"a",
",",
"1",
")",
")",
"except",
"Exception",
":",
"# np.set_printoptions(threshold=np.nan)",
"# tl.logging.info(a)",
"# tl.logging.info(np.sum(a))",
"# tl.logging.info(np.max(a))",
"# tl.logging.info(np.min(a))",
"# exit()",
"message",
"=",
"\"For large vocabulary_size, choice a higher temperature\\\n to avoid log error. Hint : use ``sample_top``. \"",
"warnings",
".",
"warn",
"(",
"message",
",",
"Warning",
")",
"# tl.logging.info(a)",
"# tl.logging.info(b)",
"return",
"np",
".",
"argmax",
"(",
"np",
".",
"random",
".",
"multinomial",
"(",
"1",
",",
"b",
",",
"1",
")",
")"
] | Sample an index from a probability array.
Parameters
----------
a : list of float
List of probabilities.
temperature : float or None
The higher the more uniform. When a = [0.1, 0.2, 0.7],
- temperature = 0.7, the distribution will be sharpen [0.05048273, 0.13588945, 0.81362782]
- temperature = 1.0, the distribution will be the same [0.1, 0.2, 0.7]
- temperature = 1.5, the distribution will be filtered [0.16008435, 0.25411807, 0.58579758]
- If None, it will be ``np.argmax(a)``
Notes
------
- No matter what the temperature and input list are, the sum of all probabilities will be one. Even if the input list is [1, 100, 200], the sum of all probabilities will still be one.
- For a large vocabulary size, choose a higher temperature or ``tl.nlp.sample_top`` to avoid errors. | [
"Sample",
"an",
"index",
"from",
"a",
"probability",
"array",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/nlp.py#L128-L173 | valid |
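A NumPy-only sketch of the temperature rescaling that `sample` applies before drawing (log, divide by the temperature, softmax), checked against the numbers quoted in its docstring; only the arithmetic is shown, not the multinomial draw.
import numpy as np
def rescale(p, T):
    # Same transform as inside tl.nlp.sample: a = log(p) / T, then softmax.
    a = np.log(p) / T
    return np.exp(a) / np.sum(np.exp(a))
p = np.array([0.1, 0.2, 0.7])
print(rescale(p, 0.7))   # sharper:   ~[0.050, 0.136, 0.814]
print(rescale(p, 1.0))   # unchanged:  [0.1, 0.2, 0.7]
print(rescale(p, 1.5))   # flatter:   ~[0.160, 0.254, 0.586]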
tensorlayer/tensorlayer | tensorlayer/nlp.py | sample_top | def sample_top(a=None, top_k=10):
"""Sample from ``top_k`` probabilities.
Parameters
----------
a : list of float
List of probabilities.
top_k : int
Number of candidates to be considered.
"""
if a is None:
a = []
idx = np.argpartition(a, -top_k)[-top_k:]
probs = a[idx]
# tl.logging.info("new %f" % probs)
probs = probs / np.sum(probs)
choice = np.random.choice(idx, p=probs)
return choice | python | def sample_top(a=None, top_k=10):
"""Sample from ``top_k`` probabilities.
Parameters
----------
a : list of float
List of probabilities.
top_k : int
Number of candidates to be considered.
"""
if a is None:
a = []
idx = np.argpartition(a, -top_k)[-top_k:]
probs = a[idx]
# tl.logging.info("new %f" % probs)
probs = probs / np.sum(probs)
choice = np.random.choice(idx, p=probs)
return choice | [
"def",
"sample_top",
"(",
"a",
"=",
"None",
",",
"top_k",
"=",
"10",
")",
":",
"if",
"a",
"is",
"None",
":",
"a",
"=",
"[",
"]",
"idx",
"=",
"np",
".",
"argpartition",
"(",
"a",
",",
"-",
"top_k",
")",
"[",
"-",
"top_k",
":",
"]",
"probs",
"=",
"a",
"[",
"idx",
"]",
"# tl.logging.info(\"new %f\" % probs)",
"probs",
"=",
"probs",
"/",
"np",
".",
"sum",
"(",
"probs",
")",
"choice",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"idx",
",",
"p",
"=",
"probs",
")",
"return",
"choice"
] | Sample from ``top_k`` probabilities.
Parameters
----------
a : list of float
List of probabilities.
top_k : int
Number of candidates to be considered. | [
"Sample",
"from",
"top_k",
"probabilities",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/nlp.py#L176-L195 | valid |
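A small sketch of `sample_top` on a hand-made distribution; with `top_k=2` only the two most probable ids can be returned, re-normalised among themselves.
import numpy as np
import tensorlayer as tl
a = np.array([0.05, 0.4, 0.05, 0.3, 0.2])   # made-up scores over 5 words
# Only indices 1 and 3 (the two largest entries) are candidates, drawn
# with probabilities 0.4/0.7 and 0.3/0.7 respectively.
word_id = tl.nlp.sample_top(a, top_k=2)
assert word_id in (1, 3)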
tensorlayer/tensorlayer | tensorlayer/nlp.py | process_sentence | def process_sentence(sentence, start_word="<S>", end_word="</S>"):
"""Seperate a sentence string into a list of string words, add start_word and end_word,
see ``create_vocab()`` and ``tutorial_tfrecord3.py``.
Parameters
----------
sentence : str
A sentence.
start_word : str or None
The start word. If None, no start word will be appended.
end_word : str or None
The end word. If None, no end word will be appended.
Returns
---------
list of str
A list of strings that separated into words.
Examples
-----------
>>> c = "how are you?"
>>> c = tl.nlp.process_sentence(c)
>>> print(c)
['<S>', 'how', 'are', 'you', '?', '</S>']
Notes
-------
- You have to install the following package.
- `Installing NLTK <http://www.nltk.org/install.html>`__
- `Installing NLTK data <http://www.nltk.org/data.html>`__
"""
if start_word is not None:
process_sentence = [start_word]
else:
process_sentence = []
process_sentence.extend(nltk.tokenize.word_tokenize(sentence.lower()))
if end_word is not None:
process_sentence.append(end_word)
return process_sentence | python | def process_sentence(sentence, start_word="<S>", end_word="</S>"):
"""Seperate a sentence string into a list of string words, add start_word and end_word,
see ``create_vocab()`` and ``tutorial_tfrecord3.py``.
Parameters
----------
sentence : str
A sentence.
start_word : str or None
The start word. If None, no start word will be appended.
end_word : str or None
The end word. If None, no end word will be appended.
Returns
---------
list of str
A list of strings that separated into words.
Examples
-----------
>>> c = "how are you?"
>>> c = tl.nlp.process_sentence(c)
>>> print(c)
['<S>', 'how', 'are', 'you', '?', '</S>']
Notes
-------
- You have to install the following package.
- `Installing NLTK <http://www.nltk.org/install.html>`__
- `Installing NLTK data <http://www.nltk.org/data.html>`__
"""
if start_word is not None:
process_sentence = [start_word]
else:
process_sentence = []
process_sentence.extend(nltk.tokenize.word_tokenize(sentence.lower()))
if end_word is not None:
process_sentence.append(end_word)
return process_sentence | [
"def",
"process_sentence",
"(",
"sentence",
",",
"start_word",
"=",
"\"<S>\"",
",",
"end_word",
"=",
"\"</S>\"",
")",
":",
"if",
"start_word",
"is",
"not",
"None",
":",
"process_sentence",
"=",
"[",
"start_word",
"]",
"else",
":",
"process_sentence",
"=",
"[",
"]",
"process_sentence",
".",
"extend",
"(",
"nltk",
".",
"tokenize",
".",
"word_tokenize",
"(",
"sentence",
".",
"lower",
"(",
")",
")",
")",
"if",
"end_word",
"is",
"not",
"None",
":",
"process_sentence",
".",
"append",
"(",
"end_word",
")",
"return",
"process_sentence"
] | Separate a sentence string into a list of string words, add start_word and end_word,
see ``create_vocab()`` and ``tutorial_tfrecord3.py``.
Parameters
----------
sentence : str
A sentence.
start_word : str or None
The start word. If None, no start word will be appended.
end_word : str or None
The end word. If None, no end word will be appended.
Returns
---------
list of str
A list of strings that separated into words.
Examples
-----------
>>> c = "how are you?"
>>> c = tl.nlp.process_sentence(c)
>>> print(c)
['<S>', 'how', 'are', 'you', '?', '</S>']
Notes
-------
- You have to install the following package.
- `Installing NLTK <http://www.nltk.org/install.html>`__
- `Installing NLTK data <http://www.nltk.org/data.html>`__ | [
"Seperate",
"a",
"sentence",
"string",
"into",
"a",
"list",
"of",
"string",
"words",
"add",
"start_word",
"and",
"end_word",
"see",
"create_vocab",
"()",
"and",
"tutorial_tfrecord3",
".",
"py",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/nlp.py#L335-L375 | valid |
tensorlayer/tensorlayer | tensorlayer/nlp.py | create_vocab | def create_vocab(sentences, word_counts_output_file, min_word_count=1):
"""Creates the vocabulary of word to word_id.
See ``tutorial_tfrecord3.py``.
The vocabulary is saved to disk in a text file of word counts. The id of each
word in the file is its corresponding 0-based line number.
Parameters
------------
sentences : list of list of str
All sentences for creating the vocabulary.
word_counts_output_file : str
The file name.
min_word_count : int
Minimum number of occurrences for a word.
Returns
--------
:class:`SimpleVocabulary`
The simple vocabulary object, see :class:`Vocabulary` for more.
Examples
--------
Pre-process sentences
>>> captions = ["one two , three", "four five five"]
>>> processed_capts = []
>>> for c in captions:
>>> c = tl.nlp.process_sentence(c, start_word="<S>", end_word="</S>")
>>> processed_capts.append(c)
>>> print(processed_capts)
...[['<S>', 'one', 'two', ',', 'three', '</S>'], ['<S>', 'four', 'five', 'five', '</S>']]
Create vocabulary
>>> tl.nlp.create_vocab(processed_capts, word_counts_output_file='vocab.txt', min_word_count=1)
Creating vocabulary.
Total words: 8
Words in vocabulary: 8
Wrote vocabulary file: vocab.txt
Get vocabulary object
>>> vocab = tl.nlp.Vocabulary('vocab.txt', start_word="<S>", end_word="</S>", unk_word="<UNK>")
INFO:tensorflow:Initializing vocabulary from file: vocab.txt
[TL] Vocabulary from vocab.txt : <S> </S> <UNK>
vocabulary with 10 words (includes start_word, end_word, unk_word)
start_id: 2
end_id: 3
unk_id: 9
pad_id: 0
"""
tl.logging.info("Creating vocabulary.")
counter = Counter()
for c in sentences:
counter.update(c)
# tl.logging.info('c',c)
tl.logging.info(" Total words: %d" % len(counter))
# Filter uncommon words and sort by descending count.
word_counts = [x for x in counter.items() if x[1] >= min_word_count]
word_counts.sort(key=lambda x: x[1], reverse=True)
word_counts = [("<PAD>", 0)] + word_counts # 1st id should be reserved for padding
# tl.logging.info(word_counts)
tl.logging.info(" Words in vocabulary: %d" % len(word_counts))
# Write out the word counts file.
with tf.gfile.FastGFile(word_counts_output_file, "w") as f:
f.write("\n".join(["%s %d" % (w, c) for w, c in word_counts]))
tl.logging.info(" Wrote vocabulary file: %s" % word_counts_output_file)
# Create the vocabulary dictionary.
reverse_vocab = [x[0] for x in word_counts]
unk_id = len(reverse_vocab)
vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])
vocab = SimpleVocabulary(vocab_dict, unk_id)
return vocab | python | def create_vocab(sentences, word_counts_output_file, min_word_count=1):
"""Creates the vocabulary of word to word_id.
See ``tutorial_tfrecord3.py``.
The vocabulary is saved to disk in a text file of word counts. The id of each
word in the file is its corresponding 0-based line number.
Parameters
------------
sentences : list of list of str
All sentences for creating the vocabulary.
word_counts_output_file : str
The file name.
min_word_count : int
Minimum number of occurrences for a word.
Returns
--------
:class:`SimpleVocabulary`
The simple vocabulary object, see :class:`Vocabulary` for more.
Examples
--------
Pre-process sentences
>>> captions = ["one two , three", "four five five"]
>>> processed_capts = []
>>> for c in captions:
>>> c = tl.nlp.process_sentence(c, start_word="<S>", end_word="</S>")
>>> processed_capts.append(c)
>>> print(processed_capts)
...[['<S>', 'one', 'two', ',', 'three', '</S>'], ['<S>', 'four', 'five', 'five', '</S>']]
Create vocabulary
>>> tl.nlp.create_vocab(processed_capts, word_counts_output_file='vocab.txt', min_word_count=1)
Creating vocabulary.
Total words: 8
Words in vocabulary: 8
Wrote vocabulary file: vocab.txt
Get vocabulary object
>>> vocab = tl.nlp.Vocabulary('vocab.txt', start_word="<S>", end_word="</S>", unk_word="<UNK>")
INFO:tensorflow:Initializing vocabulary from file: vocab.txt
[TL] Vocabulary from vocab.txt : <S> </S> <UNK>
vocabulary with 10 words (includes start_word, end_word, unk_word)
start_id: 2
end_id: 3
unk_id: 9
pad_id: 0
"""
tl.logging.info("Creating vocabulary.")
counter = Counter()
for c in sentences:
counter.update(c)
# tl.logging.info('c',c)
tl.logging.info(" Total words: %d" % len(counter))
# Filter uncommon words and sort by descending count.
word_counts = [x for x in counter.items() if x[1] >= min_word_count]
word_counts.sort(key=lambda x: x[1], reverse=True)
word_counts = [("<PAD>", 0)] + word_counts # 1st id should be reserved for padding
# tl.logging.info(word_counts)
tl.logging.info(" Words in vocabulary: %d" % len(word_counts))
# Write out the word counts file.
with tf.gfile.FastGFile(word_counts_output_file, "w") as f:
f.write("\n".join(["%s %d" % (w, c) for w, c in word_counts]))
tl.logging.info(" Wrote vocabulary file: %s" % word_counts_output_file)
# Create the vocabulary dictionary.
reverse_vocab = [x[0] for x in word_counts]
unk_id = len(reverse_vocab)
vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])
vocab = SimpleVocabulary(vocab_dict, unk_id)
return vocab | [
"def",
"create_vocab",
"(",
"sentences",
",",
"word_counts_output_file",
",",
"min_word_count",
"=",
"1",
")",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"\"Creating vocabulary.\"",
")",
"counter",
"=",
"Counter",
"(",
")",
"for",
"c",
"in",
"sentences",
":",
"counter",
".",
"update",
"(",
"c",
")",
"# tl.logging.info('c',c)",
"tl",
".",
"logging",
".",
"info",
"(",
"\" Total words: %d\"",
"%",
"len",
"(",
"counter",
")",
")",
"# Filter uncommon words and sort by descending count.",
"word_counts",
"=",
"[",
"x",
"for",
"x",
"in",
"counter",
".",
"items",
"(",
")",
"if",
"x",
"[",
"1",
"]",
">=",
"min_word_count",
"]",
"word_counts",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
",",
"reverse",
"=",
"True",
")",
"word_counts",
"=",
"[",
"(",
"\"<PAD>\"",
",",
"0",
")",
"]",
"+",
"word_counts",
"# 1st id should be reserved for padding",
"# tl.logging.info(word_counts)",
"tl",
".",
"logging",
".",
"info",
"(",
"\" Words in vocabulary: %d\"",
"%",
"len",
"(",
"word_counts",
")",
")",
"# Write out the word counts file.",
"with",
"tf",
".",
"gfile",
".",
"FastGFile",
"(",
"word_counts_output_file",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"\\n\"",
".",
"join",
"(",
"[",
"\"%s %d\"",
"%",
"(",
"w",
",",
"c",
")",
"for",
"w",
",",
"c",
"in",
"word_counts",
"]",
")",
")",
"tl",
".",
"logging",
".",
"info",
"(",
"\" Wrote vocabulary file: %s\"",
"%",
"word_counts_output_file",
")",
"# Create the vocabulary dictionary.",
"reverse_vocab",
"=",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"word_counts",
"]",
"unk_id",
"=",
"len",
"(",
"reverse_vocab",
")",
"vocab_dict",
"=",
"dict",
"(",
"[",
"(",
"x",
",",
"y",
")",
"for",
"(",
"y",
",",
"x",
")",
"in",
"enumerate",
"(",
"reverse_vocab",
")",
"]",
")",
"vocab",
"=",
"SimpleVocabulary",
"(",
"vocab_dict",
",",
"unk_id",
")",
"return",
"vocab"
] | Creates the vocabulary of word to word_id.
See ``tutorial_tfrecord3.py``.
The vocabulary is saved to disk in a text file of word counts. The id of each
word in the file is its corresponding 0-based line number.
Parameters
------------
sentences : list of list of str
All sentences for creating the vocabulary.
word_counts_output_file : str
The file name.
min_word_count : int
Minimum number of occurrences for a word.
Returns
--------
:class:`SimpleVocabulary`
The simple vocabulary object, see :class:`Vocabulary` for more.
Examples
--------
Pre-process sentences
>>> captions = ["one two , three", "four five five"]
>>> processed_capts = []
>>> for c in captions:
>>> c = tl.nlp.process_sentence(c, start_word="<S>", end_word="</S>")
>>> processed_capts.append(c)
>>> print(processed_capts)
...[['<S>', 'one', 'two', ',', 'three', '</S>'], ['<S>', 'four', 'five', 'five', '</S>']]
Create vocabulary
>>> tl.nlp.create_vocab(processed_capts, word_counts_output_file='vocab.txt', min_word_count=1)
Creating vocabulary.
Total words: 8
Words in vocabulary: 8
Wrote vocabulary file: vocab.txt
Get vocabulary object
>>> vocab = tl.nlp.Vocabulary('vocab.txt', start_word="<S>", end_word="</S>", unk_word="<UNK>")
INFO:tensorflow:Initializing vocabulary from file: vocab.txt
[TL] Vocabulary from vocab.txt : <S> </S> <UNK>
vocabulary with 10 words (includes start_word, end_word, unk_word)
start_id: 2
end_id: 3
unk_id: 9
pad_id: 0 | [
"Creates",
"the",
"vocabulary",
"of",
"word",
"to",
"word_id",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/nlp.py#L378-L459 | valid |
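A self-contained sketch of the counting logic behind ``tl.nlp.create_vocab``, using only the standard library; the ``sentences`` value is a hypothetical example and the vocabulary-file write via ``tf.gfile`` is omitted.

from collections import Counter

# Hypothetical, already-tokenised sentences (the real function expects this format).
sentences = [['<S>', 'one', 'two', ',', 'three', '</S>'],
             ['<S>', 'four', 'five', 'five', '</S>']]
min_word_count = 1

counter = Counter()
for sent in sentences:
    counter.update(sent)

word_counts = [x for x in counter.items() if x[1] >= min_word_count]
word_counts.sort(key=lambda x: x[1], reverse=True)
word_counts = [("<PAD>", 0)] + word_counts      # id 0 is reserved for padding

vocab_dict = {w: i for i, (w, _) in enumerate(word_counts)}
unk_id = len(vocab_dict)                        # unknown words map to the next free id
print(vocab_dict, unk_id)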
tensorlayer/tensorlayer | tensorlayer/nlp.py | read_words | def read_words(filename="nietzsche.txt", replace=None):
"""Read list format context from a file.
For customized read_words method, see ``tutorial_generate_text.py``.
Parameters
----------
filename : str
a file path.
replace : list of str
a pair of strings: occurrences of the first string are replaced by the second, e.g. replace newlines by ``<eos>``.
Returns
-------
list of str
The context in a list (split using space).
"""
if replace is None:
replace = ['\n', '<eos>']
with tf.gfile.GFile(filename, "r") as f:
try: # python 3.4 or older
context_list = f.read().replace(*replace).split()
except Exception: # python 3.5
f.seek(0)
replace = [x.encode('utf-8') for x in replace]
context_list = f.read().replace(*replace).split()
return context_list | python | def read_words(filename="nietzsche.txt", replace=None):
"""Read list format context from a file.
For customized read_words method, see ``tutorial_generate_text.py``.
Parameters
----------
filename : str
a file path.
replace : list of str
a pair of strings: occurrences of the first string are replaced by the second, e.g. replace newlines by ``<eos>``.
Returns
-------
list of str
The context in a list (split using space).
"""
if replace is None:
replace = ['\n', '<eos>']
with tf.gfile.GFile(filename, "r") as f:
try: # python 3.4 or older
context_list = f.read().replace(*replace).split()
except Exception: # python 3.5
f.seek(0)
replace = [x.encode('utf-8') for x in replace]
context_list = f.read().replace(*replace).split()
return context_list | [
"def",
"read_words",
"(",
"filename",
"=",
"\"nietzsche.txt\"",
",",
"replace",
"=",
"None",
")",
":",
"if",
"replace",
"is",
"None",
":",
"replace",
"=",
"[",
"'\\n'",
",",
"'<eos>'",
"]",
"with",
"tf",
".",
"gfile",
".",
"GFile",
"(",
"filename",
",",
"\"r\"",
")",
"as",
"f",
":",
"try",
":",
"# python 3.4 or older",
"context_list",
"=",
"f",
".",
"read",
"(",
")",
".",
"replace",
"(",
"*",
"replace",
")",
".",
"split",
"(",
")",
"except",
"Exception",
":",
"# python 3.5",
"f",
".",
"seek",
"(",
"0",
")",
"replace",
"=",
"[",
"x",
".",
"encode",
"(",
"'utf-8'",
")",
"for",
"x",
"in",
"replace",
"]",
"context_list",
"=",
"f",
".",
"read",
"(",
")",
".",
"replace",
"(",
"*",
"replace",
")",
".",
"split",
"(",
")",
"return",
"context_list"
] | Read list format context from a file.
For customized read_words method, see ``tutorial_generate_text.py``.
Parameters
----------
filename : str
a file path.
replace : list of str
a pair of strings: occurrences of the first string are replaced by the second, e.g. replace newlines by ``<eos>``.
Returns
-------
list of str
The context in a list (split using space). | [
"Read",
"list",
"format",
"context",
"from",
"a",
"file",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/nlp.py#L482-L509 | valid |
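A plain-Python sketch of what ``read_words`` does; ``corpus.txt`` is a hypothetical path, and the ``tf.gfile`` wrapper and the Python-3.5 byte fallback from the record are skipped.

def read_words_sketch(filename="corpus.txt", replace=("\n", "<eos>")):
    # Replace newlines with an end-of-sentence marker, then split on whitespace.
    with open(filename, "r") as f:
        return f.read().replace(*replace).split()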
tensorlayer/tensorlayer | tensorlayer/nlp.py | read_analogies_file | def read_analogies_file(eval_file='questions-words.txt', word2id=None):
"""Reads through an analogy question file, return its id format.
Parameters
----------
eval_file : str
The file name.
word2id : dictionary
a dictionary that maps word to ID.
Returns
--------
numpy.array
A ``[n_examples, 4]`` numpy array containing the analogy question's word IDs.
Examples
---------
The file should be in this format
>>> : capital-common-countries
>>> Athens Greece Baghdad Iraq
>>> Athens Greece Bangkok Thailand
>>> Athens Greece Beijing China
>>> Athens Greece Berlin Germany
>>> Athens Greece Bern Switzerland
>>> Athens Greece Cairo Egypt
>>> Athens Greece Canberra Australia
>>> Athens Greece Hanoi Vietnam
>>> Athens Greece Havana Cuba
Get the tokenized analogy question data
>>> words = tl.files.load_matt_mahoney_text8_dataset()
>>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size, True)
>>> analogy_questions = tl.nlp.read_analogies_file(eval_file='questions-words.txt', word2id=dictionary)
>>> print(analogy_questions)
[[ 3068 1248 7161 1581]
[ 3068 1248 28683 5642]
[ 3068 1248 3878 486]
...,
[ 1216 4309 19982 25506]
[ 1216 4309 3194 8650]
[ 1216 4309 140 312]]
"""
if word2id is None:
word2id = {}
questions = []
questions_skipped = 0
with open(eval_file, "rb") as analogy_f:
for line in analogy_f:
if line.startswith(b":"): # Skip comments.
continue
words = line.strip().lower().split(b" ") # lowercase
ids = [word2id.get(w.strip()) for w in words]
if None in ids or len(ids) != 4:
questions_skipped += 1
else:
questions.append(np.array(ids))
tl.logging.info("Eval analogy file: %s" % eval_file)
tl.logging.info("Questions: %d", len(questions))
tl.logging.info("Skipped: %d", questions_skipped)
analogy_questions = np.array(questions, dtype=np.int32)
return analogy_questions | python | def read_analogies_file(eval_file='questions-words.txt', word2id=None):
"""Reads through an analogy question file, return its id format.
Parameters
----------
eval_file : str
The file name.
word2id : dictionary
a dictionary that maps word to ID.
Returns
--------
numpy.array
A ``[n_examples, 4]`` numpy array containing the analogy question's word IDs.
Examples
---------
The file should be in this format
>>> : capital-common-countries
>>> Athens Greece Baghdad Iraq
>>> Athens Greece Bangkok Thailand
>>> Athens Greece Beijing China
>>> Athens Greece Berlin Germany
>>> Athens Greece Bern Switzerland
>>> Athens Greece Cairo Egypt
>>> Athens Greece Canberra Australia
>>> Athens Greece Hanoi Vietnam
>>> Athens Greece Havana Cuba
Get the tokenized analogy question data
>>> words = tl.files.load_matt_mahoney_text8_dataset()
>>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size, True)
>>> analogy_questions = tl.nlp.read_analogies_file(eval_file='questions-words.txt', word2id=dictionary)
>>> print(analogy_questions)
[[ 3068 1248 7161 1581]
[ 3068 1248 28683 5642]
[ 3068 1248 3878 486]
...,
[ 1216 4309 19982 25506]
[ 1216 4309 3194 8650]
[ 1216 4309 140 312]]
"""
if word2id is None:
word2id = {}
questions = []
questions_skipped = 0
with open(eval_file, "rb") as analogy_f:
for line in analogy_f:
if line.startswith(b":"): # Skip comments.
continue
words = line.strip().lower().split(b" ") # lowercase
ids = [word2id.get(w.strip()) for w in words]
if None in ids or len(ids) != 4:
questions_skipped += 1
else:
questions.append(np.array(ids))
tl.logging.info("Eval analogy file: %s" % eval_file)
tl.logging.info("Questions: %d", len(questions))
tl.logging.info("Skipped: %d", questions_skipped)
analogy_questions = np.array(questions, dtype=np.int32)
return analogy_questions | [
"def",
"read_analogies_file",
"(",
"eval_file",
"=",
"'questions-words.txt'",
",",
"word2id",
"=",
"None",
")",
":",
"if",
"word2id",
"is",
"None",
":",
"word2id",
"=",
"{",
"}",
"questions",
"=",
"[",
"]",
"questions_skipped",
"=",
"0",
"with",
"open",
"(",
"eval_file",
",",
"\"rb\"",
")",
"as",
"analogy_f",
":",
"for",
"line",
"in",
"analogy_f",
":",
"if",
"line",
".",
"startswith",
"(",
"b\":\"",
")",
":",
"# Skip comments.",
"continue",
"words",
"=",
"line",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
".",
"split",
"(",
"b\" \"",
")",
"# lowercase",
"ids",
"=",
"[",
"word2id",
".",
"get",
"(",
"w",
".",
"strip",
"(",
")",
")",
"for",
"w",
"in",
"words",
"]",
"if",
"None",
"in",
"ids",
"or",
"len",
"(",
"ids",
")",
"!=",
"4",
":",
"questions_skipped",
"+=",
"1",
"else",
":",
"questions",
".",
"append",
"(",
"np",
".",
"array",
"(",
"ids",
")",
")",
"tl",
".",
"logging",
".",
"info",
"(",
"\"Eval analogy file: %s\"",
"%",
"eval_file",
")",
"tl",
".",
"logging",
".",
"info",
"(",
"\"Questions: %d\"",
",",
"len",
"(",
"questions",
")",
")",
"tl",
".",
"logging",
".",
"info",
"(",
"\"Skipped: %d\"",
",",
"questions_skipped",
")",
"analogy_questions",
"=",
"np",
".",
"array",
"(",
"questions",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"return",
"analogy_questions"
] | Reads through an analogy question file, return its id format.
Parameters
----------
eval_file : str
The file name.
word2id : dictionary
a dictionary that maps word to ID.
Returns
--------
numpy.array
A ``[n_examples, 4]`` numpy array containing the analogy question's word IDs.
Examples
---------
The file should be in this format
>>> : capital-common-countries
>>> Athens Greece Baghdad Iraq
>>> Athens Greece Bangkok Thailand
>>> Athens Greece Beijing China
>>> Athens Greece Berlin Germany
>>> Athens Greece Bern Switzerland
>>> Athens Greece Cairo Egypt
>>> Athens Greece Canberra Australia
>>> Athens Greece Hanoi Vietnam
>>> Athens Greece Havana Cuba
Get the tokenized analogy question data
>>> words = tl.files.load_matt_mahoney_text8_dataset()
>>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size, True)
>>> analogy_questions = tl.nlp.read_analogies_file(eval_file='questions-words.txt', word2id=dictionary)
>>> print(analogy_questions)
[[ 3068 1248 7161 1581]
[ 3068 1248 28683 5642]
[ 3068 1248 3878 486]
...,
[ 1216 4309 19982 25506]
[ 1216 4309 3194 8650]
[ 1216 4309 140 312]] | [
"Reads",
"through",
"an",
"analogy",
"question",
"file",
"return",
"its",
"id",
"format",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/nlp.py#L512-L576 | valid |
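A minimal sketch of the parsing loop in ``read_analogies_file``, with a hypothetical in-memory file and word-to-id map standing in for ``questions-words.txt``.

import numpy as np

lines = [b": capital-common-countries",        # category header, skipped
         b"Athens Greece Baghdad Iraq",
         b"Athens Greece Bangkok Thailand"]
word2id = {b"athens": 0, b"greece": 1, b"baghdad": 2, b"iraq": 3,
           b"bangkok": 4, b"thailand": 5}      # hypothetical vocabulary

questions, skipped = [], 0
for line in lines:
    if line.startswith(b":"):
        continue
    ids = [word2id.get(w.strip()) for w in line.strip().lower().split(b" ")]
    if None in ids or len(ids) != 4:
        skipped += 1
    else:
        questions.append(np.array(ids))
analogy_questions = np.array(questions, dtype=np.int32)
print(analogy_questions, skipped)              # two valid questions, none skipped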
tensorlayer/tensorlayer | tensorlayer/nlp.py | build_reverse_dictionary | def build_reverse_dictionary(word_to_id):
"""Given a dictionary that maps word to integer id.
Returns a reverse dictionary that maps a id to word.
Parameters
----------
word_to_id : dictionary
that maps word to ID.
Returns
--------
dictionary
A dictionary that maps IDs to words.
"""
reverse_dictionary = dict(zip(word_to_id.values(), word_to_id.keys()))
return reverse_dictionary | python | def build_reverse_dictionary(word_to_id):
"""Given a dictionary that maps word to integer id.
Returns a reverse dictionary that maps a id to word.
Parameters
----------
word_to_id : dictionary
that maps word to ID.
Returns
--------
dictionary
A dictionary that maps IDs to words.
"""
reverse_dictionary = dict(zip(word_to_id.values(), word_to_id.keys()))
return reverse_dictionary | [
"def",
"build_reverse_dictionary",
"(",
"word_to_id",
")",
":",
"reverse_dictionary",
"=",
"dict",
"(",
"zip",
"(",
"word_to_id",
".",
"values",
"(",
")",
",",
"word_to_id",
".",
"keys",
"(",
")",
")",
")",
"return",
"reverse_dictionary"
] | Given a dictionary that maps word to integer id.
Returns a reverse dictionary that maps a id to word.
Parameters
----------
word_to_id : dictionary
that maps word to ID.
Returns
--------
dictionary
A dictionary that maps IDs to words. | [
"Given",
"a",
"dictionary",
"that",
"maps",
"word",
"to",
"integer",
"id",
".",
"Returns",
"a",
"reverse",
"dictionary",
"that",
"maps",
"a",
"id",
"to",
"word",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/nlp.py#L619-L635 | valid |
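The core of ``build_reverse_dictionary`` is a one-line dictionary inversion; a tiny sketch with a hypothetical vocabulary:

word_to_id = {"<PAD>": 0, "the": 1, "of": 2}
id_to_word = dict(zip(word_to_id.values(), word_to_id.keys()))
print(id_to_word)   # {0: '<PAD>', 1: 'the', 2: 'of'}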
tensorlayer/tensorlayer | tensorlayer/nlp.py | build_words_dataset | def build_words_dataset(words=None, vocabulary_size=50000, printable=True, unk_key='UNK'):
"""Build the words dictionary and replace rare words with 'UNK' token.
The most common word has the smallest integer id.
Parameters
----------
words : list of str or byte
The context in list format. You may need to do preprocessing on the words, such as lower case, remove marks etc.
vocabulary_size : int
The maximum vocabulary size, limiting the vocabulary size. Then the script replaces rare words with 'UNK' token.
printable : boolean
Whether to print the read vocabulary size of the given words.
unk_key : str
Represent the unknown words.
Returns
--------
data : list of int
The context in a list of ID.
count : list of tuple and list
Pair words and IDs.
- count[0] is a list : the number of rare words
- count[1:] are tuples : the number of occurrences of each word
- e.g. [['UNK', 418391], (b'the', 1061396), (b'of', 593677), (b'and', 416629), (b'one', 411764)]
dictionary : dictionary
It is `word_to_id` that maps word to ID.
reverse_dictionary : a dictionary
It is `id_to_word` that maps ID to word.
Examples
--------
>>> words = tl.files.load_matt_mahoney_text8_dataset()
>>> vocabulary_size = 50000
>>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size)
References
-----------------
- `tensorflow/examples/tutorials/word2vec/word2vec_basic.py <https://github.com/tensorflow/tensorflow/blob/r0.7/tensorflow/examples/tutorials/word2vec/word2vec_basic.py>`__
"""
if words is None:
raise Exception("words : list of str or byte")
count = [[unk_key, -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
if printable:
tl.logging.info('Real vocabulary size %d' % len(collections.Counter(words).keys()))
tl.logging.info('Limited vocabulary size {}'.format(vocabulary_size))
if len(collections.Counter(words).keys()) < vocabulary_size:
raise Exception(
"len(collections.Counter(words).keys()) >= vocabulary_size , the limited vocabulary_size must be less than or equal to the read vocabulary_size"
)
return data, count, dictionary, reverse_dictionary | python | def build_words_dataset(words=None, vocabulary_size=50000, printable=True, unk_key='UNK'):
"""Build the words dictionary and replace rare words with 'UNK' token.
The most common word has the smallest integer id.
Parameters
----------
words : list of str or byte
The context in list format. You may need to do preprocessing on the words, such as lower case, remove marks etc.
vocabulary_size : int
The maximum vocabulary size, limiting the vocabulary size. Then the script replaces rare words with 'UNK' token.
printable : boolean
Whether to print the read vocabulary size of the given words.
unk_key : str
Represent the unknown words.
Returns
--------
data : list of int
The context in a list of ID.
count : list of tuple and list
Pair words and IDs.
- count[0] is a list : the number of rare words
- count[1:] are tuples : the number of occurrences of each word
- e.g. [['UNK', 418391], (b'the', 1061396), (b'of', 593677), (b'and', 416629), (b'one', 411764)]
dictionary : dictionary
It is `word_to_id` that maps word to ID.
reverse_dictionary : a dictionary
It is `id_to_word` that maps ID to word.
Examples
--------
>>> words = tl.files.load_matt_mahoney_text8_dataset()
>>> vocabulary_size = 50000
>>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size)
References
-----------------
- `tensorflow/examples/tutorials/word2vec/word2vec_basic.py <https://github.com/tensorflow/tensorflow/blob/r0.7/tensorflow/examples/tutorials/word2vec/word2vec_basic.py>`__
"""
if words is None:
raise Exception("words : list of str or byte")
count = [[unk_key, -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
if printable:
tl.logging.info('Real vocabulary size %d' % len(collections.Counter(words).keys()))
tl.logging.info('Limited vocabulary size {}'.format(vocabulary_size))
if len(collections.Counter(words).keys()) < vocabulary_size:
raise Exception(
"len(collections.Counter(words).keys()) >= vocabulary_size , the limited vocabulary_size must be less than or equal to the read vocabulary_size"
)
return data, count, dictionary, reverse_dictionary | [
"def",
"build_words_dataset",
"(",
"words",
"=",
"None",
",",
"vocabulary_size",
"=",
"50000",
",",
"printable",
"=",
"True",
",",
"unk_key",
"=",
"'UNK'",
")",
":",
"if",
"words",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"words : list of str or byte\"",
")",
"count",
"=",
"[",
"[",
"unk_key",
",",
"-",
"1",
"]",
"]",
"count",
".",
"extend",
"(",
"collections",
".",
"Counter",
"(",
"words",
")",
".",
"most_common",
"(",
"vocabulary_size",
"-",
"1",
")",
")",
"dictionary",
"=",
"dict",
"(",
")",
"for",
"word",
",",
"_",
"in",
"count",
":",
"dictionary",
"[",
"word",
"]",
"=",
"len",
"(",
"dictionary",
")",
"data",
"=",
"list",
"(",
")",
"unk_count",
"=",
"0",
"for",
"word",
"in",
"words",
":",
"if",
"word",
"in",
"dictionary",
":",
"index",
"=",
"dictionary",
"[",
"word",
"]",
"else",
":",
"index",
"=",
"0",
"# dictionary['UNK']",
"unk_count",
"+=",
"1",
"data",
".",
"append",
"(",
"index",
")",
"count",
"[",
"0",
"]",
"[",
"1",
"]",
"=",
"unk_count",
"reverse_dictionary",
"=",
"dict",
"(",
"zip",
"(",
"dictionary",
".",
"values",
"(",
")",
",",
"dictionary",
".",
"keys",
"(",
")",
")",
")",
"if",
"printable",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"'Real vocabulary size %d'",
"%",
"len",
"(",
"collections",
".",
"Counter",
"(",
"words",
")",
".",
"keys",
"(",
")",
")",
")",
"tl",
".",
"logging",
".",
"info",
"(",
"'Limited vocabulary size {}'",
".",
"format",
"(",
"vocabulary_size",
")",
")",
"if",
"len",
"(",
"collections",
".",
"Counter",
"(",
"words",
")",
".",
"keys",
"(",
")",
")",
"<",
"vocabulary_size",
":",
"raise",
"Exception",
"(",
"\"len(collections.Counter(words).keys()) >= vocabulary_size , the limited vocabulary_size must be less than or equal to the read vocabulary_size\"",
")",
"return",
"data",
",",
"count",
",",
"dictionary",
",",
"reverse_dictionary"
] | Build the words dictionary and replace rare words with 'UNK' token.
The most common word has the smallest integer id.
Parameters
----------
words : list of str or byte
The context in list format. You may need to do preprocessing on the words, such as lower case, remove marks etc.
vocabulary_size : int
The maximum vocabulary size, limiting the vocabulary size. Then the script replaces rare words with 'UNK' token.
printable : boolean
Whether to print the read vocabulary size of the given words.
unk_key : str
Represent the unknown words.
Returns
--------
data : list of int
The context in a list of ID.
count : list of tuple and list
Pair words and IDs.
- count[0] is a list : the number of rare words
- count[1:] are tuples : the number of occurrences of each word
- e.g. [['UNK', 418391], (b'the', 1061396), (b'of', 593677), (b'and', 416629), (b'one', 411764)]
dictionary : dictionary
It is `word_to_id` that maps word to ID.
reverse_dictionary : a dictionary
It is `id_to_word` that maps ID to word.
Examples
--------
>>> words = tl.files.load_matt_mahoney_text8_dataset()
>>> vocabulary_size = 50000
>>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size)
References
-----------------
- `tensorflow/examples/tutorials/word2vec/word2vec_basic.py <https://github.com/tensorflow/tensorflow/blob/r0.7/tensorflow/examples/tutorials/word2vec/word2vec_basic.py>`__ | [
"Build",
"the",
"words",
"dictionary",
"and",
"replace",
"rare",
"words",
"with",
"UNK",
"token",
".",
"The",
"most",
"common",
"word",
"has",
"the",
"smallest",
"integer",
"id",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/nlp.py#L638-L704 | valid |
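A small self-contained sketch of the UNK-replacement logic in ``build_words_dataset``, using a hypothetical toy corpus instead of the text8 data.

import collections

words = ["the", "cat", "sat", "on", "the", "mat", "the"]   # hypothetical corpus
vocabulary_size = 4                                        # 'UNK' + 3 most common words

count = [["UNK", -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = {word: idx for idx, (word, _) in enumerate(count)}

data, unk_count = [], 0
for word in words:
    if word in dictionary:
        index = dictionary[word]
    else:
        index = 0            # dictionary['UNK']
        unk_count += 1
    data.append(index)
count[0][1] = unk_count

reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
print(data, count)           # rare words 'on' and 'mat' become id 0 (UNK)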
tensorlayer/tensorlayer | tensorlayer/nlp.py | words_to_word_ids | def words_to_word_ids(data=None, word_to_id=None, unk_key='UNK'):
"""Convert a list of string (words) to IDs.
Parameters
----------
data : list of string or byte
The context in list format
word_to_id : a dictionary
that maps word to ID.
unk_key : str
Represent the unknown words.
Returns
--------
list of int
A list of IDs to represent the context.
Examples
--------
>>> words = tl.files.load_matt_mahoney_text8_dataset()
>>> vocabulary_size = 50000
>>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size, True)
>>> context = [b'hello', b'how', b'are', b'you']
>>> ids = tl.nlp.words_to_word_ids(words, dictionary)
>>> context = tl.nlp.word_ids_to_words(ids, reverse_dictionary)
>>> print(ids)
[6434, 311, 26, 207]
>>> print(context)
[b'hello', b'how', b'are', b'you']
References
---------------
- `tensorflow.models.rnn.ptb.reader <https://github.com/tensorflow/tensorflow/tree/master/tensorflow/models/rnn/ptb>`__
"""
if data is None:
raise Exception("data : list of string or byte")
if word_to_id is None:
raise Exception("word_to_id : a dictionary")
# if isinstance(data[0], six.string_types):
# tl.logging.info(type(data[0]))
# # exit()
# tl.logging.info(data[0])
# tl.logging.info(word_to_id)
# return [word_to_id[str(word)] for word in data]
# else:
word_ids = []
for word in data:
if word_to_id.get(word) is not None:
word_ids.append(word_to_id[word])
else:
word_ids.append(word_to_id[unk_key])
return word_ids | python | def words_to_word_ids(data=None, word_to_id=None, unk_key='UNK'):
"""Convert a list of string (words) to IDs.
Parameters
----------
data : list of string or byte
The context in list format
word_to_id : a dictionary
that maps word to ID.
unk_key : str
Represent the unknown words.
Returns
--------
list of int
A list of IDs to represent the context.
Examples
--------
>>> words = tl.files.load_matt_mahoney_text8_dataset()
>>> vocabulary_size = 50000
>>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size, True)
>>> context = [b'hello', b'how', b'are', b'you']
>>> ids = tl.nlp.words_to_word_ids(words, dictionary)
>>> context = tl.nlp.word_ids_to_words(ids, reverse_dictionary)
>>> print(ids)
[6434, 311, 26, 207]
>>> print(context)
[b'hello', b'how', b'are', b'you']
References
---------------
- `tensorflow.models.rnn.ptb.reader <https://github.com/tensorflow/tensorflow/tree/master/tensorflow/models/rnn/ptb>`__
"""
if data is None:
raise Exception("data : list of string or byte")
if word_to_id is None:
raise Exception("word_to_id : a dictionary")
# if isinstance(data[0], six.string_types):
# tl.logging.info(type(data[0]))
# # exit()
# tl.logging.info(data[0])
# tl.logging.info(word_to_id)
# return [word_to_id[str(word)] for word in data]
# else:
word_ids = []
for word in data:
if word_to_id.get(word) is not None:
word_ids.append(word_to_id[word])
else:
word_ids.append(word_to_id[unk_key])
return word_ids | [
"def",
"words_to_word_ids",
"(",
"data",
"=",
"None",
",",
"word_to_id",
"=",
"None",
",",
"unk_key",
"=",
"'UNK'",
")",
":",
"if",
"data",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"data : list of string or byte\"",
")",
"if",
"word_to_id",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"word_to_id : a dictionary\"",
")",
"# if isinstance(data[0], six.string_types):",
"# tl.logging.info(type(data[0]))",
"# # exit()",
"# tl.logging.info(data[0])",
"# tl.logging.info(word_to_id)",
"# return [word_to_id[str(word)] for word in data]",
"# else:",
"word_ids",
"=",
"[",
"]",
"for",
"word",
"in",
"data",
":",
"if",
"word_to_id",
".",
"get",
"(",
"word",
")",
"is",
"not",
"None",
":",
"word_ids",
".",
"append",
"(",
"word_to_id",
"[",
"word",
"]",
")",
"else",
":",
"word_ids",
".",
"append",
"(",
"word_to_id",
"[",
"unk_key",
"]",
")",
"return",
"word_ids"
] | Convert a list of string (words) to IDs.
Parameters
----------
data : list of string or byte
The context in list format
word_to_id : a dictionary
that maps word to ID.
unk_key : str
Represent the unknown words.
Returns
--------
list of int
A list of IDs to represent the context.
Examples
--------
>>> words = tl.files.load_matt_mahoney_text8_dataset()
>>> vocabulary_size = 50000
>>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size, True)
>>> context = [b'hello', b'how', b'are', b'you']
>>> ids = tl.nlp.words_to_word_ids(words, dictionary)
>>> context = tl.nlp.word_ids_to_words(ids, reverse_dictionary)
>>> print(ids)
[6434, 311, 26, 207]
>>> print(context)
[b'hello', b'how', b'are', b'you']
References
---------------
- `tensorflow.models.rnn.ptb.reader <https://github.com/tensorflow/tensorflow/tree/master/tensorflow/models/rnn/ptb>`__ | [
"Convert",
"a",
"list",
"of",
"string",
"(",
"words",
")",
"to",
"IDs",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/nlp.py#L707-L760 | valid |
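A minimal sketch of the lookup performed by ``words_to_word_ids``, with a hypothetical vocabulary:

word_to_id = {"UNK": 0, "hello": 1, "how": 2, "are": 3, "you": 4}
data = ["hello", "how", "are", "you", "today"]

word_ids = [word_to_id.get(w, word_to_id["UNK"]) for w in data]
print(word_ids)   # [1, 2, 3, 4, 0] -- 'today' falls back to the UNK id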
tensorlayer/tensorlayer | tensorlayer/nlp.py | save_vocab | def save_vocab(count=None, name='vocab.txt'):
"""Save the vocabulary to a file so the model can be reloaded.
Parameters
----------
count : a list of tuple and list
count[0] is a list : the number of rare words,
count[1:] are tuples : the number of occurrences of each word,
e.g. [['UNK', 418391], (b'the', 1061396), (b'of', 593677), (b'and', 416629), (b'one', 411764)]
Examples
---------
>>> words = tl.files.load_matt_mahoney_text8_dataset()
>>> vocabulary_size = 50000
>>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size, True)
>>> tl.nlp.save_vocab(count, name='vocab_text8.txt')
>>> vocab_text8.txt
UNK 418391
the 1061396
of 593677
and 416629
one 411764
in 372201
a 325873
to 316376
"""
if count is None:
count = []
pwd = os.getcwd()
vocabulary_size = len(count)
with open(os.path.join(pwd, name), "w") as f:
for i in xrange(vocabulary_size):
f.write("%s %d\n" % (tf.compat.as_text(count[i][0]), count[i][1]))
tl.logging.info("%d vocab saved to %s in %s" % (vocabulary_size, name, pwd)) | python | def save_vocab(count=None, name='vocab.txt'):
"""Save the vocabulary to a file so the model can be reloaded.
Parameters
----------
count : a list of tuple and list
count[0] is a list : the number of rare words,
count[1:] are tuples : the number of occurrences of each word,
e.g. [['UNK', 418391], (b'the', 1061396), (b'of', 593677), (b'and', 416629), (b'one', 411764)]
Examples
---------
>>> words = tl.files.load_matt_mahoney_text8_dataset()
>>> vocabulary_size = 50000
>>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size, True)
>>> tl.nlp.save_vocab(count, name='vocab_text8.txt')
>>> vocab_text8.txt
UNK 418391
the 1061396
of 593677
and 416629
one 411764
in 372201
a 325873
to 316376
"""
if count is None:
count = []
pwd = os.getcwd()
vocabulary_size = len(count)
with open(os.path.join(pwd, name), "w") as f:
for i in xrange(vocabulary_size):
f.write("%s %d\n" % (tf.compat.as_text(count[i][0]), count[i][1]))
tl.logging.info("%d vocab saved to %s in %s" % (vocabulary_size, name, pwd)) | [
"def",
"save_vocab",
"(",
"count",
"=",
"None",
",",
"name",
"=",
"'vocab.txt'",
")",
":",
"if",
"count",
"is",
"None",
":",
"count",
"=",
"[",
"]",
"pwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"vocabulary_size",
"=",
"len",
"(",
"count",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"pwd",
",",
"name",
")",
",",
"\"w\"",
")",
"as",
"f",
":",
"for",
"i",
"in",
"xrange",
"(",
"vocabulary_size",
")",
":",
"f",
".",
"write",
"(",
"\"%s %d\\n\"",
"%",
"(",
"tf",
".",
"compat",
".",
"as_text",
"(",
"count",
"[",
"i",
"]",
"[",
"0",
"]",
")",
",",
"count",
"[",
"i",
"]",
"[",
"1",
"]",
")",
")",
"tl",
".",
"logging",
".",
"info",
"(",
"\"%d vocab saved to %s in %s\"",
"%",
"(",
"vocabulary_size",
",",
"name",
",",
"pwd",
")",
")"
] | Save the vocabulary to a file so the model can be reloaded.
Parameters
----------
count : a list of tuple and list
count[0] is a list : the number of rare words,
count[1:] are tuples : the number of occurrence of each word,
e.g. [['UNK', 418391], (b'the', 1061396), (b'of', 593677), (b'and', 416629), (b'one', 411764)]
Examples
---------
>>> words = tl.files.load_matt_mahoney_text8_dataset()
>>> vocabulary_size = 50000
>>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size, True)
>>> tl.nlp.save_vocab(count, name='vocab_text8.txt')
>>> vocab_text8.txt
UNK 418391
the 1061396
of 593677
and 416629
one 411764
in 372201
a 325873
to 316376 | [
"Save",
"the",
"vocabulary",
"to",
"a",
"file",
"so",
"the",
"model",
"can",
"be",
"reloaded",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/nlp.py#L795-L830 | valid |
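A sketch of the file format ``save_vocab`` writes, using plain strings and ``open`` rather than byte words and ``tf.compat.as_text``; the counts and file name are hypothetical.

import os

count = [["UNK", 418391], ("the", 1061396), ("of", 593677)]
name = "vocab_sketch.txt"

with open(os.path.join(os.getcwd(), name), "w") as f:
    for word, freq in count:
        f.write("%s %d\n" % (word, freq))      # one "word count" pair per line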
tensorlayer/tensorlayer | tensorlayer/nlp.py | basic_tokenizer | def basic_tokenizer(sentence, _WORD_SPLIT=re.compile(b"([.,!?\"':;)(])")):
"""Very basic tokenizer: split the sentence into a list of tokens.
Parameters
-----------
sentence : tensorflow.python.platform.gfile.GFile Object
_WORD_SPLIT : regular expression for word splitting.
Examples
--------
>>> see create_vocabulary
>>> from tensorflow.python.platform import gfile
>>> train_path = "wmt/giga-fren.release2"
>>> with gfile.GFile(train_path + ".en", mode="rb") as f:
>>> for line in f:
>>> tokens = tl.nlp.basic_tokenizer(line)
>>> tl.logging.info(tokens)
>>> exit()
[b'Changing', b'Lives', b'|', b'Changing', b'Society', b'|', b'How',
b'It', b'Works', b'|', b'Technology', b'Drives', b'Change', b'Home',
b'|', b'Concepts', b'|', b'Teachers', b'|', b'Search', b'|', b'Overview',
b'|', b'Credits', b'|', b'HHCC', b'Web', b'|', b'Reference', b'|',
b'Feedback', b'Virtual', b'Museum', b'of', b'Canada', b'Home', b'Page']
References
----------
- Code from ``/tensorflow/models/rnn/translation/data_utils.py``
"""
words = []
sentence = tf.compat.as_bytes(sentence)
for space_separated_fragment in sentence.strip().split():
words.extend(re.split(_WORD_SPLIT, space_separated_fragment))
return [w for w in words if w] | python | def basic_tokenizer(sentence, _WORD_SPLIT=re.compile(b"([.,!?\"':;)(])")):
"""Very basic tokenizer: split the sentence into a list of tokens.
Parameters
-----------
sentence : tensorflow.python.platform.gfile.GFile Object
_WORD_SPLIT : regular expression for word splitting.
Examples
--------
>>> see create_vocabulary
>>> from tensorflow.python.platform import gfile
>>> train_path = "wmt/giga-fren.release2"
>>> with gfile.GFile(train_path + ".en", mode="rb") as f:
>>> for line in f:
>>> tokens = tl.nlp.basic_tokenizer(line)
>>> tl.logging.info(tokens)
>>> exit()
[b'Changing', b'Lives', b'|', b'Changing', b'Society', b'|', b'How',
b'It', b'Works', b'|', b'Technology', b'Drives', b'Change', b'Home',
b'|', b'Concepts', b'|', b'Teachers', b'|', b'Search', b'|', b'Overview',
b'|', b'Credits', b'|', b'HHCC', b'Web', b'|', b'Reference', b'|',
b'Feedback', b'Virtual', b'Museum', b'of', b'Canada', b'Home', b'Page']
References
----------
- Code from ``/tensorflow/models/rnn/translation/data_utils.py``
"""
words = []
sentence = tf.compat.as_bytes(sentence)
for space_separated_fragment in sentence.strip().split():
words.extend(re.split(_WORD_SPLIT, space_separated_fragment))
return [w for w in words if w] | [
"def",
"basic_tokenizer",
"(",
"sentence",
",",
"_WORD_SPLIT",
"=",
"re",
".",
"compile",
"(",
"b\"([.,!?\\\"':;)(])\"",
")",
")",
":",
"words",
"=",
"[",
"]",
"sentence",
"=",
"tf",
".",
"compat",
".",
"as_bytes",
"(",
"sentence",
")",
"for",
"space_separated_fragment",
"in",
"sentence",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
":",
"words",
".",
"extend",
"(",
"re",
".",
"split",
"(",
"_WORD_SPLIT",
",",
"space_separated_fragment",
")",
")",
"return",
"[",
"w",
"for",
"w",
"in",
"words",
"if",
"w",
"]"
] | Very basic tokenizer: split the sentence into a list of tokens.
Parameters
-----------
sentence : tensorflow.python.platform.gfile.GFile Object
_WORD_SPLIT : regular expression for word splitting.
Examples
--------
>>> see create_vocabulary
>>> from tensorflow.python.platform import gfile
>>> train_path = "wmt/giga-fren.release2"
>>> with gfile.GFile(train_path + ".en", mode="rb") as f:
>>> for line in f:
>>> tokens = tl.nlp.basic_tokenizer(line)
>>> tl.logging.info(tokens)
>>> exit()
[b'Changing', b'Lives', b'|', b'Changing', b'Society', b'|', b'How',
b'It', b'Works', b'|', b'Technology', b'Drives', b'Change', b'Home',
b'|', b'Concepts', b'|', b'Teachers', b'|', b'Search', b'|', b'Overview',
b'|', b'Credits', b'|', b'HHCC', b'Web', b'|', b'Reference', b'|',
b'Feedback', b'Virtual', b'Museum', b'of', b'Canada', b'Home', b'Page']
References
----------
- Code from ``/tensorflow/models/rnn/translation/data_utils.py`` | [
"Very",
"basic",
"tokenizer",
":",
"split",
"the",
"sentence",
"into",
"a",
"list",
"of",
"tokens",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/nlp.py#L836-L870 | valid |
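``basic_tokenizer`` splits on whitespace and then on punctuation; because the regular expression contains a capture group, ``re.split`` keeps the punctuation as separate tokens. A runnable sketch with a hypothetical sentence:

import re

_WORD_SPLIT = re.compile(b"([.,!?\"':;)(])")

sentence = b"Changing Lives | Changing Society, How It Works."
words = []
for fragment in sentence.strip().split():
    words.extend(re.split(_WORD_SPLIT, fragment))
print([w for w in words if w])
# [b'Changing', b'Lives', b'|', b'Changing', b'Society', b',', b'How', b'It', b'Works', b'.']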
tensorlayer/tensorlayer | tensorlayer/nlp.py | create_vocabulary | def create_vocabulary(
vocabulary_path, data_path, max_vocabulary_size, tokenizer=None, normalize_digits=True,
_DIGIT_RE=re.compile(br"\d"), _START_VOCAB=None
):
r"""Create vocabulary file (if it does not exist yet) from data file.
Data file is assumed to contain one sentence per line. Each sentence is
tokenized and digits are normalized (if normalize_digits is set).
Vocabulary contains the most-frequent tokens up to max_vocabulary_size.
We write it to vocabulary_path in a one-token-per-line format, so that later
token in the first line gets id=0, second line gets id=1, and so on.
Parameters
-----------
vocabulary_path : str
Path where the vocabulary will be created.
data_path : str
Data file that will be used to create vocabulary.
max_vocabulary_size : int
Limit on the size of the created vocabulary.
tokenizer : function
A function to use to tokenize each data sentence. If None, basic_tokenizer will be used.
normalize_digits : boolean
If true, all digits are replaced by `0`.
_DIGIT_RE : regular expression function
Default is ``re.compile(br"\d")``.
_START_VOCAB : list of str
The pad, go, eos and unk token, default is ``[b"_PAD", b"_GO", b"_EOS", b"_UNK"]``.
References
----------
- Code from ``/tensorflow/models/rnn/translation/data_utils.py``
"""
if _START_VOCAB is None:
_START_VOCAB = [b"_PAD", b"_GO", b"_EOS", b"_UNK"]
if not gfile.Exists(vocabulary_path):
tl.logging.info("Creating vocabulary %s from data %s" % (vocabulary_path, data_path))
vocab = {}
with gfile.GFile(data_path, mode="rb") as f:
counter = 0
for line in f:
counter += 1
if counter % 100000 == 0:
tl.logging.info(" processing line %d" % counter)
tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)
for w in tokens:
word = re.sub(_DIGIT_RE, b"0", w) if normalize_digits else w
if word in vocab:
vocab[word] += 1
else:
vocab[word] = 1
vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
if len(vocab_list) > max_vocabulary_size:
vocab_list = vocab_list[:max_vocabulary_size]
with gfile.GFile(vocabulary_path, mode="wb") as vocab_file:
for w in vocab_list:
vocab_file.write(w + b"\n")
else:
tl.logging.info("Vocabulary %s from data %s exists" % (vocabulary_path, data_path)) | python | def create_vocabulary(
vocabulary_path, data_path, max_vocabulary_size, tokenizer=None, normalize_digits=True,
_DIGIT_RE=re.compile(br"\d"), _START_VOCAB=None
):
r"""Create vocabulary file (if it does not exist yet) from data file.
Data file is assumed to contain one sentence per line. Each sentence is
tokenized and digits are normalized (if normalize_digits is set).
Vocabulary contains the most-frequent tokens up to max_vocabulary_size.
We write it to vocabulary_path in a one-token-per-line format, so that later
token in the first line gets id=0, second line gets id=1, and so on.
Parameters
-----------
vocabulary_path : str
Path where the vocabulary will be created.
data_path : str
Data file that will be used to create vocabulary.
max_vocabulary_size : int
Limit on the size of the created vocabulary.
tokenizer : function
A function to use to tokenize each data sentence. If None, basic_tokenizer will be used.
normalize_digits : boolean
If true, all digits are replaced by `0`.
_DIGIT_RE : regular expression function
Default is ``re.compile(br"\d")``.
_START_VOCAB : list of str
The pad, go, eos and unk token, default is ``[b"_PAD", b"_GO", b"_EOS", b"_UNK"]``.
References
----------
- Code from ``/tensorflow/models/rnn/translation/data_utils.py``
"""
if _START_VOCAB is None:
_START_VOCAB = [b"_PAD", b"_GO", b"_EOS", b"_UNK"]
if not gfile.Exists(vocabulary_path):
tl.logging.info("Creating vocabulary %s from data %s" % (vocabulary_path, data_path))
vocab = {}
with gfile.GFile(data_path, mode="rb") as f:
counter = 0
for line in f:
counter += 1
if counter % 100000 == 0:
tl.logging.info(" processing line %d" % counter)
tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)
for w in tokens:
word = re.sub(_DIGIT_RE, b"0", w) if normalize_digits else w
if word in vocab:
vocab[word] += 1
else:
vocab[word] = 1
vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
if len(vocab_list) > max_vocabulary_size:
vocab_list = vocab_list[:max_vocabulary_size]
with gfile.GFile(vocabulary_path, mode="wb") as vocab_file:
for w in vocab_list:
vocab_file.write(w + b"\n")
else:
tl.logging.info("Vocabulary %s from data %s exists" % (vocabulary_path, data_path)) | [
"def",
"create_vocabulary",
"(",
"vocabulary_path",
",",
"data_path",
",",
"max_vocabulary_size",
",",
"tokenizer",
"=",
"None",
",",
"normalize_digits",
"=",
"True",
",",
"_DIGIT_RE",
"=",
"re",
".",
"compile",
"(",
"br\"\\d\"",
")",
",",
"_START_VOCAB",
"=",
"None",
")",
":",
"if",
"_START_VOCAB",
"is",
"None",
":",
"_START_VOCAB",
"=",
"[",
"b\"_PAD\"",
",",
"b\"_GO\"",
",",
"b\"_EOS\"",
",",
"b\"_UNK\"",
"]",
"if",
"not",
"gfile",
".",
"Exists",
"(",
"vocabulary_path",
")",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"\"Creating vocabulary %s from data %s\"",
"%",
"(",
"vocabulary_path",
",",
"data_path",
")",
")",
"vocab",
"=",
"{",
"}",
"with",
"gfile",
".",
"GFile",
"(",
"data_path",
",",
"mode",
"=",
"\"rb\"",
")",
"as",
"f",
":",
"counter",
"=",
"0",
"for",
"line",
"in",
"f",
":",
"counter",
"+=",
"1",
"if",
"counter",
"%",
"100000",
"==",
"0",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"\" processing line %d\"",
"%",
"counter",
")",
"tokens",
"=",
"tokenizer",
"(",
"line",
")",
"if",
"tokenizer",
"else",
"basic_tokenizer",
"(",
"line",
")",
"for",
"w",
"in",
"tokens",
":",
"word",
"=",
"re",
".",
"sub",
"(",
"_DIGIT_RE",
",",
"b\"0\"",
",",
"w",
")",
"if",
"normalize_digits",
"else",
"w",
"if",
"word",
"in",
"vocab",
":",
"vocab",
"[",
"word",
"]",
"+=",
"1",
"else",
":",
"vocab",
"[",
"word",
"]",
"=",
"1",
"vocab_list",
"=",
"_START_VOCAB",
"+",
"sorted",
"(",
"vocab",
",",
"key",
"=",
"vocab",
".",
"get",
",",
"reverse",
"=",
"True",
")",
"if",
"len",
"(",
"vocab_list",
")",
">",
"max_vocabulary_size",
":",
"vocab_list",
"=",
"vocab_list",
"[",
":",
"max_vocabulary_size",
"]",
"with",
"gfile",
".",
"GFile",
"(",
"vocabulary_path",
",",
"mode",
"=",
"\"wb\"",
")",
"as",
"vocab_file",
":",
"for",
"w",
"in",
"vocab_list",
":",
"vocab_file",
".",
"write",
"(",
"w",
"+",
"b\"\\n\"",
")",
"else",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"\"Vocabulary %s from data %s exists\"",
"%",
"(",
"vocabulary_path",
",",
"data_path",
")",
")"
] | r"""Create vocabulary file (if it does not exist yet) from data file.
Data file is assumed to contain one sentence per line. Each sentence is
tokenized and digits are normalized (if normalize_digits is set).
Vocabulary contains the most-frequent tokens up to max_vocabulary_size.
We write it to vocabulary_path in a one-token-per-line format, so that later
token in the first line gets id=0, second line gets id=1, and so on.
Parameters
-----------
vocabulary_path : str
Path where the vocabulary will be created.
data_path : str
Data file that will be used to create vocabulary.
max_vocabulary_size : int
Limit on the size of the created vocabulary.
tokenizer : function
A function to use to tokenize each data sentence. If None, basic_tokenizer will be used.
normalize_digits : boolean
If true, all digits are replaced by `0`.
_DIGIT_RE : regular expression function
Default is ``re.compile(br"\d")``.
_START_VOCAB : list of str
The pad, go, eos and unk token, default is ``[b"_PAD", b"_GO", b"_EOS", b"_UNK"]``.
References
----------
- Code from ``/tensorflow/models/rnn/translation/data_utils.py`` | [
"r",
"Create",
"vocabulary",
"file",
"(",
"if",
"it",
"does",
"not",
"exist",
"yet",
")",
"from",
"data",
"file",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/nlp.py#L873-L932 | valid |
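A condensed sketch of ``create_vocabulary``'s counting and truncation steps, using an in-memory corpus and whitespace splitting instead of ``tf.gfile`` and ``basic_tokenizer``; the corpus lines and output path are hypothetical.

import re
from collections import Counter

_DIGIT_RE = re.compile(br"\d")
_START_VOCAB = [b"_PAD", b"_GO", b"_EOS", b"_UNK"]
max_vocabulary_size = 10

lines = [b"I have 2 dogs .", b"I have a cat ."]
vocab = Counter()
for line in lines:
    for w in line.split():
        vocab[re.sub(_DIGIT_RE, b"0", w)] += 1      # normalise digits to 0

vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
vocab_list = vocab_list[:max_vocabulary_size]       # keep only the most frequent tokens
with open("vocab_sketch.txt", "wb") as f:
    f.write(b"\n".join(vocab_list) + b"\n")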
tensorlayer/tensorlayer | tensorlayer/nlp.py | initialize_vocabulary | def initialize_vocabulary(vocabulary_path):
"""Initialize vocabulary from file, return the `word_to_id` (dictionary)
and `id_to_word` (list).
We assume the vocabulary is stored one-item-per-line, so a file containing ``dog`` and ``cat`` on separate lines will result in a vocabulary {"dog": 0, "cat": 1}, and this function will also return the reversed-vocabulary ["dog", "cat"].
Parameters
-----------
vocabulary_path : str
Path to the file containing the vocabulary.
Returns
--------
vocab : dictionary
a dictionary that maps word to ID.
rev_vocab : list of int
a list that maps ID to word.
Examples
---------
>>> Assume 'test' contains
dog
cat
bird
>>> vocab, rev_vocab = tl.nlp.initialize_vocabulary("test")
>>> print(vocab)
>>> {b'cat': 1, b'dog': 0, b'bird': 2}
>>> print(rev_vocab)
>>> [b'dog', b'cat', b'bird']
Raises
-------
ValueError : if the provided vocabulary_path does not exist.
"""
if gfile.Exists(vocabulary_path):
rev_vocab = []
with gfile.GFile(vocabulary_path, mode="rb") as f:
rev_vocab.extend(f.readlines())
rev_vocab = [tf.compat.as_bytes(line.strip()) for line in rev_vocab]
vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
return vocab, rev_vocab
else:
raise ValueError("Vocabulary file %s not found.", vocabulary_path) | python | def initialize_vocabulary(vocabulary_path):
"""Initialize vocabulary from file, return the `word_to_id` (dictionary)
and `id_to_word` (list).
We assume the vocabulary is stored one-item-per-line, so a file containing ``dog`` and ``cat`` on separate lines will result in a vocabulary {"dog": 0, "cat": 1}, and this function will also return the reversed-vocabulary ["dog", "cat"].
Parameters
-----------
vocabulary_path : str
Path to the file containing the vocabulary.
Returns
--------
vocab : dictionary
a dictionary that maps word to ID.
rev_vocab : list of int
a list that maps ID to word.
Examples
---------
>>> Assume 'test' contains
dog
cat
bird
>>> vocab, rev_vocab = tl.nlp.initialize_vocabulary("test")
>>> print(vocab)
>>> {b'cat': 1, b'dog': 0, b'bird': 2}
>>> print(rev_vocab)
>>> [b'dog', b'cat', b'bird']
Raises
-------
ValueError : if the provided vocabulary_path does not exist.
"""
if gfile.Exists(vocabulary_path):
rev_vocab = []
with gfile.GFile(vocabulary_path, mode="rb") as f:
rev_vocab.extend(f.readlines())
rev_vocab = [tf.compat.as_bytes(line.strip()) for line in rev_vocab]
vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
return vocab, rev_vocab
else:
raise ValueError("Vocabulary file %s not found.", vocabulary_path) | [
"def",
"initialize_vocabulary",
"(",
"vocabulary_path",
")",
":",
"if",
"gfile",
".",
"Exists",
"(",
"vocabulary_path",
")",
":",
"rev_vocab",
"=",
"[",
"]",
"with",
"gfile",
".",
"GFile",
"(",
"vocabulary_path",
",",
"mode",
"=",
"\"rb\"",
")",
"as",
"f",
":",
"rev_vocab",
".",
"extend",
"(",
"f",
".",
"readlines",
"(",
")",
")",
"rev_vocab",
"=",
"[",
"tf",
".",
"compat",
".",
"as_bytes",
"(",
"line",
".",
"strip",
"(",
")",
")",
"for",
"line",
"in",
"rev_vocab",
"]",
"vocab",
"=",
"dict",
"(",
"[",
"(",
"x",
",",
"y",
")",
"for",
"(",
"y",
",",
"x",
")",
"in",
"enumerate",
"(",
"rev_vocab",
")",
"]",
")",
"return",
"vocab",
",",
"rev_vocab",
"else",
":",
"raise",
"ValueError",
"(",
"\"Vocabulary file %s not found.\"",
",",
"vocabulary_path",
")"
] | Initialize vocabulary from file, return the `word_to_id` (dictionary)
and `id_to_word` (list).
We assume the vocabulary is stored one-item-per-line, so a file containing ``dog`` and ``cat`` on separate lines will result in a vocabulary {"dog": 0, "cat": 1}, and this function will also return the reversed-vocabulary ["dog", "cat"].
Parameters
-----------
vocabulary_path : str
Path to the file containing the vocabulary.
Returns
--------
vocab : dictionary
a dictionary that maps word to ID.
rev_vocab : list of int
a list that maps ID to word.
Examples
---------
>>> Assume 'test' contains
dog
cat
bird
>>> vocab, rev_vocab = tl.nlp.initialize_vocabulary("test")
>>> print(vocab)
>>> {b'cat': 1, b'dog': 0, b'bird': 2}
>>> print(rev_vocab)
>>> [b'dog', b'cat', b'bird']
Raises
-------
ValueError : if the provided vocabulary_path does not exist. | [
"Initialize",
"vocabulary",
"from",
"file",
"return",
"the",
"word_to_id",
"(",
"dictionary",
")",
"and",
"id_to_word",
"(",
"list",
")",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/nlp.py#L935-L978 | valid |
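The mapping built by ``initialize_vocabulary`` can be reproduced with a few lines of plain Python; the in-memory lines below stand in for a hypothetical vocabulary file.

lines = [b"dog\n", b"cat\n", b"bird\n"]        # contents of a hypothetical vocabulary file

rev_vocab = [line.strip() for line in lines]
vocab = {word: idx for idx, word in enumerate(rev_vocab)}
print(vocab)       # {b'dog': 0, b'cat': 1, b'bird': 2}
print(rev_vocab)   # [b'dog', b'cat', b'bird']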
tensorlayer/tensorlayer | tensorlayer/nlp.py | sentence_to_token_ids | def sentence_to_token_ids(
sentence, vocabulary, tokenizer=None, normalize_digits=True, UNK_ID=3, _DIGIT_RE=re.compile(br"\d")
):
"""Convert a string to list of integers representing token-ids.
For example, a sentence "I have a dog" may become tokenized into
["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2,
"a": 4, "dog": 7"} this function will return [1, 2, 4, 7].
Parameters
-----------
sentence : tensorflow.python.platform.gfile.GFile Object
The sentence in bytes format to convert to token-ids, see ``basic_tokenizer()`` and ``data_to_token_ids()``.
vocabulary : dictionary
Mapping of tokens to integers.
tokenizer : function
A function to use to tokenize each sentence. If None, ``basic_tokenizer`` will be used.
normalize_digits : boolean
If true, all digits are replaced by 0.
Returns
--------
list of int
The token-ids for the sentence.
"""
if tokenizer:
words = tokenizer(sentence)
else:
words = basic_tokenizer(sentence)
if not normalize_digits:
return [vocabulary.get(w, UNK_ID) for w in words]
# Normalize digits by 0 before looking words up in the vocabulary.
return [vocabulary.get(re.sub(_DIGIT_RE, b"0", w), UNK_ID) for w in words] | python | def sentence_to_token_ids(
sentence, vocabulary, tokenizer=None, normalize_digits=True, UNK_ID=3, _DIGIT_RE=re.compile(br"\d")
):
"""Convert a string to list of integers representing token-ids.
For example, a sentence "I have a dog" may become tokenized into
["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2,
"a": 4, "dog": 7"} this function will return [1, 2, 4, 7].
Parameters
-----------
sentence : tensorflow.python.platform.gfile.GFile Object
The sentence in bytes format to convert to token-ids, see ``basic_tokenizer()`` and ``data_to_token_ids()``.
vocabulary : dictionary
Mapping of tokens to integers.
tokenizer : function
A function to use to tokenize each sentence. If None, ``basic_tokenizer`` will be used.
normalize_digits : boolean
If true, all digits are replaced by 0.
Returns
--------
list of int
The token-ids for the sentence.
"""
if tokenizer:
words = tokenizer(sentence)
else:
words = basic_tokenizer(sentence)
if not normalize_digits:
return [vocabulary.get(w, UNK_ID) for w in words]
# Normalize digits by 0 before looking words up in the vocabulary.
return [vocabulary.get(re.sub(_DIGIT_RE, b"0", w), UNK_ID) for w in words] | [
"def",
"sentence_to_token_ids",
"(",
"sentence",
",",
"vocabulary",
",",
"tokenizer",
"=",
"None",
",",
"normalize_digits",
"=",
"True",
",",
"UNK_ID",
"=",
"3",
",",
"_DIGIT_RE",
"=",
"re",
".",
"compile",
"(",
"br\"\\d\"",
")",
")",
":",
"if",
"tokenizer",
":",
"words",
"=",
"tokenizer",
"(",
"sentence",
")",
"else",
":",
"words",
"=",
"basic_tokenizer",
"(",
"sentence",
")",
"if",
"not",
"normalize_digits",
":",
"return",
"[",
"vocabulary",
".",
"get",
"(",
"w",
",",
"UNK_ID",
")",
"for",
"w",
"in",
"words",
"]",
"# Normalize digits by 0 before looking words up in the vocabulary.",
"return",
"[",
"vocabulary",
".",
"get",
"(",
"re",
".",
"sub",
"(",
"_DIGIT_RE",
",",
"b\"0\"",
",",
"w",
")",
",",
"UNK_ID",
")",
"for",
"w",
"in",
"words",
"]"
] | Convert a string to list of integers representing token-ids.
For example, a sentence "I have a dog" may become tokenized into
["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2,
"a": 4, "dog": 7"} this function will return [1, 2, 4, 7].
Parameters
-----------
sentence : tensorflow.python.platform.gfile.GFile Object
The sentence in bytes format to convert to token-ids, see ``basic_tokenizer()`` and ``data_to_token_ids()``.
vocabulary : dictionary
Mapping of tokens to integers.
tokenizer : function
A function to use to tokenize each sentence. If None, ``basic_tokenizer`` will be used.
normalize_digits : boolean
If true, all digits are replaced by 0.
Returns
--------
list of int
The token-ids for the sentence. | [
"Convert",
"a",
"string",
"to",
"list",
"of",
"integers",
"representing",
"token",
"-",
"ids",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/nlp.py#L981-L1014 | valid |
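A minimal sketch of ``sentence_to_token_ids`` with digit normalisation, using a hypothetical vocabulary and plain whitespace splitting in place of ``basic_tokenizer``:

import re

_DIGIT_RE = re.compile(br"\d")
UNK_ID = 3
vocabulary = {b"i": 4, b"have": 5, b"0": 6, b"dogs": 7}    # hypothetical token -> id map

words = b"i have 2 dogs".split()
token_ids = [vocabulary.get(re.sub(_DIGIT_RE, b"0", w), UNK_ID) for w in words]
print(token_ids)   # [4, 5, 6, 7] -- '2' is normalised to '0' before lookup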
tensorlayer/tensorlayer | tensorlayer/nlp.py | data_to_token_ids | def data_to_token_ids(
data_path, target_path, vocabulary_path, tokenizer=None, normalize_digits=True, UNK_ID=3,
_DIGIT_RE=re.compile(br"\d")
):
"""Tokenize data file and turn into token-ids using given vocabulary file.
This function loads data line-by-line from data_path, calls the above
sentence_to_token_ids, and saves the result to target_path. See comment
for sentence_to_token_ids on the details of token-ids format.
Parameters
-----------
data_path : str
Path to the data file in one-sentence-per-line format.
target_path : str
Path where the file with token-ids will be created.
vocabulary_path : str
Path to the vocabulary file.
tokenizer : function
A function to use to tokenize each sentence. If None, ``basic_tokenizer`` will be used.
normalize_digits : boolean
If true, all digits are replaced by 0.
References
----------
- Code from ``/tensorflow/models/rnn/translation/data_utils.py``
"""
if not gfile.Exists(target_path):
tl.logging.info("Tokenizing data in %s" % data_path)
vocab, _ = initialize_vocabulary(vocabulary_path)
with gfile.GFile(data_path, mode="rb") as data_file:
with gfile.GFile(target_path, mode="w") as tokens_file:
counter = 0
for line in data_file:
counter += 1
if counter % 100000 == 0:
tl.logging.info(" tokenizing line %d" % counter)
token_ids = sentence_to_token_ids(
line, vocab, tokenizer, normalize_digits, UNK_ID=UNK_ID, _DIGIT_RE=_DIGIT_RE
)
tokens_file.write(" ".join([str(tok) for tok in token_ids]) + "\n")
else:
tl.logging.info("Target path %s exists" % target_path) | python | def data_to_token_ids(
data_path, target_path, vocabulary_path, tokenizer=None, normalize_digits=True, UNK_ID=3,
_DIGIT_RE=re.compile(br"\d")
):
"""Tokenize data file and turn into token-ids using given vocabulary file.
This function loads data line-by-line from data_path, calls the above
sentence_to_token_ids, and saves the result to target_path. See comment
for sentence_to_token_ids on the details of token-ids format.
Parameters
-----------
data_path : str
Path to the data file in one-sentence-per-line format.
target_path : str
Path where the file with token-ids will be created.
vocabulary_path : str
Path to the vocabulary file.
tokenizer : function
A function to use to tokenize each sentence. If None, ``basic_tokenizer`` will be used.
normalize_digits : boolean
If true, all digits are replaced by 0.
References
----------
- Code from ``/tensorflow/models/rnn/translation/data_utils.py``
"""
if not gfile.Exists(target_path):
tl.logging.info("Tokenizing data in %s" % data_path)
vocab, _ = initialize_vocabulary(vocabulary_path)
with gfile.GFile(data_path, mode="rb") as data_file:
with gfile.GFile(target_path, mode="w") as tokens_file:
counter = 0
for line in data_file:
counter += 1
if counter % 100000 == 0:
tl.logging.info(" tokenizing line %d" % counter)
token_ids = sentence_to_token_ids(
line, vocab, tokenizer, normalize_digits, UNK_ID=UNK_ID, _DIGIT_RE=_DIGIT_RE
)
tokens_file.write(" ".join([str(tok) for tok in token_ids]) + "\n")
else:
tl.logging.info("Target path %s exists" % target_path) | [
"def",
"data_to_token_ids",
"(",
"data_path",
",",
"target_path",
",",
"vocabulary_path",
",",
"tokenizer",
"=",
"None",
",",
"normalize_digits",
"=",
"True",
",",
"UNK_ID",
"=",
"3",
",",
"_DIGIT_RE",
"=",
"re",
".",
"compile",
"(",
"br\"\\d\"",
")",
")",
":",
"if",
"not",
"gfile",
".",
"Exists",
"(",
"target_path",
")",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"\"Tokenizing data in %s\"",
"%",
"data_path",
")",
"vocab",
",",
"_",
"=",
"initialize_vocabulary",
"(",
"vocabulary_path",
")",
"with",
"gfile",
".",
"GFile",
"(",
"data_path",
",",
"mode",
"=",
"\"rb\"",
")",
"as",
"data_file",
":",
"with",
"gfile",
".",
"GFile",
"(",
"target_path",
",",
"mode",
"=",
"\"w\"",
")",
"as",
"tokens_file",
":",
"counter",
"=",
"0",
"for",
"line",
"in",
"data_file",
":",
"counter",
"+=",
"1",
"if",
"counter",
"%",
"100000",
"==",
"0",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"\" tokenizing line %d\"",
"%",
"counter",
")",
"token_ids",
"=",
"sentence_to_token_ids",
"(",
"line",
",",
"vocab",
",",
"tokenizer",
",",
"normalize_digits",
",",
"UNK_ID",
"=",
"UNK_ID",
",",
"_DIGIT_RE",
"=",
"_DIGIT_RE",
")",
"tokens_file",
".",
"write",
"(",
"\" \"",
".",
"join",
"(",
"[",
"str",
"(",
"tok",
")",
"for",
"tok",
"in",
"token_ids",
"]",
")",
"+",
"\"\\n\"",
")",
"else",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"\"Target path %s exists\"",
"%",
"target_path",
")"
] | Tokenize data file and turn into token-ids using given vocabulary file.
This function loads data line-by-line from data_path, calls the above
sentence_to_token_ids, and saves the result to target_path. See comment
for sentence_to_token_ids on the details of token-ids format.
Parameters
-----------
data_path : str
Path to the data file in one-sentence-per-line format.
target_path : str
Path where the file with token-ids will be created.
vocabulary_path : str
Path to the vocabulary file.
tokenizer : function
A function to use to tokenize each sentence. If None, ``basic_tokenizer`` will be used.
normalize_digits : boolean
If true, all digits are replaced by 0.
References
----------
- Code from ``/tensorflow/models/rnn/translation/data_utils.py`` | [
"Tokenize",
"data",
"file",
"and",
"turn",
"into",
"token",
"-",
"ids",
"using",
"given",
"vocabulary",
"file",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/nlp.py#L1017-L1060 | valid |
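An end-to-end sketch mirroring ``data_to_token_ids`` without ``tf.gfile``: each corpus line is mapped to ids and written out as one space-separated sequence per line. The vocabulary, corpus and output path are hypothetical.

vocab = {b"i": 4, b"have": 5, b"a": 6, b"cat": 7}
UNK_ID = 3
lines = [b"i have a cat", b"i have a dog"]

with open("ids_sketch.txt", "w") as tokens_file:
    for line in lines:
        token_ids = [vocab.get(w, UNK_ID) for w in line.split()]
        tokens_file.write(" ".join(str(tok) for tok in token_ids) + "\n")
# 'dog' is out of vocabulary, so the second line ends with the UNK id 3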
tensorlayer/tensorlayer | tensorlayer/nlp.py | moses_multi_bleu | def moses_multi_bleu(hypotheses, references, lowercase=False):
"""Calculate the bleu score for hypotheses and references
using the MOSES multi-bleu.perl script.
Parameters
------------
hypotheses : numpy.array.string
A numpy array of strings where each string is a single example.
references : numpy.array.string
A numpy array of strings where each string is a single example.
lowercase : boolean
If True, pass the "-lc" flag to the multi-bleu script
Examples
---------
>>> hypotheses = ["a bird is flying on the sky"]
>>> references = ["two birds are flying on the sky", "a bird is on the top of the tree", "an airplane is on the sky",]
>>> score = tl.nlp.moses_multi_bleu(hypotheses, references)
Returns
--------
float
The BLEU score
References
----------
- `Google/seq2seq/metric/bleu <https://github.com/google/seq2seq>`__
"""
if np.size(hypotheses) == 0:
return np.float32(0.0)
# Get MOSES multi-bleu script
try:
multi_bleu_path, _ = urllib.request.urlretrieve(
"https://raw.githubusercontent.com/moses-smt/mosesdecoder/"
"master/scripts/generic/multi-bleu.perl"
)
os.chmod(multi_bleu_path, 0o755)
except Exception: # pylint: disable=W0702
tl.logging.info("Unable to fetch multi-bleu.perl script, using local.")
metrics_dir = os.path.dirname(os.path.realpath(__file__))
bin_dir = os.path.abspath(os.path.join(metrics_dir, "..", "..", "bin"))
multi_bleu_path = os.path.join(bin_dir, "tools/multi-bleu.perl")
# Dump hypotheses and references to tempfiles
hypothesis_file = tempfile.NamedTemporaryFile()
hypothesis_file.write("\n".join(hypotheses).encode("utf-8"))
hypothesis_file.write(b"\n")
hypothesis_file.flush()
reference_file = tempfile.NamedTemporaryFile()
reference_file.write("\n".join(references).encode("utf-8"))
reference_file.write(b"\n")
reference_file.flush()
# Calculate BLEU using multi-bleu script
with open(hypothesis_file.name, "r") as read_pred:
bleu_cmd = [multi_bleu_path]
if lowercase:
bleu_cmd += ["-lc"]
bleu_cmd += [reference_file.name]
try:
bleu_out = subprocess.check_output(bleu_cmd, stdin=read_pred, stderr=subprocess.STDOUT)
bleu_out = bleu_out.decode("utf-8")
bleu_score = re.search(r"BLEU = (.+?),", bleu_out).group(1)
bleu_score = float(bleu_score)
except subprocess.CalledProcessError as error:
if error.output is not None:
tl.logging.warning("multi-bleu.perl script returned non-zero exit code")
tl.logging.warning(error.output)
bleu_score = np.float32(0.0)
# Close temp files
hypothesis_file.close()
reference_file.close()
return np.float32(bleu_score) | python | def moses_multi_bleu(hypotheses, references, lowercase=False):
"""Calculate the bleu score for hypotheses and references
    using the MOSES multi-bleu.perl script.
Parameters
------------
hypotheses : numpy.array.string
A numpy array of strings where each string is a single example.
references : numpy.array.string
A numpy array of strings where each string is a single example.
lowercase : boolean
If True, pass the "-lc" flag to the multi-bleu script
Examples
---------
>>> hypotheses = ["a bird is flying on the sky"]
>>> references = ["two birds are flying on the sky", "a bird is on the top of the tree", "an airplane is on the sky",]
>>> score = tl.nlp.moses_multi_bleu(hypotheses, references)
Returns
--------
float
The BLEU score
References
----------
- `Google/seq2seq/metric/bleu <https://github.com/google/seq2seq>`__
"""
if np.size(hypotheses) == 0:
return np.float32(0.0)
# Get MOSES multi-bleu script
try:
multi_bleu_path, _ = urllib.request.urlretrieve(
"https://raw.githubusercontent.com/moses-smt/mosesdecoder/"
"master/scripts/generic/multi-bleu.perl"
)
os.chmod(multi_bleu_path, 0o755)
except Exception: # pylint: disable=W0702
tl.logging.info("Unable to fetch multi-bleu.perl script, using local.")
metrics_dir = os.path.dirname(os.path.realpath(__file__))
bin_dir = os.path.abspath(os.path.join(metrics_dir, "..", "..", "bin"))
multi_bleu_path = os.path.join(bin_dir, "tools/multi-bleu.perl")
# Dump hypotheses and references to tempfiles
hypothesis_file = tempfile.NamedTemporaryFile()
hypothesis_file.write("\n".join(hypotheses).encode("utf-8"))
hypothesis_file.write(b"\n")
hypothesis_file.flush()
reference_file = tempfile.NamedTemporaryFile()
reference_file.write("\n".join(references).encode("utf-8"))
reference_file.write(b"\n")
reference_file.flush()
# Calculate BLEU using multi-bleu script
with open(hypothesis_file.name, "r") as read_pred:
bleu_cmd = [multi_bleu_path]
if lowercase:
bleu_cmd += ["-lc"]
bleu_cmd += [reference_file.name]
try:
bleu_out = subprocess.check_output(bleu_cmd, stdin=read_pred, stderr=subprocess.STDOUT)
bleu_out = bleu_out.decode("utf-8")
bleu_score = re.search(r"BLEU = (.+?),", bleu_out).group(1)
bleu_score = float(bleu_score)
except subprocess.CalledProcessError as error:
if error.output is not None:
tl.logging.warning("multi-bleu.perl script returned non-zero exit code")
tl.logging.warning(error.output)
bleu_score = np.float32(0.0)
# Close temp files
hypothesis_file.close()
reference_file.close()
return np.float32(bleu_score) | [
"def",
"moses_multi_bleu",
"(",
"hypotheses",
",",
"references",
",",
"lowercase",
"=",
"False",
")",
":",
"if",
"np",
".",
"size",
"(",
"hypotheses",
")",
"==",
"0",
":",
"return",
"np",
".",
"float32",
"(",
"0.0",
")",
"# Get MOSES multi-bleu script",
"try",
":",
"multi_bleu_path",
",",
"_",
"=",
"urllib",
".",
"request",
".",
"urlretrieve",
"(",
"\"https://raw.githubusercontent.com/moses-smt/mosesdecoder/\"",
"\"master/scripts/generic/multi-bleu.perl\"",
")",
"os",
".",
"chmod",
"(",
"multi_bleu_path",
",",
"0o755",
")",
"except",
"Exception",
":",
"# pylint: disable=W0702",
"tl",
".",
"logging",
".",
"info",
"(",
"\"Unable to fetch multi-bleu.perl script, using local.\"",
")",
"metrics_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
"bin_dir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"metrics_dir",
",",
"\"..\"",
",",
"\"..\"",
",",
"\"bin\"",
")",
")",
"multi_bleu_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"bin_dir",
",",
"\"tools/multi-bleu.perl\"",
")",
"# Dump hypotheses and references to tempfiles",
"hypothesis_file",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
")",
"hypothesis_file",
".",
"write",
"(",
"\"\\n\"",
".",
"join",
"(",
"hypotheses",
")",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
"hypothesis_file",
".",
"write",
"(",
"b\"\\n\"",
")",
"hypothesis_file",
".",
"flush",
"(",
")",
"reference_file",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
")",
"reference_file",
".",
"write",
"(",
"\"\\n\"",
".",
"join",
"(",
"references",
")",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
"reference_file",
".",
"write",
"(",
"b\"\\n\"",
")",
"reference_file",
".",
"flush",
"(",
")",
"# Calculate BLEU using multi-bleu script",
"with",
"open",
"(",
"hypothesis_file",
".",
"name",
",",
"\"r\"",
")",
"as",
"read_pred",
":",
"bleu_cmd",
"=",
"[",
"multi_bleu_path",
"]",
"if",
"lowercase",
":",
"bleu_cmd",
"+=",
"[",
"\"-lc\"",
"]",
"bleu_cmd",
"+=",
"[",
"reference_file",
".",
"name",
"]",
"try",
":",
"bleu_out",
"=",
"subprocess",
".",
"check_output",
"(",
"bleu_cmd",
",",
"stdin",
"=",
"read_pred",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
")",
"bleu_out",
"=",
"bleu_out",
".",
"decode",
"(",
"\"utf-8\"",
")",
"bleu_score",
"=",
"re",
".",
"search",
"(",
"r\"BLEU = (.+?),\"",
",",
"bleu_out",
")",
".",
"group",
"(",
"1",
")",
"bleu_score",
"=",
"float",
"(",
"bleu_score",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"error",
":",
"if",
"error",
".",
"output",
"is",
"not",
"None",
":",
"tl",
".",
"logging",
".",
"warning",
"(",
"\"multi-bleu.perl script returned non-zero exit code\"",
")",
"tl",
".",
"logging",
".",
"warning",
"(",
"error",
".",
"output",
")",
"bleu_score",
"=",
"np",
".",
"float32",
"(",
"0.0",
")",
"# Close temp files",
"hypothesis_file",
".",
"close",
"(",
")",
"reference_file",
".",
"close",
"(",
")",
"return",
"np",
".",
"float32",
"(",
"bleu_score",
")"
] | Calculate the bleu score for hypotheses and references
    using the MOSES multi-bleu.perl script.
Parameters
------------
hypotheses : numpy.array.string
A numpy array of strings where each string is a single example.
references : numpy.array.string
A numpy array of strings where each string is a single example.
lowercase : boolean
If True, pass the "-lc" flag to the multi-bleu script
Examples
---------
>>> hypotheses = ["a bird is flying on the sky"]
>>> references = ["two birds are flying on the sky", "a bird is on the top of the tree", "an airplane is on the sky",]
>>> score = tl.nlp.moses_multi_bleu(hypotheses, references)
Returns
--------
float
The BLEU score
References
----------
- `Google/seq2seq/metric/bleu <https://github.com/google/seq2seq>`__ | [
"Calculate",
"the",
"bleu",
"score",
"for",
"hypotheses",
"and",
"references",
"using",
"the",
"MOSES",
"ulti",
"-",
"bleu",
".",
"perl",
"script",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/nlp.py#L1063-L1139 | valid |
tensorlayer/tensorlayer | tensorlayer/nlp.py | SimpleVocabulary.word_to_id | def word_to_id(self, word):
"""Returns the integer id of a word string."""
if word in self._vocab:
return self._vocab[word]
else:
return self._unk_id | python | def word_to_id(self, word):
"""Returns the integer id of a word string."""
if word in self._vocab:
return self._vocab[word]
else:
return self._unk_id | [
"def",
"word_to_id",
"(",
"self",
",",
"word",
")",
":",
"if",
"word",
"in",
"self",
".",
"_vocab",
":",
"return",
"self",
".",
"_vocab",
"[",
"word",
"]",
"else",
":",
"return",
"self",
".",
"_unk_id"
] | Returns the integer id of a word string. | [
"Returns",
"the",
"integer",
"id",
"of",
"a",
"word",
"string",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/nlp.py#L226-L231 | valid |
tensorlayer/tensorlayer | tensorlayer/nlp.py | Vocabulary.word_to_id | def word_to_id(self, word):
"""Returns the integer word id of a word string."""
if word in self.vocab:
return self.vocab[word]
else:
return self.unk_id | python | def word_to_id(self, word):
"""Returns the integer word id of a word string."""
if word in self.vocab:
return self.vocab[word]
else:
return self.unk_id | [
"def",
"word_to_id",
"(",
"self",
",",
"word",
")",
":",
"if",
"word",
"in",
"self",
".",
"vocab",
":",
"return",
"self",
".",
"vocab",
"[",
"word",
"]",
"else",
":",
"return",
"self",
".",
"unk_id"
] | Returns the integer word id of a word string. | [
"Returns",
"the",
"integer",
"word",
"id",
"of",
"a",
"word",
"string",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/nlp.py#L320-L325 | valid |
tensorlayer/tensorlayer | tensorlayer/nlp.py | Vocabulary.id_to_word | def id_to_word(self, word_id):
"""Returns the word string of an integer word id."""
if word_id >= len(self.reverse_vocab):
return self.reverse_vocab[self.unk_id]
else:
return self.reverse_vocab[word_id] | python | def id_to_word(self, word_id):
"""Returns the word string of an integer word id."""
if word_id >= len(self.reverse_vocab):
return self.reverse_vocab[self.unk_id]
else:
return self.reverse_vocab[word_id] | [
"def",
"id_to_word",
"(",
"self",
",",
"word_id",
")",
":",
"if",
"word_id",
">=",
"len",
"(",
"self",
".",
"reverse_vocab",
")",
":",
"return",
"self",
".",
"reverse_vocab",
"[",
"self",
".",
"unk_id",
"]",
"else",
":",
"return",
"self",
".",
"reverse_vocab",
"[",
"word_id",
"]"
] | Returns the word string of an integer word id. | [
"Returns",
"the",
"word",
"string",
"of",
"an",
"integer",
"word",
"id",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/nlp.py#L327-L332 | valid |
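The two lookup methods above are symmetric: unknown words map to the UNK id and out-of-range ids map back to the UNK token. A self-contained sketch of that behaviour, using plain dicts as a stand-in for the real Vocabulary class:

# Toy vocabulary; the real class loads this mapping from a vocab file.
vocab = {"<UNK>": 0, "hello": 1, "world": 2}
reverse_vocab = {i: w for w, i in vocab.items()}
unk_id = vocab["<UNK>"]

def word_to_id(word):
    # unknown words fall back to the UNK id, as in Vocabulary.word_to_id
    return vocab.get(word, unk_id)

def id_to_word(word_id):
    # out-of-range ids fall back to the UNK token, as in Vocabulary.id_to_word
    return reverse_vocab.get(word_id, "<UNK>")

assert id_to_word(word_to_id("hello")) == "hello"
assert word_to_id("never-seen") == unk_id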
tensorlayer/tensorlayer | examples/text_generation/tutorial_generate_text.py | basic_clean_str | def basic_clean_str(string):
"""Tokenization/string cleaning for a datasets."""
string = re.sub(r"\n", " ", string) # '\n' --> ' '
string = re.sub(r"\'s", " \'s", string) # it's --> it 's
string = re.sub(r"\’s", " \'s", string)
string = re.sub(r"\'ve", " have", string) # they've --> they have
string = re.sub(r"\’ve", " have", string)
string = re.sub(r"\'t", " not", string) # can't --> can not
string = re.sub(r"\’t", " not", string)
string = re.sub(r"\'re", " are", string) # they're --> they are
string = re.sub(r"\’re", " are", string)
string = re.sub(r"\'d", "", string) # I'd (I had, I would) --> I
string = re.sub(r"\’d", "", string)
string = re.sub(r"\'ll", " will", string) # I'll --> I will
string = re.sub(r"\’ll", " will", string)
string = re.sub(r"\“", " ", string) # “a” --> “ a ”
string = re.sub(r"\”", " ", string)
string = re.sub(r"\"", " ", string) # "a" --> " a "
string = re.sub(r"\'", " ", string) # they' --> they '
string = re.sub(r"\’", " ", string) # they’ --> they ’
string = re.sub(r"\.", " . ", string) # they. --> they .
string = re.sub(r"\,", " , ", string) # they, --> they ,
string = re.sub(r"\!", " ! ", string)
string = re.sub(r"\-", " ", string) # "low-cost"--> lost cost
string = re.sub(r"\(", " ", string) # (they) --> ( they)
string = re.sub(r"\)", " ", string) # ( they) --> ( they )
string = re.sub(r"\]", " ", string) # they] --> they ]
string = re.sub(r"\[", " ", string) # they[ --> they [
string = re.sub(r"\?", " ", string) # they? --> they ?
string = re.sub(r"\>", " ", string) # they> --> they >
string = re.sub(r"\<", " ", string) # they< --> they <
string = re.sub(r"\=", " ", string) # easier= --> easier =
string = re.sub(r"\;", " ", string) # easier; --> easier ;
string = re.sub(r"\;", " ", string)
string = re.sub(r"\:", " ", string) # easier: --> easier :
string = re.sub(r"\"", " ", string) # easier" --> easier "
string = re.sub(r"\$", " ", string) # $380 --> $ 380
string = re.sub(r"\_", " ", string) # _100 --> _ 100
string = re.sub(r"\s{2,}", " ", string) # Akara is handsome --> Akara is handsome
return string.strip().lower() | python | def basic_clean_str(string):
"""Tokenization/string cleaning for a datasets."""
string = re.sub(r"\n", " ", string) # '\n' --> ' '
string = re.sub(r"\'s", " \'s", string) # it's --> it 's
string = re.sub(r"\’s", " \'s", string)
string = re.sub(r"\'ve", " have", string) # they've --> they have
string = re.sub(r"\’ve", " have", string)
string = re.sub(r"\'t", " not", string) # can't --> can not
string = re.sub(r"\’t", " not", string)
string = re.sub(r"\'re", " are", string) # they're --> they are
string = re.sub(r"\’re", " are", string)
string = re.sub(r"\'d", "", string) # I'd (I had, I would) --> I
string = re.sub(r"\’d", "", string)
string = re.sub(r"\'ll", " will", string) # I'll --> I will
string = re.sub(r"\’ll", " will", string)
string = re.sub(r"\“", " ", string) # “a” --> “ a ”
string = re.sub(r"\”", " ", string)
string = re.sub(r"\"", " ", string) # "a" --> " a "
string = re.sub(r"\'", " ", string) # they' --> they '
string = re.sub(r"\’", " ", string) # they’ --> they ’
string = re.sub(r"\.", " . ", string) # they. --> they .
string = re.sub(r"\,", " , ", string) # they, --> they ,
string = re.sub(r"\!", " ! ", string)
string = re.sub(r"\-", " ", string) # "low-cost"--> lost cost
string = re.sub(r"\(", " ", string) # (they) --> ( they)
string = re.sub(r"\)", " ", string) # ( they) --> ( they )
string = re.sub(r"\]", " ", string) # they] --> they ]
string = re.sub(r"\[", " ", string) # they[ --> they [
string = re.sub(r"\?", " ", string) # they? --> they ?
string = re.sub(r"\>", " ", string) # they> --> they >
string = re.sub(r"\<", " ", string) # they< --> they <
string = re.sub(r"\=", " ", string) # easier= --> easier =
string = re.sub(r"\;", " ", string) # easier; --> easier ;
string = re.sub(r"\;", " ", string)
string = re.sub(r"\:", " ", string) # easier: --> easier :
string = re.sub(r"\"", " ", string) # easier" --> easier "
string = re.sub(r"\$", " ", string) # $380 --> $ 380
string = re.sub(r"\_", " ", string) # _100 --> _ 100
string = re.sub(r"\s{2,}", " ", string) # Akara is handsome --> Akara is handsome
return string.strip().lower() | [
"def",
"basic_clean_str",
"(",
"string",
")",
":",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\n\"",
",",
"\" \"",
",",
"string",
")",
"# '\\n' --> ' '",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\'s\"",
",",
"\" \\'s\"",
",",
"string",
")",
"# it's --> it 's",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\’s\", ",
"\"",
"\\'s\", ",
"s",
"ring)",
"",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\'ve\"",
",",
"\" have\"",
",",
"string",
")",
"# they've --> they have",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\’ve\", ",
"\"",
"have\", ",
"s",
"ring)",
"",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\'t\"",
",",
"\" not\"",
",",
"string",
")",
"# can't --> can not",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\’t\", ",
"\"",
"not\", ",
"s",
"ring)",
"",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\'re\"",
",",
"\" are\"",
",",
"string",
")",
"# they're --> they are",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\’re\", ",
"\"",
"are\", ",
"s",
"ring)",
"",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\'d\"",
",",
"\"\"",
",",
"string",
")",
"# I'd (I had, I would) --> I",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\’d\", ",
"\"",
", ",
"s",
"ring)",
"",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\'ll\"",
",",
"\" will\"",
",",
"string",
")",
"# I'll --> I will",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\’ll\", ",
"\"",
"will\", ",
"s",
"ring)",
"",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\“\", ",
"\"",
" \", ",
"s",
"ring) ",
" ",
"“a” --> “ a ”",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\”\", ",
"\"",
" \", ",
"s",
"ring)",
"",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\\"\"",
",",
"\" \"",
",",
"string",
")",
"# \"a\" --> \" a \"",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\'\"",
",",
"\" \"",
",",
"string",
")",
"# they' --> they '",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\’\", ",
"\"",
" \", ",
"s",
"ring) ",
" ",
"they’ --> they ’",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\.\"",
",",
"\" . \"",
",",
"string",
")",
"# they. --> they .",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\,\"",
",",
"\" , \"",
",",
"string",
")",
"# they, --> they ,",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\!\"",
",",
"\" ! \"",
",",
"string",
")",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\-\"",
",",
"\" \"",
",",
"string",
")",
"# \"low-cost\"--> lost cost",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\(\"",
",",
"\" \"",
",",
"string",
")",
"# (they) --> ( they)",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\)\"",
",",
"\" \"",
",",
"string",
")",
"# ( they) --> ( they )",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\]\"",
",",
"\" \"",
",",
"string",
")",
"# they] --> they ]",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\[\"",
",",
"\" \"",
",",
"string",
")",
"# they[ --> they [",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\?\"",
",",
"\" \"",
",",
"string",
")",
"# they? --> they ?",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\>\"",
",",
"\" \"",
",",
"string",
")",
"# they> --> they >",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\<\"",
",",
"\" \"",
",",
"string",
")",
"# they< --> they <",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\=\"",
",",
"\" \"",
",",
"string",
")",
"# easier= --> easier =",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\;\"",
",",
"\" \"",
",",
"string",
")",
"# easier; --> easier ;",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\;\"",
",",
"\" \"",
",",
"string",
")",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\:\"",
",",
"\" \"",
",",
"string",
")",
"# easier: --> easier :",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\\"\"",
",",
"\" \"",
",",
"string",
")",
"# easier\" --> easier \"",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\$\"",
",",
"\" \"",
",",
"string",
")",
"# $380 --> $ 380",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\_\"",
",",
"\" \"",
",",
"string",
")",
"# _100 --> _ 100",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\s{2,}\"",
",",
"\" \"",
",",
"string",
")",
"# Akara is handsome --> Akara is handsome",
"return",
"string",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")"
] | Tokenization/string cleaning for a dataset. | [
"Tokenization",
"/",
"string",
"cleaning",
"for",
"a",
"datasets",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/text_generation/tutorial_generate_text.py#L39-L78 | valid |
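A quick worked example of the cleaning rules above (contractions expanded, hyphens and punctuation spaced out, everything lower-cased). The input sentence is made up; the expected output follows directly from the substitutions listed in the function.

s = "They've said we can't afford a low-cost plan!"
print(basic_clean_str(s))
# -> they have said we can not afford a low cost plan !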
tensorlayer/tensorlayer | examples/text_generation/tutorial_generate_text.py | main_restore_embedding_layer | def main_restore_embedding_layer():
"""How to use Embedding layer, and how to convert IDs to vector,
IDs to words, etc.
"""
# Step 1: Build the embedding matrix and load the existing embedding matrix.
vocabulary_size = 50000
embedding_size = 128
model_file_name = "model_word2vec_50k_128"
batch_size = None
print("Load existing embedding matrix and dictionaries")
all_var = tl.files.load_npy_to_any(name=model_file_name + '.npy')
data = all_var['data']
count = all_var['count']
dictionary = all_var['dictionary']
reverse_dictionary = all_var['reverse_dictionary']
tl.nlp.save_vocab(count, name='vocab_' + model_file_name + '.txt')
del all_var, data, count
load_params = tl.files.load_npz(name=model_file_name + '.npz')
x = tf.placeholder(tf.int32, shape=[batch_size])
emb_net = tl.layers.EmbeddingInputlayer(x, vocabulary_size, embedding_size, name='emb')
# sess.run(tf.global_variables_initializer())
sess.run(tf.global_variables_initializer())
tl.files.assign_params(sess, [load_params[0]], emb_net)
emb_net.print_params()
emb_net.print_layers()
# Step 2: Input word(s), output the word vector(s).
word = b'hello'
word_id = dictionary[word]
print('word_id:', word_id)
words = [b'i', b'am', b'tensor', b'layer']
word_ids = tl.nlp.words_to_word_ids(words, dictionary, _UNK)
context = tl.nlp.word_ids_to_words(word_ids, reverse_dictionary)
print('word_ids:', word_ids)
print('context:', context)
vector = sess.run(emb_net.outputs, feed_dict={x: [word_id]})
print('vector:', vector.shape)
vectors = sess.run(emb_net.outputs, feed_dict={x: word_ids})
print('vectors:', vectors.shape) | python | def main_restore_embedding_layer():
"""How to use Embedding layer, and how to convert IDs to vector,
IDs to words, etc.
"""
# Step 1: Build the embedding matrix and load the existing embedding matrix.
vocabulary_size = 50000
embedding_size = 128
model_file_name = "model_word2vec_50k_128"
batch_size = None
print("Load existing embedding matrix and dictionaries")
all_var = tl.files.load_npy_to_any(name=model_file_name + '.npy')
data = all_var['data']
count = all_var['count']
dictionary = all_var['dictionary']
reverse_dictionary = all_var['reverse_dictionary']
tl.nlp.save_vocab(count, name='vocab_' + model_file_name + '.txt')
del all_var, data, count
load_params = tl.files.load_npz(name=model_file_name + '.npz')
x = tf.placeholder(tf.int32, shape=[batch_size])
emb_net = tl.layers.EmbeddingInputlayer(x, vocabulary_size, embedding_size, name='emb')
# sess.run(tf.global_variables_initializer())
sess.run(tf.global_variables_initializer())
tl.files.assign_params(sess, [load_params[0]], emb_net)
emb_net.print_params()
emb_net.print_layers()
# Step 2: Input word(s), output the word vector(s).
word = b'hello'
word_id = dictionary[word]
print('word_id:', word_id)
words = [b'i', b'am', b'tensor', b'layer']
word_ids = tl.nlp.words_to_word_ids(words, dictionary, _UNK)
context = tl.nlp.word_ids_to_words(word_ids, reverse_dictionary)
print('word_ids:', word_ids)
print('context:', context)
vector = sess.run(emb_net.outputs, feed_dict={x: [word_id]})
print('vector:', vector.shape)
vectors = sess.run(emb_net.outputs, feed_dict={x: word_ids})
print('vectors:', vectors.shape) | [
"def",
"main_restore_embedding_layer",
"(",
")",
":",
"# Step 1: Build the embedding matrix and load the existing embedding matrix.",
"vocabulary_size",
"=",
"50000",
"embedding_size",
"=",
"128",
"model_file_name",
"=",
"\"model_word2vec_50k_128\"",
"batch_size",
"=",
"None",
"print",
"(",
"\"Load existing embedding matrix and dictionaries\"",
")",
"all_var",
"=",
"tl",
".",
"files",
".",
"load_npy_to_any",
"(",
"name",
"=",
"model_file_name",
"+",
"'.npy'",
")",
"data",
"=",
"all_var",
"[",
"'data'",
"]",
"count",
"=",
"all_var",
"[",
"'count'",
"]",
"dictionary",
"=",
"all_var",
"[",
"'dictionary'",
"]",
"reverse_dictionary",
"=",
"all_var",
"[",
"'reverse_dictionary'",
"]",
"tl",
".",
"nlp",
".",
"save_vocab",
"(",
"count",
",",
"name",
"=",
"'vocab_'",
"+",
"model_file_name",
"+",
"'.txt'",
")",
"del",
"all_var",
",",
"data",
",",
"count",
"load_params",
"=",
"tl",
".",
"files",
".",
"load_npz",
"(",
"name",
"=",
"model_file_name",
"+",
"'.npz'",
")",
"x",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"int32",
",",
"shape",
"=",
"[",
"batch_size",
"]",
")",
"emb_net",
"=",
"tl",
".",
"layers",
".",
"EmbeddingInputlayer",
"(",
"x",
",",
"vocabulary_size",
",",
"embedding_size",
",",
"name",
"=",
"'emb'",
")",
"# sess.run(tf.global_variables_initializer())",
"sess",
".",
"run",
"(",
"tf",
".",
"global_variables_initializer",
"(",
")",
")",
"tl",
".",
"files",
".",
"assign_params",
"(",
"sess",
",",
"[",
"load_params",
"[",
"0",
"]",
"]",
",",
"emb_net",
")",
"emb_net",
".",
"print_params",
"(",
")",
"emb_net",
".",
"print_layers",
"(",
")",
"# Step 2: Input word(s), output the word vector(s).",
"word",
"=",
"b'hello'",
"word_id",
"=",
"dictionary",
"[",
"word",
"]",
"print",
"(",
"'word_id:'",
",",
"word_id",
")",
"words",
"=",
"[",
"b'i'",
",",
"b'am'",
",",
"b'tensor'",
",",
"b'layer'",
"]",
"word_ids",
"=",
"tl",
".",
"nlp",
".",
"words_to_word_ids",
"(",
"words",
",",
"dictionary",
",",
"_UNK",
")",
"context",
"=",
"tl",
".",
"nlp",
".",
"word_ids_to_words",
"(",
"word_ids",
",",
"reverse_dictionary",
")",
"print",
"(",
"'word_ids:'",
",",
"word_ids",
")",
"print",
"(",
"'context:'",
",",
"context",
")",
"vector",
"=",
"sess",
".",
"run",
"(",
"emb_net",
".",
"outputs",
",",
"feed_dict",
"=",
"{",
"x",
":",
"[",
"word_id",
"]",
"}",
")",
"print",
"(",
"'vector:'",
",",
"vector",
".",
"shape",
")",
"vectors",
"=",
"sess",
".",
"run",
"(",
"emb_net",
".",
"outputs",
",",
"feed_dict",
"=",
"{",
"x",
":",
"word_ids",
"}",
")",
"print",
"(",
"'vectors:'",
",",
"vectors",
".",
"shape",
")"
] | How to use Embedding layer, and how to convert IDs to vector,
IDs to words, etc. | [
"How",
"to",
"use",
"Embedding",
"layer",
"and",
"how",
"to",
"convert",
"IDs",
"to",
"vector",
"IDs",
"to",
"words",
"etc",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/text_generation/tutorial_generate_text.py#L132-L182 | valid |
tensorlayer/tensorlayer | examples/text_generation/tutorial_generate_text.py | main_lstm_generate_text | def main_lstm_generate_text():
"""Generate text by Synced sequence input and output."""
    # rnn model and update (description: see tutorial_ptb_lstm.py)
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 5
sequence_length = 20
hidden_size = 200
max_epoch = 4
max_max_epoch = 100
lr_decay = 0.9
batch_size = 20
top_k_list = [1, 3, 5, 10]
print_length = 30
model_file_name = "model_generate_text.npz"
# ===== Prepare Data
words = customized_read_words(input_fpath="data/trump/trump_text.txt")
vocab = tl.nlp.create_vocab([words], word_counts_output_file='vocab.txt', min_word_count=1)
vocab = tl.nlp.Vocabulary('vocab.txt', unk_word="<UNK>")
vocab_size = vocab.unk_id + 1
train_data = [vocab.word_to_id(word) for word in words]
# Set the seed to generate sentence.
seed = "it is a"
# seed = basic_clean_str(seed).split()
seed = nltk.tokenize.word_tokenize(seed)
print('seed : %s' % seed)
sess = tf.InteractiveSession()
# ===== Define model
input_data = tf.placeholder(tf.int32, [batch_size, sequence_length])
targets = tf.placeholder(tf.int32, [batch_size, sequence_length])
# Testing (Evaluation), for generate text
input_data_test = tf.placeholder(tf.int32, [1, 1])
def inference(x, is_train, sequence_length, reuse=None):
"""If reuse is True, the inferences use the existing parameters,
then different inferences share the same parameters.
"""
print("\nsequence_length: %d, is_train: %s, reuse: %s" % (sequence_length, is_train, reuse))
rnn_init = tf.random_uniform_initializer(-init_scale, init_scale)
with tf.variable_scope("model", reuse=reuse):
network = EmbeddingInputlayer(x, vocab_size, hidden_size, rnn_init, name='embedding')
network = RNNLayer(
network, cell_fn=tf.contrib.rnn.BasicLSTMCell, cell_init_args={
'forget_bias': 0.0,
'state_is_tuple': True
}, n_hidden=hidden_size, initializer=rnn_init, n_steps=sequence_length, return_last=False,
return_seq_2d=True, name='lstm1'
)
lstm1 = network
network = DenseLayer(network, vocab_size, W_init=rnn_init, b_init=rnn_init, act=None, name='output')
return network, lstm1
# Inference for Training
network, lstm1 = inference(input_data, is_train=True, sequence_length=sequence_length, reuse=None)
# Inference for generate text, sequence_length=1
network_test, lstm1_test = inference(input_data_test, is_train=False, sequence_length=1, reuse=True)
y_linear = network_test.outputs
y_soft = tf.nn.softmax(y_linear)
# y_id = tf.argmax(tf.nn.softmax(y), 1)
# ===== Define train ops
def loss_fn(outputs, targets, batch_size, sequence_length):
        # Returns the cost function of Cross-entropy of two sequences, implementing
# softmax internally.
# outputs : 2D tensor [n_examples, n_outputs]
# targets : 2D tensor [n_examples, n_outputs]
# n_examples = batch_size * sequence_length
# so
# cost is the averaged cost of each mini-batch (concurrent process).
loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[outputs], [tf.reshape(targets, [-1])], [tf.ones([batch_size * sequence_length])]
)
cost = tf.reduce_sum(loss) / batch_size
return cost
# Cost for Training
cost = loss_fn(network.outputs, targets, batch_size, sequence_length)
# Truncated Backpropagation for training
with tf.variable_scope('learning_rate'):
lr = tf.Variable(0.0, trainable=False)
# You can get all trainable parameters as follow.
# tvars = tf.trainable_variables()
    # Alternatively, you can specify the parameters for training as follow.
# tvars = network.all_params $ all parameters
# tvars = network.all_params[1:] $ parameters except embedding matrix
# Train the whole network.
tvars = network.all_params
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), max_grad_norm)
optimizer = tf.train.GradientDescentOptimizer(lr)
train_op = optimizer.apply_gradients(zip(grads, tvars))
# ===== Training
sess.run(tf.global_variables_initializer())
print("\nStart learning a model to generate text")
for i in range(max_max_epoch):
        # decrease the learning_rate after ``max_epoch``, by multiplying lr_decay.
new_lr_decay = lr_decay**max(i - max_epoch, 0.0)
sess.run(tf.assign(lr, learning_rate * new_lr_decay))
print("Epoch: %d/%d Learning rate: %.8f" % (i + 1, max_max_epoch, sess.run(lr)))
epoch_size = ((len(train_data) // batch_size) - 1) // sequence_length
start_time = time.time()
costs = 0.0
iters = 0
        # reset all states at the beginning of every epoch
state1 = tl.layers.initialize_rnn_state(lstm1.initial_state)
for step, (x, y) in enumerate(tl.iterate.ptb_iterator(train_data, batch_size, sequence_length)):
_cost, state1, _ = sess.run(
[cost, lstm1.final_state, train_op], feed_dict={
input_data: x,
targets: y,
lstm1.initial_state: state1
}
)
costs += _cost
iters += sequence_length
if step % (epoch_size // 10) == 1:
print(
"%.3f perplexity: %.3f speed: %.0f wps" %
(step * 1.0 / epoch_size, np.exp(costs / iters), iters * batch_size / (time.time() - start_time))
)
train_perplexity = np.exp(costs / iters)
# print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
print("Epoch: %d/%d Train Perplexity: %.3f" % (i + 1, max_max_epoch, train_perplexity))
# for diversity in diversity_list:
# testing: sample from top k words
for top_k in top_k_list:
# Testing, generate some text from a given seed.
state1 = tl.layers.initialize_rnn_state(lstm1_test.initial_state)
# state2 = tl.layers.initialize_rnn_state(lstm2_test.initial_state)
outs_id = [vocab.word_to_id(w) for w in seed]
# feed the seed to initialize the state for generation.
for ids in outs_id[:-1]:
a_id = np.asarray(ids).reshape(1, 1)
state1 = sess.run(
[lstm1_test.final_state], feed_dict={
input_data_test: a_id,
lstm1_test.initial_state: state1
}
)
# feed the last word in seed, and start to generate sentence.
a_id = outs_id[-1]
for _ in range(print_length):
a_id = np.asarray(a_id).reshape(1, 1)
out, state1 = sess.run(
[y_soft, lstm1_test.final_state], feed_dict={
input_data_test: a_id,
lstm1_test.initial_state: state1
}
)
# Without sampling
# a_id = np.argmax(out[0])
# Sample from all words, if vocab_size is large,
# this may have numeric error.
# a_id = tl.nlp.sample(out[0], diversity)
# Sample from the top k words.
a_id = tl.nlp.sample_top(out[0], top_k=top_k)
outs_id.append(a_id)
sentence = [vocab.id_to_word(w) for w in outs_id]
sentence = " ".join(sentence)
# print(diversity, ':', sentence)
print(top_k, ':', sentence)
print("Save model")
tl.files.save_npz(network_test.all_params, name=model_file_name) | python | def main_lstm_generate_text():
"""Generate text by Synced sequence input and output."""
    # rnn model and update (description: see tutorial_ptb_lstm.py)
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 5
sequence_length = 20
hidden_size = 200
max_epoch = 4
max_max_epoch = 100
lr_decay = 0.9
batch_size = 20
top_k_list = [1, 3, 5, 10]
print_length = 30
model_file_name = "model_generate_text.npz"
# ===== Prepare Data
words = customized_read_words(input_fpath="data/trump/trump_text.txt")
vocab = tl.nlp.create_vocab([words], word_counts_output_file='vocab.txt', min_word_count=1)
vocab = tl.nlp.Vocabulary('vocab.txt', unk_word="<UNK>")
vocab_size = vocab.unk_id + 1
train_data = [vocab.word_to_id(word) for word in words]
# Set the seed to generate sentence.
seed = "it is a"
# seed = basic_clean_str(seed).split()
seed = nltk.tokenize.word_tokenize(seed)
print('seed : %s' % seed)
sess = tf.InteractiveSession()
# ===== Define model
input_data = tf.placeholder(tf.int32, [batch_size, sequence_length])
targets = tf.placeholder(tf.int32, [batch_size, sequence_length])
# Testing (Evaluation), for generate text
input_data_test = tf.placeholder(tf.int32, [1, 1])
def inference(x, is_train, sequence_length, reuse=None):
"""If reuse is True, the inferences use the existing parameters,
then different inferences share the same parameters.
"""
print("\nsequence_length: %d, is_train: %s, reuse: %s" % (sequence_length, is_train, reuse))
rnn_init = tf.random_uniform_initializer(-init_scale, init_scale)
with tf.variable_scope("model", reuse=reuse):
network = EmbeddingInputlayer(x, vocab_size, hidden_size, rnn_init, name='embedding')
network = RNNLayer(
network, cell_fn=tf.contrib.rnn.BasicLSTMCell, cell_init_args={
'forget_bias': 0.0,
'state_is_tuple': True
}, n_hidden=hidden_size, initializer=rnn_init, n_steps=sequence_length, return_last=False,
return_seq_2d=True, name='lstm1'
)
lstm1 = network
network = DenseLayer(network, vocab_size, W_init=rnn_init, b_init=rnn_init, act=None, name='output')
return network, lstm1
# Inference for Training
network, lstm1 = inference(input_data, is_train=True, sequence_length=sequence_length, reuse=None)
# Inference for generate text, sequence_length=1
network_test, lstm1_test = inference(input_data_test, is_train=False, sequence_length=1, reuse=True)
y_linear = network_test.outputs
y_soft = tf.nn.softmax(y_linear)
# y_id = tf.argmax(tf.nn.softmax(y), 1)
# ===== Define train ops
def loss_fn(outputs, targets, batch_size, sequence_length):
        # Returns the cost function of Cross-entropy of two sequences, implementing
# softmax internally.
# outputs : 2D tensor [n_examples, n_outputs]
# targets : 2D tensor [n_examples, n_outputs]
# n_examples = batch_size * sequence_length
# so
# cost is the averaged cost of each mini-batch (concurrent process).
loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[outputs], [tf.reshape(targets, [-1])], [tf.ones([batch_size * sequence_length])]
)
cost = tf.reduce_sum(loss) / batch_size
return cost
# Cost for Training
cost = loss_fn(network.outputs, targets, batch_size, sequence_length)
# Truncated Backpropagation for training
with tf.variable_scope('learning_rate'):
lr = tf.Variable(0.0, trainable=False)
# You can get all trainable parameters as follow.
# tvars = tf.trainable_variables()
    # Alternatively, you can specify the parameters for training as follow.
# tvars = network.all_params $ all parameters
# tvars = network.all_params[1:] $ parameters except embedding matrix
# Train the whole network.
tvars = network.all_params
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), max_grad_norm)
optimizer = tf.train.GradientDescentOptimizer(lr)
train_op = optimizer.apply_gradients(zip(grads, tvars))
# ===== Training
sess.run(tf.global_variables_initializer())
print("\nStart learning a model to generate text")
for i in range(max_max_epoch):
        # decrease the learning_rate after ``max_epoch``, by multiplying lr_decay.
new_lr_decay = lr_decay**max(i - max_epoch, 0.0)
sess.run(tf.assign(lr, learning_rate * new_lr_decay))
print("Epoch: %d/%d Learning rate: %.8f" % (i + 1, max_max_epoch, sess.run(lr)))
epoch_size = ((len(train_data) // batch_size) - 1) // sequence_length
start_time = time.time()
costs = 0.0
iters = 0
        # reset all states at the beginning of every epoch
state1 = tl.layers.initialize_rnn_state(lstm1.initial_state)
for step, (x, y) in enumerate(tl.iterate.ptb_iterator(train_data, batch_size, sequence_length)):
_cost, state1, _ = sess.run(
[cost, lstm1.final_state, train_op], feed_dict={
input_data: x,
targets: y,
lstm1.initial_state: state1
}
)
costs += _cost
iters += sequence_length
if step % (epoch_size // 10) == 1:
print(
"%.3f perplexity: %.3f speed: %.0f wps" %
(step * 1.0 / epoch_size, np.exp(costs / iters), iters * batch_size / (time.time() - start_time))
)
train_perplexity = np.exp(costs / iters)
# print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
print("Epoch: %d/%d Train Perplexity: %.3f" % (i + 1, max_max_epoch, train_perplexity))
# for diversity in diversity_list:
# testing: sample from top k words
for top_k in top_k_list:
# Testing, generate some text from a given seed.
state1 = tl.layers.initialize_rnn_state(lstm1_test.initial_state)
# state2 = tl.layers.initialize_rnn_state(lstm2_test.initial_state)
outs_id = [vocab.word_to_id(w) for w in seed]
# feed the seed to initialize the state for generation.
for ids in outs_id[:-1]:
a_id = np.asarray(ids).reshape(1, 1)
state1 = sess.run(
[lstm1_test.final_state], feed_dict={
input_data_test: a_id,
lstm1_test.initial_state: state1
}
)
# feed the last word in seed, and start to generate sentence.
a_id = outs_id[-1]
for _ in range(print_length):
a_id = np.asarray(a_id).reshape(1, 1)
out, state1 = sess.run(
[y_soft, lstm1_test.final_state], feed_dict={
input_data_test: a_id,
lstm1_test.initial_state: state1
}
)
# Without sampling
# a_id = np.argmax(out[0])
# Sample from all words, if vocab_size is large,
# this may have numeric error.
# a_id = tl.nlp.sample(out[0], diversity)
# Sample from the top k words.
a_id = tl.nlp.sample_top(out[0], top_k=top_k)
outs_id.append(a_id)
sentence = [vocab.id_to_word(w) for w in outs_id]
sentence = " ".join(sentence)
# print(diversity, ':', sentence)
print(top_k, ':', sentence)
print("Save model")
tl.files.save_npz(network_test.all_params, name=model_file_name) | [
"def",
"main_lstm_generate_text",
"(",
")",
":",
"# rnn model and update (describtion: see tutorial_ptb_lstm.py)",
"init_scale",
"=",
"0.1",
"learning_rate",
"=",
"1.0",
"max_grad_norm",
"=",
"5",
"sequence_length",
"=",
"20",
"hidden_size",
"=",
"200",
"max_epoch",
"=",
"4",
"max_max_epoch",
"=",
"100",
"lr_decay",
"=",
"0.9",
"batch_size",
"=",
"20",
"top_k_list",
"=",
"[",
"1",
",",
"3",
",",
"5",
",",
"10",
"]",
"print_length",
"=",
"30",
"model_file_name",
"=",
"\"model_generate_text.npz\"",
"# ===== Prepare Data",
"words",
"=",
"customized_read_words",
"(",
"input_fpath",
"=",
"\"data/trump/trump_text.txt\"",
")",
"vocab",
"=",
"tl",
".",
"nlp",
".",
"create_vocab",
"(",
"[",
"words",
"]",
",",
"word_counts_output_file",
"=",
"'vocab.txt'",
",",
"min_word_count",
"=",
"1",
")",
"vocab",
"=",
"tl",
".",
"nlp",
".",
"Vocabulary",
"(",
"'vocab.txt'",
",",
"unk_word",
"=",
"\"<UNK>\"",
")",
"vocab_size",
"=",
"vocab",
".",
"unk_id",
"+",
"1",
"train_data",
"=",
"[",
"vocab",
".",
"word_to_id",
"(",
"word",
")",
"for",
"word",
"in",
"words",
"]",
"# Set the seed to generate sentence.",
"seed",
"=",
"\"it is a\"",
"# seed = basic_clean_str(seed).split()",
"seed",
"=",
"nltk",
".",
"tokenize",
".",
"word_tokenize",
"(",
"seed",
")",
"print",
"(",
"'seed : %s'",
"%",
"seed",
")",
"sess",
"=",
"tf",
".",
"InteractiveSession",
"(",
")",
"# ===== Define model",
"input_data",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"int32",
",",
"[",
"batch_size",
",",
"sequence_length",
"]",
")",
"targets",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"int32",
",",
"[",
"batch_size",
",",
"sequence_length",
"]",
")",
"# Testing (Evaluation), for generate text",
"input_data_test",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"int32",
",",
"[",
"1",
",",
"1",
"]",
")",
"def",
"inference",
"(",
"x",
",",
"is_train",
",",
"sequence_length",
",",
"reuse",
"=",
"None",
")",
":",
"\"\"\"If reuse is True, the inferences use the existing parameters,\n then different inferences share the same parameters.\n \"\"\"",
"print",
"(",
"\"\\nsequence_length: %d, is_train: %s, reuse: %s\"",
"%",
"(",
"sequence_length",
",",
"is_train",
",",
"reuse",
")",
")",
"rnn_init",
"=",
"tf",
".",
"random_uniform_initializer",
"(",
"-",
"init_scale",
",",
"init_scale",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"\"model\"",
",",
"reuse",
"=",
"reuse",
")",
":",
"network",
"=",
"EmbeddingInputlayer",
"(",
"x",
",",
"vocab_size",
",",
"hidden_size",
",",
"rnn_init",
",",
"name",
"=",
"'embedding'",
")",
"network",
"=",
"RNNLayer",
"(",
"network",
",",
"cell_fn",
"=",
"tf",
".",
"contrib",
".",
"rnn",
".",
"BasicLSTMCell",
",",
"cell_init_args",
"=",
"{",
"'forget_bias'",
":",
"0.0",
",",
"'state_is_tuple'",
":",
"True",
"}",
",",
"n_hidden",
"=",
"hidden_size",
",",
"initializer",
"=",
"rnn_init",
",",
"n_steps",
"=",
"sequence_length",
",",
"return_last",
"=",
"False",
",",
"return_seq_2d",
"=",
"True",
",",
"name",
"=",
"'lstm1'",
")",
"lstm1",
"=",
"network",
"network",
"=",
"DenseLayer",
"(",
"network",
",",
"vocab_size",
",",
"W_init",
"=",
"rnn_init",
",",
"b_init",
"=",
"rnn_init",
",",
"act",
"=",
"None",
",",
"name",
"=",
"'output'",
")",
"return",
"network",
",",
"lstm1",
"# Inference for Training",
"network",
",",
"lstm1",
"=",
"inference",
"(",
"input_data",
",",
"is_train",
"=",
"True",
",",
"sequence_length",
"=",
"sequence_length",
",",
"reuse",
"=",
"None",
")",
"# Inference for generate text, sequence_length=1",
"network_test",
",",
"lstm1_test",
"=",
"inference",
"(",
"input_data_test",
",",
"is_train",
"=",
"False",
",",
"sequence_length",
"=",
"1",
",",
"reuse",
"=",
"True",
")",
"y_linear",
"=",
"network_test",
".",
"outputs",
"y_soft",
"=",
"tf",
".",
"nn",
".",
"softmax",
"(",
"y_linear",
")",
"# y_id = tf.argmax(tf.nn.softmax(y), 1)",
"# ===== Define train ops",
"def",
"loss_fn",
"(",
"outputs",
",",
"targets",
",",
"batch_size",
",",
"sequence_length",
")",
":",
"# Returns the cost function of Cross-entropy of two sequences, implement",
"# softmax internally.",
"# outputs : 2D tensor [n_examples, n_outputs]",
"# targets : 2D tensor [n_examples, n_outputs]",
"# n_examples = batch_size * sequence_length",
"# so",
"# cost is the averaged cost of each mini-batch (concurrent process).",
"loss",
"=",
"tf",
".",
"contrib",
".",
"legacy_seq2seq",
".",
"sequence_loss_by_example",
"(",
"[",
"outputs",
"]",
",",
"[",
"tf",
".",
"reshape",
"(",
"targets",
",",
"[",
"-",
"1",
"]",
")",
"]",
",",
"[",
"tf",
".",
"ones",
"(",
"[",
"batch_size",
"*",
"sequence_length",
"]",
")",
"]",
")",
"cost",
"=",
"tf",
".",
"reduce_sum",
"(",
"loss",
")",
"/",
"batch_size",
"return",
"cost",
"# Cost for Training",
"cost",
"=",
"loss_fn",
"(",
"network",
".",
"outputs",
",",
"targets",
",",
"batch_size",
",",
"sequence_length",
")",
"# Truncated Backpropagation for training",
"with",
"tf",
".",
"variable_scope",
"(",
"'learning_rate'",
")",
":",
"lr",
"=",
"tf",
".",
"Variable",
"(",
"0.0",
",",
"trainable",
"=",
"False",
")",
"# You can get all trainable parameters as follow.",
"# tvars = tf.trainable_variables()",
"# Alternatively, you can specify the parameters for training as follw.",
"# tvars = network.all_params $ all parameters",
"# tvars = network.all_params[1:] $ parameters except embedding matrix",
"# Train the whole network.",
"tvars",
"=",
"network",
".",
"all_params",
"grads",
",",
"_",
"=",
"tf",
".",
"clip_by_global_norm",
"(",
"tf",
".",
"gradients",
"(",
"cost",
",",
"tvars",
")",
",",
"max_grad_norm",
")",
"optimizer",
"=",
"tf",
".",
"train",
".",
"GradientDescentOptimizer",
"(",
"lr",
")",
"train_op",
"=",
"optimizer",
".",
"apply_gradients",
"(",
"zip",
"(",
"grads",
",",
"tvars",
")",
")",
"# ===== Training",
"sess",
".",
"run",
"(",
"tf",
".",
"global_variables_initializer",
"(",
")",
")",
"print",
"(",
"\"\\nStart learning a model to generate text\"",
")",
"for",
"i",
"in",
"range",
"(",
"max_max_epoch",
")",
":",
"# decrease the learning_rate after ``max_epoch``, by multipling lr_decay.",
"new_lr_decay",
"=",
"lr_decay",
"**",
"max",
"(",
"i",
"-",
"max_epoch",
",",
"0.0",
")",
"sess",
".",
"run",
"(",
"tf",
".",
"assign",
"(",
"lr",
",",
"learning_rate",
"*",
"new_lr_decay",
")",
")",
"print",
"(",
"\"Epoch: %d/%d Learning rate: %.8f\"",
"%",
"(",
"i",
"+",
"1",
",",
"max_max_epoch",
",",
"sess",
".",
"run",
"(",
"lr",
")",
")",
")",
"epoch_size",
"=",
"(",
"(",
"len",
"(",
"train_data",
")",
"//",
"batch_size",
")",
"-",
"1",
")",
"//",
"sequence_length",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"costs",
"=",
"0.0",
"iters",
"=",
"0",
"# reset all states at the begining of every epoch",
"state1",
"=",
"tl",
".",
"layers",
".",
"initialize_rnn_state",
"(",
"lstm1",
".",
"initial_state",
")",
"for",
"step",
",",
"(",
"x",
",",
"y",
")",
"in",
"enumerate",
"(",
"tl",
".",
"iterate",
".",
"ptb_iterator",
"(",
"train_data",
",",
"batch_size",
",",
"sequence_length",
")",
")",
":",
"_cost",
",",
"state1",
",",
"_",
"=",
"sess",
".",
"run",
"(",
"[",
"cost",
",",
"lstm1",
".",
"final_state",
",",
"train_op",
"]",
",",
"feed_dict",
"=",
"{",
"input_data",
":",
"x",
",",
"targets",
":",
"y",
",",
"lstm1",
".",
"initial_state",
":",
"state1",
"}",
")",
"costs",
"+=",
"_cost",
"iters",
"+=",
"sequence_length",
"if",
"step",
"%",
"(",
"epoch_size",
"//",
"10",
")",
"==",
"1",
":",
"print",
"(",
"\"%.3f perplexity: %.3f speed: %.0f wps\"",
"%",
"(",
"step",
"*",
"1.0",
"/",
"epoch_size",
",",
"np",
".",
"exp",
"(",
"costs",
"/",
"iters",
")",
",",
"iters",
"*",
"batch_size",
"/",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
")",
")",
")",
"train_perplexity",
"=",
"np",
".",
"exp",
"(",
"costs",
"/",
"iters",
")",
"# print(\"Epoch: %d Train Perplexity: %.3f\" % (i + 1, train_perplexity))",
"print",
"(",
"\"Epoch: %d/%d Train Perplexity: %.3f\"",
"%",
"(",
"i",
"+",
"1",
",",
"max_max_epoch",
",",
"train_perplexity",
")",
")",
"# for diversity in diversity_list:",
"# testing: sample from top k words",
"for",
"top_k",
"in",
"top_k_list",
":",
"# Testing, generate some text from a given seed.",
"state1",
"=",
"tl",
".",
"layers",
".",
"initialize_rnn_state",
"(",
"lstm1_test",
".",
"initial_state",
")",
"# state2 = tl.layers.initialize_rnn_state(lstm2_test.initial_state)",
"outs_id",
"=",
"[",
"vocab",
".",
"word_to_id",
"(",
"w",
")",
"for",
"w",
"in",
"seed",
"]",
"# feed the seed to initialize the state for generation.",
"for",
"ids",
"in",
"outs_id",
"[",
":",
"-",
"1",
"]",
":",
"a_id",
"=",
"np",
".",
"asarray",
"(",
"ids",
")",
".",
"reshape",
"(",
"1",
",",
"1",
")",
"state1",
"=",
"sess",
".",
"run",
"(",
"[",
"lstm1_test",
".",
"final_state",
"]",
",",
"feed_dict",
"=",
"{",
"input_data_test",
":",
"a_id",
",",
"lstm1_test",
".",
"initial_state",
":",
"state1",
"}",
")",
"# feed the last word in seed, and start to generate sentence.",
"a_id",
"=",
"outs_id",
"[",
"-",
"1",
"]",
"for",
"_",
"in",
"range",
"(",
"print_length",
")",
":",
"a_id",
"=",
"np",
".",
"asarray",
"(",
"a_id",
")",
".",
"reshape",
"(",
"1",
",",
"1",
")",
"out",
",",
"state1",
"=",
"sess",
".",
"run",
"(",
"[",
"y_soft",
",",
"lstm1_test",
".",
"final_state",
"]",
",",
"feed_dict",
"=",
"{",
"input_data_test",
":",
"a_id",
",",
"lstm1_test",
".",
"initial_state",
":",
"state1",
"}",
")",
"# Without sampling",
"# a_id = np.argmax(out[0])",
"# Sample from all words, if vocab_size is large,",
"# this may have numeric error.",
"# a_id = tl.nlp.sample(out[0], diversity)",
"# Sample from the top k words.",
"a_id",
"=",
"tl",
".",
"nlp",
".",
"sample_top",
"(",
"out",
"[",
"0",
"]",
",",
"top_k",
"=",
"top_k",
")",
"outs_id",
".",
"append",
"(",
"a_id",
")",
"sentence",
"=",
"[",
"vocab",
".",
"id_to_word",
"(",
"w",
")",
"for",
"w",
"in",
"outs_id",
"]",
"sentence",
"=",
"\" \"",
".",
"join",
"(",
"sentence",
")",
"# print(diversity, ':', sentence)",
"print",
"(",
"top_k",
",",
"':'",
",",
"sentence",
")",
"print",
"(",
"\"Save model\"",
")",
"tl",
".",
"files",
".",
"save_npz",
"(",
"network_test",
".",
"all_params",
",",
"name",
"=",
"model_file_name",
")"
] | Generate text by Synced sequence input and output. | [
"Generate",
"text",
"by",
"Synced",
"sequence",
"input",
"and",
"output",
"."
] | aa9e52e36c7058a7e6fd81d36563ca6850b21956 | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/text_generation/tutorial_generate_text.py#L185-L362 | valid |
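The generation loop above draws the next word id from the k most probable outputs via tl.nlp.sample_top. Below is a small stand-alone sketch of that sampling step written with NumPy; it illustrates the idea only and is not claimed to be identical to the TensorLayer implementation.

import numpy as np

def sample_top_k(probs, top_k=5):
    # keep only the k largest probabilities, renormalise them, then draw one id
    idx = np.argpartition(probs, -top_k)[-top_k:]
    p = probs[idx] / probs[idx].sum()
    return int(np.random.choice(idx, p=p))

probs = np.array([0.05, 0.40, 0.10, 0.30, 0.15])
print(sample_top_k(probs, top_k=2))  # prints 1 or 3, the two most probable ids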
numenta/nupic | src/nupic/swarming/api.py | createAndStartSwarm | def createAndStartSwarm(client, clientInfo="", clientKey="", params="",
minimumWorkers=None, maximumWorkers=None,
alreadyRunning=False):
"""Create and start a swarm job.
Args:
client - A string identifying the calling client. There is a small limit
for the length of the value. See ClientJobsDAO.CLIENT_MAX_LEN.
clientInfo - JSON encoded dict of client specific information.
clientKey - Foreign key. Limited in length, see ClientJobsDAO._initTables.
params - JSON encoded dict of the parameters for the job. This can be
fetched out of the database by the worker processes based on the jobID.
minimumWorkers - The minimum workers to allocate to the swarm. Set to None
to use the default.
maximumWorkers - The maximum workers to allocate to the swarm. Set to None
to use the swarm default. Set to 0 to use the maximum scheduler value.
alreadyRunning - Insert a job record for an already running process. Used
for testing.
"""
if minimumWorkers is None:
minimumWorkers = Configuration.getInt(
"nupic.hypersearch.minWorkersPerSwarm")
if maximumWorkers is None:
maximumWorkers = Configuration.getInt(
"nupic.hypersearch.maxWorkersPerSwarm")
return ClientJobsDAO.get().jobInsert(
client=client,
cmdLine="$HYPERSEARCH",
clientInfo=clientInfo,
clientKey=clientKey,
alreadyRunning=alreadyRunning,
params=params,
minimumWorkers=minimumWorkers,
maximumWorkers=maximumWorkers,
jobType=ClientJobsDAO.JOB_TYPE_HS) | python | def createAndStartSwarm(client, clientInfo="", clientKey="", params="",
minimumWorkers=None, maximumWorkers=None,
alreadyRunning=False):
"""Create and start a swarm job.
Args:
client - A string identifying the calling client. There is a small limit
for the length of the value. See ClientJobsDAO.CLIENT_MAX_LEN.
clientInfo - JSON encoded dict of client specific information.
clientKey - Foreign key. Limited in length, see ClientJobsDAO._initTables.
params - JSON encoded dict of the parameters for the job. This can be
fetched out of the database by the worker processes based on the jobID.
minimumWorkers - The minimum workers to allocate to the swarm. Set to None
to use the default.
maximumWorkers - The maximum workers to allocate to the swarm. Set to None
to use the swarm default. Set to 0 to use the maximum scheduler value.
alreadyRunning - Insert a job record for an already running process. Used
for testing.
"""
if minimumWorkers is None:
minimumWorkers = Configuration.getInt(
"nupic.hypersearch.minWorkersPerSwarm")
if maximumWorkers is None:
maximumWorkers = Configuration.getInt(
"nupic.hypersearch.maxWorkersPerSwarm")
return ClientJobsDAO.get().jobInsert(
client=client,
cmdLine="$HYPERSEARCH",
clientInfo=clientInfo,
clientKey=clientKey,
alreadyRunning=alreadyRunning,
params=params,
minimumWorkers=minimumWorkers,
maximumWorkers=maximumWorkers,
jobType=ClientJobsDAO.JOB_TYPE_HS) | [
"def",
"createAndStartSwarm",
"(",
"client",
",",
"clientInfo",
"=",
"\"\"",
",",
"clientKey",
"=",
"\"\"",
",",
"params",
"=",
"\"\"",
",",
"minimumWorkers",
"=",
"None",
",",
"maximumWorkers",
"=",
"None",
",",
"alreadyRunning",
"=",
"False",
")",
":",
"if",
"minimumWorkers",
"is",
"None",
":",
"minimumWorkers",
"=",
"Configuration",
".",
"getInt",
"(",
"\"nupic.hypersearch.minWorkersPerSwarm\"",
")",
"if",
"maximumWorkers",
"is",
"None",
":",
"maximumWorkers",
"=",
"Configuration",
".",
"getInt",
"(",
"\"nupic.hypersearch.maxWorkersPerSwarm\"",
")",
"return",
"ClientJobsDAO",
".",
"get",
"(",
")",
".",
"jobInsert",
"(",
"client",
"=",
"client",
",",
"cmdLine",
"=",
"\"$HYPERSEARCH\"",
",",
"clientInfo",
"=",
"clientInfo",
",",
"clientKey",
"=",
"clientKey",
",",
"alreadyRunning",
"=",
"alreadyRunning",
",",
"params",
"=",
"params",
",",
"minimumWorkers",
"=",
"minimumWorkers",
",",
"maximumWorkers",
"=",
"maximumWorkers",
",",
"jobType",
"=",
"ClientJobsDAO",
".",
"JOB_TYPE_HS",
")"
] | Create and start a swarm job.
Args:
client - A string identifying the calling client. There is a small limit
for the length of the value. See ClientJobsDAO.CLIENT_MAX_LEN.
clientInfo - JSON encoded dict of client specific information.
clientKey - Foreign key. Limited in length, see ClientJobsDAO._initTables.
params - JSON encoded dict of the parameters for the job. This can be
fetched out of the database by the worker processes based on the jobID.
minimumWorkers - The minimum workers to allocate to the swarm. Set to None
to use the default.
maximumWorkers - The maximum workers to allocate to the swarm. Set to None
to use the swarm default. Set to 0 to use the maximum scheduler value.
alreadyRunning - Insert a job record for an already running process. Used
for testing. | [
"Create",
"and",
"start",
"a",
"swarm",
"job",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/api.py#L34-L69 | valid |
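A hedged usage sketch for the job-creation helper documented above. The client name, clientInfo, and params payload are placeholders (a real swarm passes a full hypersearch description in params); the return value is whatever ClientJobsDAO.jobInsert returns for the new job.

import json

job = createAndStartSwarm(
    client="myClient",                      # short caller identifier (see CLIENT_MAX_LEN)
    clientInfo=json.dumps({"run": "demo"}), # JSON-encoded, client-specific metadata (placeholder)
    params=json.dumps({}),                  # JSON-encoded job parameters read by the workers (placeholder)
    minimumWorkers=1,
    maximumWorkers=4,
)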
numenta/nupic | src/nupic/swarming/api.py | getSwarmModelParams | def getSwarmModelParams(modelID):
"""Retrieve the Engine-level model params from a Swarm model
Args:
modelID - Engine-level model ID of the Swarm model
Returns:
JSON-encoded string containing Model Params
"""
# TODO: the use of nupic.frameworks.opf.helpers.loadExperimentDescriptionScriptFromDir when
# retrieving module params results in a leakage of pf_base_descriptionNN and
# pf_descriptionNN module imports for every call to getSwarmModelParams, so
# the leakage is unlimited when getSwarmModelParams is called by a
# long-running process. An alternate solution is to execute the guts of
  # this function's logic in a separate process (via multiprocessing module).
cjDAO = ClientJobsDAO.get()
(jobID, description) = cjDAO.modelsGetFields(
modelID,
["jobId", "genDescription"])
(baseDescription,) = cjDAO.jobGetFields(jobID, ["genBaseDescription"])
# Construct a directory with base.py and description.py for loading model
# params, and use nupic.frameworks.opf.helpers to extract model params from
# those files
descriptionDirectory = tempfile.mkdtemp()
try:
baseDescriptionFilePath = os.path.join(descriptionDirectory, "base.py")
with open(baseDescriptionFilePath, mode="wb") as f:
f.write(baseDescription)
descriptionFilePath = os.path.join(descriptionDirectory, "description.py")
with open(descriptionFilePath, mode="wb") as f:
f.write(description)
expIface = helpers.getExperimentDescriptionInterfaceFromModule(
helpers.loadExperimentDescriptionScriptFromDir(descriptionDirectory))
return json.dumps(
dict(
modelConfig=expIface.getModelDescription(),
inferenceArgs=expIface.getModelControl().get("inferenceArgs", None)))
finally:
shutil.rmtree(descriptionDirectory, ignore_errors=True) | python | def getSwarmModelParams(modelID):
"""Retrieve the Engine-level model params from a Swarm model
Args:
modelID - Engine-level model ID of the Swarm model
Returns:
JSON-encoded string containing Model Params
"""
# TODO: the use of nupic.frameworks.opf.helpers.loadExperimentDescriptionScriptFromDir when
# retrieving module params results in a leakage of pf_base_descriptionNN and
# pf_descriptionNN module imports for every call to getSwarmModelParams, so
# the leakage is unlimited when getSwarmModelParams is called by a
# long-running process. An alternate solution is to execute the guts of
  # this function's logic in a separate process (via multiprocessing module).
cjDAO = ClientJobsDAO.get()
(jobID, description) = cjDAO.modelsGetFields(
modelID,
["jobId", "genDescription"])
(baseDescription,) = cjDAO.jobGetFields(jobID, ["genBaseDescription"])
# Construct a directory with base.py and description.py for loading model
# params, and use nupic.frameworks.opf.helpers to extract model params from
# those files
descriptionDirectory = tempfile.mkdtemp()
try:
baseDescriptionFilePath = os.path.join(descriptionDirectory, "base.py")
with open(baseDescriptionFilePath, mode="wb") as f:
f.write(baseDescription)
descriptionFilePath = os.path.join(descriptionDirectory, "description.py")
with open(descriptionFilePath, mode="wb") as f:
f.write(description)
expIface = helpers.getExperimentDescriptionInterfaceFromModule(
helpers.loadExperimentDescriptionScriptFromDir(descriptionDirectory))
return json.dumps(
dict(
modelConfig=expIface.getModelDescription(),
inferenceArgs=expIface.getModelControl().get("inferenceArgs", None)))
finally:
shutil.rmtree(descriptionDirectory, ignore_errors=True) | [
"def",
"getSwarmModelParams",
"(",
"modelID",
")",
":",
"# TODO: the use of nupic.frameworks.opf.helpers.loadExperimentDescriptionScriptFromDir when",
"# retrieving module params results in a leakage of pf_base_descriptionNN and",
"# pf_descriptionNN module imports for every call to getSwarmModelParams, so",
"# the leakage is unlimited when getSwarmModelParams is called by a",
"# long-running process. An alternate solution is to execute the guts of",
"# this function's logic in a seprate process (via multiprocessing module).",
"cjDAO",
"=",
"ClientJobsDAO",
".",
"get",
"(",
")",
"(",
"jobID",
",",
"description",
")",
"=",
"cjDAO",
".",
"modelsGetFields",
"(",
"modelID",
",",
"[",
"\"jobId\"",
",",
"\"genDescription\"",
"]",
")",
"(",
"baseDescription",
",",
")",
"=",
"cjDAO",
".",
"jobGetFields",
"(",
"jobID",
",",
"[",
"\"genBaseDescription\"",
"]",
")",
"# Construct a directory with base.py and description.py for loading model",
"# params, and use nupic.frameworks.opf.helpers to extract model params from",
"# those files",
"descriptionDirectory",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"try",
":",
"baseDescriptionFilePath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"descriptionDirectory",
",",
"\"base.py\"",
")",
"with",
"open",
"(",
"baseDescriptionFilePath",
",",
"mode",
"=",
"\"wb\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"baseDescription",
")",
"descriptionFilePath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"descriptionDirectory",
",",
"\"description.py\"",
")",
"with",
"open",
"(",
"descriptionFilePath",
",",
"mode",
"=",
"\"wb\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"description",
")",
"expIface",
"=",
"helpers",
".",
"getExperimentDescriptionInterfaceFromModule",
"(",
"helpers",
".",
"loadExperimentDescriptionScriptFromDir",
"(",
"descriptionDirectory",
")",
")",
"return",
"json",
".",
"dumps",
"(",
"dict",
"(",
"modelConfig",
"=",
"expIface",
".",
"getModelDescription",
"(",
")",
",",
"inferenceArgs",
"=",
"expIface",
".",
"getModelControl",
"(",
")",
".",
"get",
"(",
"\"inferenceArgs\"",
",",
"None",
")",
")",
")",
"finally",
":",
"shutil",
".",
"rmtree",
"(",
"descriptionDirectory",
",",
"ignore_errors",
"=",
"True",
")"
] | Retrieve the Engine-level model params from a Swarm model
Args:
modelID - Engine-level model ID of the Swarm model
Returns:
JSON-encoded string containing Model Params | [
"Retrieve",
"the",
"Engine",
"-",
"level",
"model",
"params",
"from",
"a",
"Swarm",
"model"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/api.py#L73-L119 | valid |
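A minimal caller sketch for the getSwarmModelParams entry above; the model ID value, a reachable ClientJobsDAO database, and the import path nupic.swarming.api (inferred from the file path) are assumptions, not part of the recorded entry.

import json

from nupic.swarming.api import getSwarmModelParams  # module path inferred from src/nupic/swarming/api.py

paramsJSON = getSwarmModelParams(modelID=12345)  # hypothetical Engine-level model ID
params = json.loads(paramsJSON)                  # dict with "modelConfig" and "inferenceArgs"
print(params["modelConfig"])
print(params["inferenceArgs"])                   # may be None if the experiment defines none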
numenta/nupic | src/nupic/database/connection.py | enableConcurrencyChecks | def enableConcurrencyChecks(maxConcurrency, raiseException=True):
""" Enable the diagnostic feature for debugging unexpected concurrency in
acquiring ConnectionWrapper instances.
NOTE: This MUST be done early in your application's execution, BEFORE any
accesses to ConnectionFactory or connection policies from your application
(including imports and sub-imports of your app).
Parameters:
----------------------------------------------------------------
maxConcurrency: A non-negative integer that represents the maximum expected
number of outstanding connections. When this value is
exceeded, useful information will be logged and, depending
on the value of the raiseException arg,
ConcurrencyExceededError may be raised.
raiseException: If true, ConcurrencyExceededError will be raised when
maxConcurrency is exceeded.
"""
global g_max_concurrency, g_max_concurrency_raise_exception
assert maxConcurrency >= 0
g_max_concurrency = maxConcurrency
g_max_concurrency_raise_exception = raiseException
return | python | def enableConcurrencyChecks(maxConcurrency, raiseException=True):
""" Enable the diagnostic feature for debugging unexpected concurrency in
acquiring ConnectionWrapper instances.
NOTE: This MUST be done early in your application's execution, BEFORE any
accesses to ConnectionFactory or connection policies from your application
(including imports and sub-imports of your app).
Parameters:
----------------------------------------------------------------
maxConcurrency: A non-negative integer that represents the maximum expected
number of outstanding connections. When this value is
exceeded, useful information will be logged and, depending
on the value of the raiseException arg,
ConcurrencyExceededError may be raised.
raiseException: If true, ConcurrencyExceededError will be raised when
maxConcurrency is exceeded.
"""
global g_max_concurrency, g_max_concurrency_raise_exception
assert maxConcurrency >= 0
g_max_concurrency = maxConcurrency
g_max_concurrency_raise_exception = raiseException
return | [
"def",
"enableConcurrencyChecks",
"(",
"maxConcurrency",
",",
"raiseException",
"=",
"True",
")",
":",
"global",
"g_max_concurrency",
",",
"g_max_concurrency_raise_exception",
"assert",
"maxConcurrency",
">=",
"0",
"g_max_concurrency",
"=",
"maxConcurrency",
"g_max_concurrency_raise_exception",
"=",
"raiseException",
"return"
] | Enable the diagnostic feature for debugging unexpected concurrency in
acquiring ConnectionWrapper instances.
NOTE: This MUST be done early in your application's execution, BEFORE any
accesses to ConnectionFactory or connection policies from your application
(including imports and sub-imports of your app).
Parameters:
----------------------------------------------------------------
maxConcurrency: A non-negative integer that represents the maximum expected
number of outstanding connections. When this value is
exceeded, useful information will be logged and, depending
on the value of the raiseException arg,
ConcurrencyExceededError may be raised.
raiseException: If true, ConcurrencyExceededError will be raised when
maxConcurrency is exceeded. | [
"Enable",
"the",
"diagnostic",
"feature",
"for",
"debugging",
"unexpected",
"concurrency",
"in",
"acquiring",
"ConnectionWrapper",
"instances",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/database/connection.py#L59-L83 | valid |
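A hedged usage sketch for enableConcurrencyChecks; the limit of 2 is arbitrary and the import path is inferred from src/nupic/database/connection.py.

from nupic.database.connection import enableConcurrencyChecks

# Must run early in the process, before ConnectionFactory or any connection policy is used.
enableConcurrencyChecks(maxConcurrency=2, raiseException=True)
# From here on, holding a third unreleased ConnectionWrapper logs diagnostics and,
# because raiseException is True, raises ConcurrencyExceededError.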
numenta/nupic | src/nupic/database/connection.py | _getCommonSteadyDBArgsDict | def _getCommonSteadyDBArgsDict():
""" Returns a dictionary of arguments for DBUtils.SteadyDB.SteadyDBConnection
constructor.
"""
return dict(
creator = pymysql,
host = Configuration.get('nupic.cluster.database.host'),
port = int(Configuration.get('nupic.cluster.database.port')),
user = Configuration.get('nupic.cluster.database.user'),
passwd = Configuration.get('nupic.cluster.database.passwd'),
charset = 'utf8',
use_unicode = True,
setsession = ['SET AUTOCOMMIT = 1']) | python | def _getCommonSteadyDBArgsDict():
""" Returns a dictionary of arguments for DBUtils.SteadyDB.SteadyDBConnection
constructor.
"""
return dict(
creator = pymysql,
host = Configuration.get('nupic.cluster.database.host'),
port = int(Configuration.get('nupic.cluster.database.port')),
user = Configuration.get('nupic.cluster.database.user'),
passwd = Configuration.get('nupic.cluster.database.passwd'),
charset = 'utf8',
use_unicode = True,
setsession = ['SET AUTOCOMMIT = 1']) | [
"def",
"_getCommonSteadyDBArgsDict",
"(",
")",
":",
"return",
"dict",
"(",
"creator",
"=",
"pymysql",
",",
"host",
"=",
"Configuration",
".",
"get",
"(",
"'nupic.cluster.database.host'",
")",
",",
"port",
"=",
"int",
"(",
"Configuration",
".",
"get",
"(",
"'nupic.cluster.database.port'",
")",
")",
",",
"user",
"=",
"Configuration",
".",
"get",
"(",
"'nupic.cluster.database.user'",
")",
",",
"passwd",
"=",
"Configuration",
".",
"get",
"(",
"'nupic.cluster.database.passwd'",
")",
",",
"charset",
"=",
"'utf8'",
",",
"use_unicode",
"=",
"True",
",",
"setsession",
"=",
"[",
"'SET AUTOCOMMIT = 1'",
"]",
")"
] | Returns a dictionary of arguments for DBUtils.SteadyDB.SteadyDBConnection
constructor. | [
"Returns",
"a",
"dictionary",
"of",
"arguments",
"for",
"DBUtils",
".",
"SteadyDB",
".",
"SteadyDBConnection",
"constructor",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/database/connection.py#L643-L656 | valid |
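A sketch of how the private helper above is typically consumed, mirroring the SteadyDB.connect call used by PerTransactionConnectionPolicy later in this file; it assumes the nupic.cluster.database.* settings point at a reachable MySQL server and that the DBUtils 1.x module layout is in use.

from DBUtils import SteadyDB  # packaging assumption

from nupic.database.connection import _getCommonSteadyDBArgsDict  # private helper, path inferred

dbConn = SteadyDB.connect(**_getCommonSteadyDBArgsDict())  # pymysql connection with autocommit on
cursor = dbConn.cursor()
cursor.execute("SELECT 1")  # trivial round-trip to confirm the connection works
cursor.close()
dbConn.close()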
numenta/nupic | src/nupic/database/connection.py | _getLogger | def _getLogger(cls, logLevel=None):
""" Gets a logger for the given class in this module
"""
logger = logging.getLogger(
".".join(['com.numenta', _MODULE_NAME, cls.__name__]))
if logLevel is not None:
logger.setLevel(logLevel)
return logger | python | def _getLogger(cls, logLevel=None):
""" Gets a logger for the given class in this module
"""
logger = logging.getLogger(
".".join(['com.numenta', _MODULE_NAME, cls.__name__]))
if logLevel is not None:
logger.setLevel(logLevel)
return logger | [
"def",
"_getLogger",
"(",
"cls",
",",
"logLevel",
"=",
"None",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"\".\"",
".",
"join",
"(",
"[",
"'com.numenta'",
",",
"_MODULE_NAME",
",",
"cls",
".",
"__name__",
"]",
")",
")",
"if",
"logLevel",
"is",
"not",
"None",
":",
"logger",
".",
"setLevel",
"(",
"logLevel",
")",
"return",
"logger"
] | Gets a logger for the given class in this module | [
"Gets",
"a",
"logger",
"for",
"the",
"given",
"class",
"in",
"this",
"module"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/database/connection.py#L660-L669 | valid |
numenta/nupic | src/nupic/database/connection.py | ConnectionFactory.get | def get(cls):
""" Acquire a ConnectionWrapper instance that represents a connection
to the SQL server per nupic.cluster.database.* configuration settings.
NOTE: caller is responsible for calling the ConnectionWrapper instance's
release() method after using the connection in order to release resources.
Better yet, use the returned ConnectionWrapper instance in a Context Manager
statement for automatic invocation of release():
Example:
# If using Jython 2.5.x, first import with_statement at the very top of
your script (don't need this import for Jython/Python 2.6.x and later):
from __future__ import with_statement
# Then:
from nupic.database.Connection import ConnectionFactory
# Then use it like this
with ConnectionFactory.get() as conn:
conn.cursor.execute("SELECT ...")
conn.cursor.fetchall()
conn.cursor.execute("INSERT ...")
WARNING: DO NOT close the underlying connection or cursor as it may be
shared by other modules in your process. ConnectionWrapper's release()
method will do the right thing.
Parameters:
----------------------------------------------------------------
retval: A ConnectionWrapper instance. NOTE: Caller is responsible
for releasing resources as described above.
"""
if cls._connectionPolicy is None:
logger = _getLogger(cls)
logger.info("Creating db connection policy via provider %r",
cls._connectionPolicyInstanceProvider)
cls._connectionPolicy = cls._connectionPolicyInstanceProvider()
logger.debug("Created connection policy: %r", cls._connectionPolicy)
return cls._connectionPolicy.acquireConnection() | python | def get(cls):
""" Acquire a ConnectionWrapper instance that represents a connection
to the SQL server per nupic.cluster.database.* configuration settings.
NOTE: caller is responsible for calling the ConnectionWrapper instance's
release() method after using the connection in order to release resources.
Better yet, use the returned ConnectionWrapper instance in a Context Manager
statement for automatic invocation of release():
Example:
# If using Jython 2.5.x, first import with_statement at the very top of
your script (don't need this import for Jython/Python 2.6.x and later):
from __future__ import with_statement
# Then:
from nupic.database.Connection import ConnectionFactory
# Then use it like this
with ConnectionFactory.get() as conn:
conn.cursor.execute("SELECT ...")
conn.cursor.fetchall()
conn.cursor.execute("INSERT ...")
WARNING: DO NOT close the underlying connection or cursor as it may be
shared by other modules in your process. ConnectionWrapper's release()
method will do the right thing.
Parameters:
----------------------------------------------------------------
retval: A ConnectionWrapper instance. NOTE: Caller is responsible
for releasing resources as described above.
"""
if cls._connectionPolicy is None:
logger = _getLogger(cls)
logger.info("Creating db connection policy via provider %r",
cls._connectionPolicyInstanceProvider)
cls._connectionPolicy = cls._connectionPolicyInstanceProvider()
logger.debug("Created connection policy: %r", cls._connectionPolicy)
return cls._connectionPolicy.acquireConnection() | [
"def",
"get",
"(",
"cls",
")",
":",
"if",
"cls",
".",
"_connectionPolicy",
"is",
"None",
":",
"logger",
"=",
"_getLogger",
"(",
"cls",
")",
"logger",
".",
"info",
"(",
"\"Creating db connection policy via provider %r\"",
",",
"cls",
".",
"_connectionPolicyInstanceProvider",
")",
"cls",
".",
"_connectionPolicy",
"=",
"cls",
".",
"_connectionPolicyInstanceProvider",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Created connection policy: %r\"",
",",
"cls",
".",
"_connectionPolicy",
")",
"return",
"cls",
".",
"_connectionPolicy",
".",
"acquireConnection",
"(",
")"
] | Acquire a ConnectionWrapper instance that represents a connection
to the SQL server per nupic.cluster.database.* configuration settings.
NOTE: caller is responsible for calling the ConnectionWrapper instance's
release() method after using the connection in order to release resources.
Better yet, use the returned ConnectionWrapper instance in a Context Manager
statement for automatic invocation of release():
Example:
# If using Jython 2.5.x, first import with_statement at the very top of
your script (don't need this import for Jython/Python 2.6.x and later):
from __future__ import with_statement
# Then:
from nupic.database.Connection import ConnectionFactory
# Then use it like this
with ConnectionFactory.get() as conn:
conn.cursor.execute("SELECT ...")
conn.cursor.fetchall()
conn.cursor.execute("INSERT ...")
WARNING: DO NOT close the underlying connection or cursor as it may be
shared by other modules in your process. ConnectionWrapper's release()
method will do the right thing.
Parameters:
----------------------------------------------------------------
retval: A ConnectionWrapper instance. NOTE: Caller is responsible
for releasing resources as described above. | [
"Acquire",
"a",
"ConnectionWrapper",
"instance",
"that",
"represents",
"a",
"connection",
"to",
"the",
"SQL",
"server",
"per",
"nupic",
".",
"cluster",
".",
"database",
".",
"*",
"configuration",
"settings",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/database/connection.py#L130-L167 | valid |
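The docstring's own example, condensed into a runnable sketch; the import path is inferred from this file's location and a configured, reachable database is assumed.

from nupic.database.connection import ConnectionFactory

with ConnectionFactory.get() as conn:  # ConnectionWrapper; release() runs automatically on exit
    conn.cursor.execute("SELECT 1")
    rows = conn.cursor.fetchall()
# Never close conn.dbConn or conn.cursor directly; they may be shared by other modules.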
numenta/nupic | src/nupic/database/connection.py | ConnectionFactory._createDefaultPolicy | def _createDefaultPolicy(cls):
""" [private] Create the default database connection policy instance
Parameters:
----------------------------------------------------------------
retval: The default database connection policy instance
"""
logger = _getLogger(cls)
logger.debug(
"Creating database connection policy: platform=%r; pymysql.VERSION=%r",
platform.system(), pymysql.VERSION)
if platform.system() == "Java":
# NOTE: PooledDB doesn't seem to work under Jython
# NOTE: not appropriate for multi-threaded applications.
# TODO: this was fixed in Webware DBUtils r8228, so once
# we pick up a release with this fix, we should use
# PooledConnectionPolicy for both Jython and Python.
policy = SingleSharedConnectionPolicy()
else:
policy = PooledConnectionPolicy()
return policy | python | def _createDefaultPolicy(cls):
""" [private] Create the default database connection policy instance
Parameters:
----------------------------------------------------------------
retval: The default database connection policy instance
"""
logger = _getLogger(cls)
logger.debug(
"Creating database connection policy: platform=%r; pymysql.VERSION=%r",
platform.system(), pymysql.VERSION)
if platform.system() == "Java":
# NOTE: PooledDB doesn't seem to work under Jython
# NOTE: not appropriate for multi-threaded applications.
# TODO: this was fixed in Webware DBUtils r8228, so once
# we pick up a release with this fix, we should use
# PooledConnectionPolicy for both Jython and Python.
policy = SingleSharedConnectionPolicy()
else:
policy = PooledConnectionPolicy()
return policy | [
"def",
"_createDefaultPolicy",
"(",
"cls",
")",
":",
"logger",
"=",
"_getLogger",
"(",
"cls",
")",
"logger",
".",
"debug",
"(",
"\"Creating database connection policy: platform=%r; pymysql.VERSION=%r\"",
",",
"platform",
".",
"system",
"(",
")",
",",
"pymysql",
".",
"VERSION",
")",
"if",
"platform",
".",
"system",
"(",
")",
"==",
"\"Java\"",
":",
"# NOTE: PooledDB doesn't seem to work under Jython",
"# NOTE: not appropriate for multi-threaded applications.",
"# TODO: this was fixed in Webware DBUtils r8228, so once",
"# we pick up a realease with this fix, we should use",
"# PooledConnectionPolicy for both Jython and Python.",
"policy",
"=",
"SingleSharedConnectionPolicy",
"(",
")",
"else",
":",
"policy",
"=",
"PooledConnectionPolicy",
"(",
")",
"return",
"policy"
] | [private] Create the default database connection policy instance
Parameters:
----------------------------------------------------------------
retval: The default database connection policy instance | [
"[",
"private",
"]",
"Create",
"the",
"default",
"database",
"connection",
"policy",
"instance"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/database/connection.py#L212-L235 | valid |
numenta/nupic | src/nupic/database/connection.py | ConnectionWrapper.release | def release(self):
""" Release the database connection and cursor
The receiver of the Connection instance MUST call this method in order
to reclaim resources
"""
self._logger.debug("Releasing: %r", self)
# Discard self from set of outstanding instances
if self._addedToInstanceSet:
try:
self._clsOutstandingInstances.remove(self)
except:
self._logger.exception(
"Failed to remove self from _clsOutstandingInstances: %r;", self)
raise
self._releaser(dbConn=self.dbConn, cursor=self.cursor)
self.__class__._clsNumOutstanding -= 1
assert self._clsNumOutstanding >= 0, \
"_clsNumOutstanding=%r" % (self._clsNumOutstanding,)
self._releaser = None
self.cursor = None
self.dbConn = None
self._creationTracebackString = None
self._addedToInstanceSet = False
self._logger = None
return | python | def release(self):
""" Release the database connection and cursor
The receiver of the Connection instance MUST call this method in order
to reclaim resources
"""
self._logger.debug("Releasing: %r", self)
# Discard self from set of outstanding instances
if self._addedToInstanceSet:
try:
self._clsOutstandingInstances.remove(self)
except:
self._logger.exception(
"Failed to remove self from _clsOutstandingInstances: %r;", self)
raise
self._releaser(dbConn=self.dbConn, cursor=self.cursor)
self.__class__._clsNumOutstanding -= 1
assert self._clsNumOutstanding >= 0, \
"_clsNumOutstanding=%r" % (self._clsNumOutstanding,)
self._releaser = None
self.cursor = None
self.dbConn = None
self._creationTracebackString = None
self._addedToInstanceSet = False
self._logger = None
return | [
"def",
"release",
"(",
"self",
")",
":",
"self",
".",
"_logger",
".",
"debug",
"(",
"\"Releasing: %r\"",
",",
"self",
")",
"# Discard self from set of outstanding instances",
"if",
"self",
".",
"_addedToInstanceSet",
":",
"try",
":",
"self",
".",
"_clsOutstandingInstances",
".",
"remove",
"(",
"self",
")",
"except",
":",
"self",
".",
"_logger",
".",
"exception",
"(",
"\"Failed to remove self from _clsOutstandingInstances: %r;\"",
",",
"self",
")",
"raise",
"self",
".",
"_releaser",
"(",
"dbConn",
"=",
"self",
".",
"dbConn",
",",
"cursor",
"=",
"self",
".",
"cursor",
")",
"self",
".",
"__class__",
".",
"_clsNumOutstanding",
"-=",
"1",
"assert",
"self",
".",
"_clsNumOutstanding",
">=",
"0",
",",
"\"_clsNumOutstanding=%r\"",
"%",
"(",
"self",
".",
"_clsNumOutstanding",
",",
")",
"self",
".",
"_releaser",
"=",
"None",
"self",
".",
"cursor",
"=",
"None",
"self",
".",
"dbConn",
"=",
"None",
"self",
".",
"_creationTracebackString",
"=",
"None",
"self",
".",
"_addedToInstanceSet",
"=",
"False",
"self",
".",
"_logger",
"=",
"None",
"return"
] | Release the database connection and cursor
The receiver of the Connection instance MUST call this method in order
to reclaim resources | [
"Release",
"the",
"database",
"connection",
"and",
"cursor"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/database/connection.py#L340-L370 | valid |
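When the context-manager form is not used, the wrapper must be released explicitly, as the release() docstring above requires; a try/finally sketch under the same assumptions as the ConnectionFactory example earlier.

from nupic.database.connection import ConnectionFactory

connWrap = ConnectionFactory.get()
try:
    connWrap.cursor.execute("SELECT 1")
finally:
    connWrap.release()  # hands the connection and cursor back to the owning policy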
numenta/nupic | src/nupic/database/connection.py | ConnectionWrapper._trackInstanceAndCheckForConcurrencyViolation | def _trackInstanceAndCheckForConcurrencyViolation(self):
""" Check for concurrency violation and add self to
_clsOutstandingInstances.
ASSUMPTION: Called from constructor BEFORE _clsNumOutstanding is
incremented
"""
global g_max_concurrency, g_max_concurrency_raise_exception
assert g_max_concurrency is not None
assert self not in self._clsOutstandingInstances, repr(self)
# Populate diagnostic info
self._creationTracebackString = traceback.format_stack()
# Check for concurrency violation
if self._clsNumOutstanding >= g_max_concurrency:
# NOTE: It's possible for _clsNumOutstanding to be greater than
# len(_clsOutstandingInstances) if concurrency check was enabled after
# unreleased allocations.
errorMsg = ("With numOutstanding=%r, exceeded concurrency limit=%r "
"when requesting %r. OTHER TRACKED UNRELEASED "
"INSTANCES (%s): %r") % (
self._clsNumOutstanding, g_max_concurrency, self,
len(self._clsOutstandingInstances), self._clsOutstandingInstances,)
self._logger.error(errorMsg)
if g_max_concurrency_raise_exception:
raise ConcurrencyExceededError(errorMsg)
# Add self to tracked instance set
self._clsOutstandingInstances.add(self)
self._addedToInstanceSet = True
return | python | def _trackInstanceAndCheckForConcurrencyViolation(self):
""" Check for concurrency violation and add self to
_clsOutstandingInstances.
ASSUMPTION: Called from constructor BEFORE _clsNumOutstanding is
incremented
"""
global g_max_concurrency, g_max_concurrency_raise_exception
assert g_max_concurrency is not None
assert self not in self._clsOutstandingInstances, repr(self)
# Populate diagnostic info
self._creationTracebackString = traceback.format_stack()
# Check for concurrency violation
if self._clsNumOutstanding >= g_max_concurrency:
# NOTE: It's possible for _clsNumOutstanding to be greater than
# len(_clsOutstandingInstances) if concurrency check was enabled after
# unreleased allocations.
errorMsg = ("With numOutstanding=%r, exceeded concurrency limit=%r "
"when requesting %r. OTHER TRACKED UNRELEASED "
"INSTANCES (%s): %r") % (
self._clsNumOutstanding, g_max_concurrency, self,
len(self._clsOutstandingInstances), self._clsOutstandingInstances,)
self._logger.error(errorMsg)
if g_max_concurrency_raise_exception:
raise ConcurrencyExceededError(errorMsg)
# Add self to tracked instance set
self._clsOutstandingInstances.add(self)
self._addedToInstanceSet = True
return | [
"def",
"_trackInstanceAndCheckForConcurrencyViolation",
"(",
"self",
")",
":",
"global",
"g_max_concurrency",
",",
"g_max_concurrency_raise_exception",
"assert",
"g_max_concurrency",
"is",
"not",
"None",
"assert",
"self",
"not",
"in",
"self",
".",
"_clsOutstandingInstances",
",",
"repr",
"(",
"self",
")",
"# Populate diagnostic info",
"self",
".",
"_creationTracebackString",
"=",
"traceback",
".",
"format_stack",
"(",
")",
"# Check for concurrency violation",
"if",
"self",
".",
"_clsNumOutstanding",
">=",
"g_max_concurrency",
":",
"# NOTE: It's possible for _clsNumOutstanding to be greater than",
"# len(_clsOutstandingInstances) if concurrency check was enabled after",
"# unrelease allocations.",
"errorMsg",
"=",
"(",
"\"With numOutstanding=%r, exceeded concurrency limit=%r \"",
"\"when requesting %r. OTHER TRACKED UNRELEASED \"",
"\"INSTANCES (%s): %r\"",
")",
"%",
"(",
"self",
".",
"_clsNumOutstanding",
",",
"g_max_concurrency",
",",
"self",
",",
"len",
"(",
"self",
".",
"_clsOutstandingInstances",
")",
",",
"self",
".",
"_clsOutstandingInstances",
",",
")",
"self",
".",
"_logger",
".",
"error",
"(",
"errorMsg",
")",
"if",
"g_max_concurrency_raise_exception",
":",
"raise",
"ConcurrencyExceededError",
"(",
"errorMsg",
")",
"# Add self to tracked instance set",
"self",
".",
"_clsOutstandingInstances",
".",
"add",
"(",
"self",
")",
"self",
".",
"_addedToInstanceSet",
"=",
"True",
"return"
] | Check for concurrency violation and add self to
_clsOutstandingInstances.
ASSUMPTION: Called from constructor BEFORE _clsNumOutstanding is
incremented | [
"Check",
"for",
"concurrency",
"violation",
"and",
"add",
"self",
"to",
"_clsOutstandingInstances",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/database/connection.py#L373-L409 | valid |
numenta/nupic | src/nupic/database/connection.py | SingleSharedConnectionPolicy.close | def close(self):
""" Close the policy instance and its shared database connection. """
self._logger.info("Closing")
if self._conn is not None:
self._conn.close()
self._conn = None
else:
self._logger.warning(
"close() called, but connection policy was alredy closed")
return | python | def close(self):
""" Close the policy instance and its shared database connection. """
self._logger.info("Closing")
if self._conn is not None:
self._conn.close()
self._conn = None
else:
self._logger.warning(
"close() called, but connection policy was alredy closed")
return | [
"def",
"close",
"(",
"self",
")",
":",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Closing\"",
")",
"if",
"self",
".",
"_conn",
"is",
"not",
"None",
":",
"self",
".",
"_conn",
".",
"close",
"(",
")",
"self",
".",
"_conn",
"=",
"None",
"else",
":",
"self",
".",
"_logger",
".",
"warning",
"(",
"\"close() called, but connection policy was alredy closed\"",
")",
"return"
] | Close the policy instance and its shared database connection. | [
"Close",
"the",
"policy",
"instance",
"and",
"its",
"shared",
"database",
"connection",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/database/connection.py#L458-L467 | valid |
numenta/nupic | src/nupic/database/connection.py | SingleSharedConnectionPolicy.acquireConnection | def acquireConnection(self):
""" Get a Connection instance.
Parameters:
----------------------------------------------------------------
retval: A ConnectionWrapper instance. NOTE: Caller
is responsible for calling the ConnectionWrapper
instance's release() method or use it in a context manager
expression (with ... as:) to release resources.
"""
self._logger.debug("Acquiring connection")
# Check connection and attempt to re-establish it if it died (this is
# what PooledDB does)
self._conn._ping_check()
connWrap = ConnectionWrapper(dbConn=self._conn,
cursor=self._conn.cursor(),
releaser=self._releaseConnection,
logger=self._logger)
return connWrap | python | def acquireConnection(self):
""" Get a Connection instance.
Parameters:
----------------------------------------------------------------
retval: A ConnectionWrapper instance. NOTE: Caller
is responsible for calling the ConnectionWrapper
instance's release() method or use it in a context manager
expression (with ... as:) to release resources.
"""
self._logger.debug("Acquiring connection")
# Check connection and attempt to re-establish it if it died (this is
# what PooledDB does)
self._conn._ping_check()
connWrap = ConnectionWrapper(dbConn=self._conn,
cursor=self._conn.cursor(),
releaser=self._releaseConnection,
logger=self._logger)
return connWrap | [
"def",
"acquireConnection",
"(",
"self",
")",
":",
"self",
".",
"_logger",
".",
"debug",
"(",
"\"Acquiring connection\"",
")",
"# Check connection and attempt to re-establish it if it died (this is",
"# what PooledDB does)",
"self",
".",
"_conn",
".",
"_ping_check",
"(",
")",
"connWrap",
"=",
"ConnectionWrapper",
"(",
"dbConn",
"=",
"self",
".",
"_conn",
",",
"cursor",
"=",
"self",
".",
"_conn",
".",
"cursor",
"(",
")",
",",
"releaser",
"=",
"self",
".",
"_releaseConnection",
",",
"logger",
"=",
"self",
".",
"_logger",
")",
"return",
"connWrap"
] | Get a Connection instance.
Parameters:
----------------------------------------------------------------
retval: A ConnectionWrapper instance. NOTE: Caller
is responsible for calling the ConnectionWrapper
instance's release() method or use it in a context manager
expression (with ... as:) to release resources. | [
"Get",
"a",
"Connection",
"instance",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/database/connection.py#L470-L489 | valid |
numenta/nupic | src/nupic/database/connection.py | PooledConnectionPolicy.close | def close(self):
""" Close the policy instance and its database connection pool. """
self._logger.info("Closing")
if self._pool is not None:
self._pool.close()
self._pool = None
else:
self._logger.warning(
"close() called, but connection policy was alredy closed")
return | python | def close(self):
""" Close the policy instance and its database connection pool. """
self._logger.info("Closing")
if self._pool is not None:
self._pool.close()
self._pool = None
else:
self._logger.warning(
"close() called, but connection policy was alredy closed")
return | [
"def",
"close",
"(",
"self",
")",
":",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Closing\"",
")",
"if",
"self",
".",
"_pool",
"is",
"not",
"None",
":",
"self",
".",
"_pool",
".",
"close",
"(",
")",
"self",
".",
"_pool",
"=",
"None",
"else",
":",
"self",
".",
"_logger",
".",
"warning",
"(",
"\"close() called, but connection policy was alredy closed\"",
")",
"return"
] | Close the policy instance and its database connection pool. | [
"Close",
"the",
"policy",
"instance",
"and",
"its",
"database",
"connection",
"pool",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/database/connection.py#L528-L538 | valid |
numenta/nupic | src/nupic/database/connection.py | PooledConnectionPolicy.acquireConnection | def acquireConnection(self):
""" Get a connection from the pool.
Parameters:
----------------------------------------------------------------
retval: A ConnectionWrapper instance. NOTE: Caller
is responsible for calling the ConnectionWrapper
instance's release() method or use it in a context manager
expression (with ... as:) to release resources.
"""
self._logger.debug("Acquiring connection")
dbConn = self._pool.connection(shareable=False)
connWrap = ConnectionWrapper(dbConn=dbConn,
cursor=dbConn.cursor(),
releaser=self._releaseConnection,
logger=self._logger)
return connWrap | python | def acquireConnection(self):
""" Get a connection from the pool.
Parameters:
----------------------------------------------------------------
retval: A ConnectionWrapper instance. NOTE: Caller
is responsible for calling the ConnectionWrapper
instance's release() method or use it in a context manager
expression (with ... as:) to release resources.
"""
self._logger.debug("Acquiring connection")
dbConn = self._pool.connection(shareable=False)
connWrap = ConnectionWrapper(dbConn=dbConn,
cursor=dbConn.cursor(),
releaser=self._releaseConnection,
logger=self._logger)
return connWrap | [
"def",
"acquireConnection",
"(",
"self",
")",
":",
"self",
".",
"_logger",
".",
"debug",
"(",
"\"Acquiring connection\"",
")",
"dbConn",
"=",
"self",
".",
"_pool",
".",
"connection",
"(",
"shareable",
"=",
"False",
")",
"connWrap",
"=",
"ConnectionWrapper",
"(",
"dbConn",
"=",
"dbConn",
",",
"cursor",
"=",
"dbConn",
".",
"cursor",
"(",
")",
",",
"releaser",
"=",
"self",
".",
"_releaseConnection",
",",
"logger",
"=",
"self",
".",
"_logger",
")",
"return",
"connWrap"
] | Get a connection from the pool.
Parameters:
----------------------------------------------------------------
retval: A ConnectionWrapper instance. NOTE: Caller
is responsible for calling the ConnectionWrapper
instance's release() method or use it in a context manager
expression (with ... as:) to release resources. | [
"Get",
"a",
"connection",
"from",
"the",
"pool",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/database/connection.py#L541-L558 | valid |
numenta/nupic | src/nupic/database/connection.py | PerTransactionConnectionPolicy.close | def close(self):
""" Close the policy instance. """
self._logger.info("Closing")
if self._opened:
self._opened = False
else:
self._logger.warning(
"close() called, but connection policy was alredy closed")
return | python | def close(self):
""" Close the policy instance. """
self._logger.info("Closing")
if self._opened:
self._opened = False
else:
self._logger.warning(
"close() called, but connection policy was alredy closed")
return | [
"def",
"close",
"(",
"self",
")",
":",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Closing\"",
")",
"if",
"self",
".",
"_opened",
":",
"self",
".",
"_opened",
"=",
"False",
"else",
":",
"self",
".",
"_logger",
".",
"warning",
"(",
"\"close() called, but connection policy was alredy closed\"",
")",
"return"
] | Close the policy instance. | [
"Close",
"the",
"policy",
"instance",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/database/connection.py#L595-L605 | valid |
numenta/nupic | src/nupic/database/connection.py | PerTransactionConnectionPolicy.acquireConnection | def acquireConnection(self):
""" Create a Connection instance.
Parameters:
----------------------------------------------------------------
retval: A ConnectionWrapper instance. NOTE: Caller
is responsible for calling the ConnectionWrapper
instance's release() method or use it in a context manager
expression (with ... as:) to release resources.
"""
self._logger.debug("Acquiring connection")
dbConn = SteadyDB.connect(** _getCommonSteadyDBArgsDict())
connWrap = ConnectionWrapper(dbConn=dbConn,
cursor=dbConn.cursor(),
releaser=self._releaseConnection,
logger=self._logger)
return connWrap | python | def acquireConnection(self):
""" Create a Connection instance.
Parameters:
----------------------------------------------------------------
retval: A ConnectionWrapper instance. NOTE: Caller
is responsible for calling the ConnectionWrapper
instance's release() method or use it in a context manager
expression (with ... as:) to release resources.
"""
self._logger.debug("Acquiring connection")
dbConn = SteadyDB.connect(** _getCommonSteadyDBArgsDict())
connWrap = ConnectionWrapper(dbConn=dbConn,
cursor=dbConn.cursor(),
releaser=self._releaseConnection,
logger=self._logger)
return connWrap | [
"def",
"acquireConnection",
"(",
"self",
")",
":",
"self",
".",
"_logger",
".",
"debug",
"(",
"\"Acquiring connection\"",
")",
"dbConn",
"=",
"SteadyDB",
".",
"connect",
"(",
"*",
"*",
"_getCommonSteadyDBArgsDict",
"(",
")",
")",
"connWrap",
"=",
"ConnectionWrapper",
"(",
"dbConn",
"=",
"dbConn",
",",
"cursor",
"=",
"dbConn",
".",
"cursor",
"(",
")",
",",
"releaser",
"=",
"self",
".",
"_releaseConnection",
",",
"logger",
"=",
"self",
".",
"_logger",
")",
"return",
"connWrap"
] | Create a Connection instance.
Parameters:
----------------------------------------------------------------
retval: A ConnectionWrapper instance. NOTE: Caller
is responsible for calling the ConnectionWrapper
instance's release() method or use it in a context manager
expression (with ... as:) to release resources. | [
"Create",
"a",
"Connection",
"instance",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/database/connection.py#L608-L625 | valid |
numenta/nupic | src/nupic/database/connection.py | PerTransactionConnectionPolicy._releaseConnection | def _releaseConnection(self, dbConn, cursor):
""" Release database connection and cursor; passed as a callback to
ConnectionWrapper
"""
self._logger.debug("Releasing connection")
# Close the cursor
cursor.close()
# ... then close the database connection
dbConn.close()
return | python | def _releaseConnection(self, dbConn, cursor):
""" Release database connection and cursor; passed as a callback to
ConnectionWrapper
"""
self._logger.debug("Releasing connection")
# Close the cursor
cursor.close()
# ... then close the database connection
dbConn.close()
return | [
"def",
"_releaseConnection",
"(",
"self",
",",
"dbConn",
",",
"cursor",
")",
":",
"self",
".",
"_logger",
".",
"debug",
"(",
"\"Releasing connection\"",
")",
"# Close the cursor",
"cursor",
".",
"close",
"(",
")",
"# ... then close the database connection",
"dbConn",
".",
"close",
"(",
")",
"return"
] | Release database connection and cursor; passed as a callback to
ConnectionWrapper | [
"Release",
"database",
"connection",
"and",
"cursor",
";",
"passed",
"as",
"a",
"callback",
"to",
"ConnectionWrapper"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/database/connection.py#L628-L639 | valid |
numenta/nupic | src/nupic/regions/knn_anomaly_classifier_region.py | KNNAnomalyClassifierRegion.getSpec | def getSpec(cls):
"""
Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getSpec`.
"""
ns = dict(
description=KNNAnomalyClassifierRegion.__doc__,
singleNodeOnly=True,
inputs=dict(
spBottomUpOut=dict(
description="""The output signal generated from the bottom-up inputs
from lower levels.""",
dataType='Real32',
count=0,
required=True,
regionLevel=False,
isDefaultInput=True,
requireSplitterMap=False),
tpTopDownOut=dict(
description="""The top-down inputsignal, generated from
feedback from upper levels""",
dataType='Real32',
count=0,
required=True,
regionLevel=False,
isDefaultInput=True,
requireSplitterMap=False),
tpLrnActiveStateT=dict(
description="""Active cells in the learn state at time T from TM.
This is used to classify on.""",
dataType='Real32',
count=0,
required=True,
regionLevel=False,
isDefaultInput=True,
requireSplitterMap=False),
sequenceIdIn=dict(
description="Sequence ID",
dataType='UInt64',
count=1,
required=False,
regionLevel=True,
isDefaultInput=False,
requireSplitterMap=False),
),
outputs=dict(
),
parameters=dict(
trainRecords=dict(
description='Number of records to wait for training',
dataType='UInt32',
count=1,
constraints='',
defaultValue=0,
accessMode='Create'),
anomalyThreshold=dict(
description='Threshold used to classify anomalies.',
dataType='Real32',
count=1,
constraints='',
defaultValue=0,
accessMode='Create'),
cacheSize=dict(
description='Number of records to store in cache.',
dataType='UInt32',
count=1,
constraints='',
defaultValue=0,
accessMode='Create'),
classificationVectorType=dict(
description="""Vector type to use when classifying.
1 - Vector Column with Difference (TM and SP)
""",
dataType='UInt32',
count=1,
constraints='',
defaultValue=1,
accessMode='ReadWrite'),
activeColumnCount=dict(
description="""Number of active columns in a given step. Typically
equivalent to SP.numActiveColumnsPerInhArea""",
dataType='UInt32',
count=1,
constraints='',
defaultValue=40,
accessMode='ReadWrite'),
classificationMaxDist=dict(
description="""Maximum distance a sample can be from an anomaly
in the classifier to be labeled as an anomaly.
Ex: With rawOverlap distance, a value of 0.65 means that the points
must be at most a distance 0.65 apart from each other. This
translates to their being at least 35% similar.""",
dataType='Real32',
count=1,
constraints='',
defaultValue=0.65,
accessMode='Create'
)
),
commands=dict(
getLabels=dict(description=
"Returns a list of label dicts with properties ROWID and labels."
"ROWID corresponds to the records id and labels is a list of "
"strings representing the records labels. Takes additional "
"integer properties start and end representing the range that "
"will be returned."),
addLabel=dict(description=
"Takes parameters start, end and labelName. Adds the label "
"labelName to the records from start to end. This will recalculate "
"labels from end to the most recent record."),
removeLabels=dict(description=
"Takes additional parameters start, end, labelFilter. Start and "
"end correspond to range to remove the label. Remove labels from "
"each record with record ROWID in range from start to end, "
"noninclusive of end. Removes all records if labelFilter is None, "
"otherwise only removes the labels eqaul to labelFilter.")
)
)
ns['parameters'].update(KNNClassifierRegion.getSpec()['parameters'])
return ns | python | def getSpec(cls):
"""
Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getSpec`.
"""
ns = dict(
description=KNNAnomalyClassifierRegion.__doc__,
singleNodeOnly=True,
inputs=dict(
spBottomUpOut=dict(
description="""The output signal generated from the bottom-up inputs
from lower levels.""",
dataType='Real32',
count=0,
required=True,
regionLevel=False,
isDefaultInput=True,
requireSplitterMap=False),
tpTopDownOut=dict(
description="""The top-down inputsignal, generated from
feedback from upper levels""",
dataType='Real32',
count=0,
required=True,
regionLevel=False,
isDefaultInput=True,
requireSplitterMap=False),
tpLrnActiveStateT=dict(
description="""Active cells in the learn state at time T from TM.
This is used to classify on.""",
dataType='Real32',
count=0,
required=True,
regionLevel=False,
isDefaultInput=True,
requireSplitterMap=False),
sequenceIdIn=dict(
description="Sequence ID",
dataType='UInt64',
count=1,
required=False,
regionLevel=True,
isDefaultInput=False,
requireSplitterMap=False),
),
outputs=dict(
),
parameters=dict(
trainRecords=dict(
description='Number of records to wait for training',
dataType='UInt32',
count=1,
constraints='',
defaultValue=0,
accessMode='Create'),
anomalyThreshold=dict(
description='Threshold used to classify anomalies.',
dataType='Real32',
count=1,
constraints='',
defaultValue=0,
accessMode='Create'),
cacheSize=dict(
description='Number of records to store in cache.',
dataType='UInt32',
count=1,
constraints='',
defaultValue=0,
accessMode='Create'),
classificationVectorType=dict(
description="""Vector type to use when classifying.
1 - Vector Column with Difference (TM and SP)
""",
dataType='UInt32',
count=1,
constraints='',
defaultValue=1,
accessMode='ReadWrite'),
activeColumnCount=dict(
description="""Number of active columns in a given step. Typically
equivalent to SP.numActiveColumnsPerInhArea""",
dataType='UInt32',
count=1,
constraints='',
defaultValue=40,
accessMode='ReadWrite'),
classificationMaxDist=dict(
description="""Maximum distance a sample can be from an anomaly
in the classifier to be labeled as an anomaly.
Ex: With rawOverlap distance, a value of 0.65 means that the points
must be at most a distance 0.65 apart from each other. This
translates to their being at least 35% similar.""",
dataType='Real32',
count=1,
constraints='',
defaultValue=0.65,
accessMode='Create'
)
),
commands=dict(
getLabels=dict(description=
"Returns a list of label dicts with properties ROWID and labels."
"ROWID corresponds to the records id and labels is a list of "
"strings representing the records labels. Takes additional "
"integer properties start and end representing the range that "
"will be returned."),
addLabel=dict(description=
"Takes parameters start, end and labelName. Adds the label "
"labelName to the records from start to end. This will recalculate "
"labels from end to the most recent record."),
removeLabels=dict(description=
"Takes additional parameters start, end, labelFilter. Start and "
"end correspond to range to remove the label. Remove labels from "
"each record with record ROWID in range from start to end, "
"noninclusive of end. Removes all records if labelFilter is None, "
"otherwise only removes the labels eqaul to labelFilter.")
)
)
ns['parameters'].update(KNNClassifierRegion.getSpec()['parameters'])
return ns | [
"def",
"getSpec",
"(",
"cls",
")",
":",
"ns",
"=",
"dict",
"(",
"description",
"=",
"KNNAnomalyClassifierRegion",
".",
"__doc__",
",",
"singleNodeOnly",
"=",
"True",
",",
"inputs",
"=",
"dict",
"(",
"spBottomUpOut",
"=",
"dict",
"(",
"description",
"=",
"\"\"\"The output signal generated from the bottom-up inputs\n from lower levels.\"\"\"",
",",
"dataType",
"=",
"'Real32'",
",",
"count",
"=",
"0",
",",
"required",
"=",
"True",
",",
"regionLevel",
"=",
"False",
",",
"isDefaultInput",
"=",
"True",
",",
"requireSplitterMap",
"=",
"False",
")",
",",
"tpTopDownOut",
"=",
"dict",
"(",
"description",
"=",
"\"\"\"The top-down inputsignal, generated from\n feedback from upper levels\"\"\"",
",",
"dataType",
"=",
"'Real32'",
",",
"count",
"=",
"0",
",",
"required",
"=",
"True",
",",
"regionLevel",
"=",
"False",
",",
"isDefaultInput",
"=",
"True",
",",
"requireSplitterMap",
"=",
"False",
")",
",",
"tpLrnActiveStateT",
"=",
"dict",
"(",
"description",
"=",
"\"\"\"Active cells in the learn state at time T from TM.\n This is used to classify on.\"\"\"",
",",
"dataType",
"=",
"'Real32'",
",",
"count",
"=",
"0",
",",
"required",
"=",
"True",
",",
"regionLevel",
"=",
"False",
",",
"isDefaultInput",
"=",
"True",
",",
"requireSplitterMap",
"=",
"False",
")",
",",
"sequenceIdIn",
"=",
"dict",
"(",
"description",
"=",
"\"Sequence ID\"",
",",
"dataType",
"=",
"'UInt64'",
",",
"count",
"=",
"1",
",",
"required",
"=",
"False",
",",
"regionLevel",
"=",
"True",
",",
"isDefaultInput",
"=",
"False",
",",
"requireSplitterMap",
"=",
"False",
")",
",",
")",
",",
"outputs",
"=",
"dict",
"(",
")",
",",
"parameters",
"=",
"dict",
"(",
"trainRecords",
"=",
"dict",
"(",
"description",
"=",
"'Number of records to wait for training'",
",",
"dataType",
"=",
"'UInt32'",
",",
"count",
"=",
"1",
",",
"constraints",
"=",
"''",
",",
"defaultValue",
"=",
"0",
",",
"accessMode",
"=",
"'Create'",
")",
",",
"anomalyThreshold",
"=",
"dict",
"(",
"description",
"=",
"'Threshold used to classify anomalies.'",
",",
"dataType",
"=",
"'Real32'",
",",
"count",
"=",
"1",
",",
"constraints",
"=",
"''",
",",
"defaultValue",
"=",
"0",
",",
"accessMode",
"=",
"'Create'",
")",
",",
"cacheSize",
"=",
"dict",
"(",
"description",
"=",
"'Number of records to store in cache.'",
",",
"dataType",
"=",
"'UInt32'",
",",
"count",
"=",
"1",
",",
"constraints",
"=",
"''",
",",
"defaultValue",
"=",
"0",
",",
"accessMode",
"=",
"'Create'",
")",
",",
"classificationVectorType",
"=",
"dict",
"(",
"description",
"=",
"\"\"\"Vector type to use when classifying.\n 1 - Vector Column with Difference (TM and SP)\n \"\"\"",
",",
"dataType",
"=",
"'UInt32'",
",",
"count",
"=",
"1",
",",
"constraints",
"=",
"''",
",",
"defaultValue",
"=",
"1",
",",
"accessMode",
"=",
"'ReadWrite'",
")",
",",
"activeColumnCount",
"=",
"dict",
"(",
"description",
"=",
"\"\"\"Number of active columns in a given step. Typically\n equivalent to SP.numActiveColumnsPerInhArea\"\"\"",
",",
"dataType",
"=",
"'UInt32'",
",",
"count",
"=",
"1",
",",
"constraints",
"=",
"''",
",",
"defaultValue",
"=",
"40",
",",
"accessMode",
"=",
"'ReadWrite'",
")",
",",
"classificationMaxDist",
"=",
"dict",
"(",
"description",
"=",
"\"\"\"Maximum distance a sample can be from an anomaly\n in the classifier to be labeled as an anomaly.\n\n Ex: With rawOverlap distance, a value of 0.65 means that the points\n must be at most a distance 0.65 apart from each other. This\n translates to they must be at least 35% similar.\"\"\"",
",",
"dataType",
"=",
"'Real32'",
",",
"count",
"=",
"1",
",",
"constraints",
"=",
"''",
",",
"defaultValue",
"=",
"0.65",
",",
"accessMode",
"=",
"'Create'",
")",
")",
",",
"commands",
"=",
"dict",
"(",
"getLabels",
"=",
"dict",
"(",
"description",
"=",
"\"Returns a list of label dicts with properties ROWID and labels.\"",
"\"ROWID corresponds to the records id and labels is a list of \"",
"\"strings representing the records labels. Takes additional \"",
"\"integer properties start and end representing the range that \"",
"\"will be returned.\"",
")",
",",
"addLabel",
"=",
"dict",
"(",
"description",
"=",
"\"Takes parameters start, end and labelName. Adds the label \"",
"\"labelName to the records from start to end. This will recalculate \"",
"\"labels from end to the most recent record.\"",
")",
",",
"removeLabels",
"=",
"dict",
"(",
"description",
"=",
"\"Takes additional parameters start, end, labelFilter. Start and \"",
"\"end correspond to range to remove the label. Remove labels from \"",
"\"each record with record ROWID in range from start to end, \"",
"\"noninclusive of end. Removes all records if labelFilter is None, \"",
"\"otherwise only removes the labels eqaul to labelFilter.\"",
")",
")",
")",
"ns",
"[",
"'parameters'",
"]",
".",
"update",
"(",
"KNNClassifierRegion",
".",
"getSpec",
"(",
")",
"[",
"'parameters'",
"]",
")",
"return",
"ns"
] | Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getSpec`. | [
"Overrides",
":",
"meth",
":",
"nupic",
".",
"bindings",
".",
"regions",
".",
"PyRegion",
".",
"PyRegion",
".",
"getSpec",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/regions/knn_anomaly_classifier_region.py#L79-L213 | valid |
numenta/nupic | src/nupic/regions/knn_anomaly_classifier_region.py | KNNAnomalyClassifierRegion.getParameter | def getParameter(self, name, index=-1):
"""
Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getParameter`.
"""
if name == "trainRecords":
return self.trainRecords
elif name == "anomalyThreshold":
return self.anomalyThreshold
elif name == "activeColumnCount":
return self._activeColumnCount
elif name == "classificationMaxDist":
return self._classificationMaxDist
else:
# If any spec parameter name is the same as an attribute, this call
# will get it automatically, e.g. self.learningMode
return PyRegion.getParameter(self, name, index) | python | def getParameter(self, name, index=-1):
"""
Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getParameter`.
"""
if name == "trainRecords":
return self.trainRecords
elif name == "anomalyThreshold":
return self.anomalyThreshold
elif name == "activeColumnCount":
return self._activeColumnCount
elif name == "classificationMaxDist":
return self._classificationMaxDist
else:
# If any spec parameter name is the same as an attribute, this call
# will get it automatically, e.g. self.learningMode
return PyRegion.getParameter(self, name, index) | [
"def",
"getParameter",
"(",
"self",
",",
"name",
",",
"index",
"=",
"-",
"1",
")",
":",
"if",
"name",
"==",
"\"trainRecords\"",
":",
"return",
"self",
".",
"trainRecords",
"elif",
"name",
"==",
"\"anomalyThreshold\"",
":",
"return",
"self",
".",
"anomalyThreshold",
"elif",
"name",
"==",
"\"activeColumnCount\"",
":",
"return",
"self",
".",
"_activeColumnCount",
"elif",
"name",
"==",
"\"classificationMaxDist\"",
":",
"return",
"self",
".",
"_classificationMaxDist",
"else",
":",
"# If any spec parameter name is the same as an attribute, this call",
"# will get it automatically, e.g. self.learningMode",
"return",
"PyRegion",
".",
"getParameter",
"(",
"self",
",",
"name",
",",
"index",
")"
] | Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getParameter`. | [
"Overrides",
":",
"meth",
":",
"nupic",
".",
"bindings",
".",
"regions",
".",
"PyRegion",
".",
"PyRegion",
".",
"getParameter",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/regions/knn_anomaly_classifier_region.py#L260-L275 | valid |
numenta/nupic | src/nupic/regions/knn_anomaly_classifier_region.py | KNNAnomalyClassifierRegion.setParameter | def setParameter(self, name, index, value):
"""
Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.setParameter`.
"""
if name == "trainRecords":
# Ensure that the trainRecords can only be set to minimum of the ROWID in
# the saved states
if not (isinstance(value, float) or isinstance(value, int)):
raise HTMPredictionModelInvalidArgument("Invalid argument type \'%s\'. threshold "
"must be a number." % (type(value)))
if len(self._recordsCache) > 0 and value < self._recordsCache[0].ROWID:
raise HTMPredictionModelInvalidArgument("Invalid value. autoDetectWaitRecord "
"value must be valid record within output stream. Current minimum "
" ROWID in output stream is %d." % (self._recordsCache[0].ROWID))
self.trainRecords = value
# Remove any labels before the first cached record (won't be used anymore)
self._deleteRangeFromKNN(0, self._recordsCache[0].ROWID)
# Reclassify all states
self._classifyStates()
elif name == "anomalyThreshold":
if not (isinstance(value, float) or isinstance(value, int)):
raise HTMPredictionModelInvalidArgument("Invalid argument type \'%s\'. threshold "
"must be a number." % (type(value)))
self.anomalyThreshold = value
self._classifyStates()
elif name == "classificationMaxDist":
if not (isinstance(value, float) or isinstance(value, int)):
raise HTMPredictionModelInvalidArgument("Invalid argument type \'%s\'. "
"classificationMaxDist must be a number." % (type(value)))
self._classificationMaxDist = value
self._classifyStates()
elif name == "activeColumnCount":
self._activeColumnCount = value
else:
return PyRegion.setParameter(self, name, index, value) | python | def setParameter(self, name, index, value):
"""
Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.setParameter`.
"""
if name == "trainRecords":
# Ensure that the trainRecords can only be set to minimum of the ROWID in
# the saved states
if not (isinstance(value, float) or isinstance(value, int)):
raise HTMPredictionModelInvalidArgument("Invalid argument type \'%s\'. threshold "
"must be a number." % (type(value)))
if len(self._recordsCache) > 0 and value < self._recordsCache[0].ROWID:
raise HTMPredictionModelInvalidArgument("Invalid value. autoDetectWaitRecord "
"value must be valid record within output stream. Current minimum "
" ROWID in output stream is %d." % (self._recordsCache[0].ROWID))
self.trainRecords = value
# Remove any labels before the first cached record (won't be used anymore)
self._deleteRangeFromKNN(0, self._recordsCache[0].ROWID)
# Reclassify all states
self._classifyStates()
elif name == "anomalyThreshold":
if not (isinstance(value, float) or isinstance(value, int)):
raise HTMPredictionModelInvalidArgument("Invalid argument type \'%s\'. threshold "
"must be a number." % (type(value)))
self.anomalyThreshold = value
self._classifyStates()
elif name == "classificationMaxDist":
if not (isinstance(value, float) or isinstance(value, int)):
raise HTMPredictionModelInvalidArgument("Invalid argument type \'%s\'. "
"classificationMaxDist must be a number." % (type(value)))
self._classificationMaxDist = value
self._classifyStates()
elif name == "activeColumnCount":
self._activeColumnCount = value
else:
return PyRegion.setParameter(self, name, index, value) | [
"def",
"setParameter",
"(",
"self",
",",
"name",
",",
"index",
",",
"value",
")",
":",
"if",
"name",
"==",
"\"trainRecords\"",
":",
"# Ensure that the trainRecords can only be set to minimum of the ROWID in",
"# the saved states",
"if",
"not",
"(",
"isinstance",
"(",
"value",
",",
"float",
")",
"or",
"isinstance",
"(",
"value",
",",
"int",
")",
")",
":",
"raise",
"HTMPredictionModelInvalidArgument",
"(",
"\"Invalid argument type \\'%s\\'. threshold \"",
"\"must be a number.\"",
"%",
"(",
"type",
"(",
"value",
")",
")",
")",
"if",
"len",
"(",
"self",
".",
"_recordsCache",
")",
">",
"0",
"and",
"value",
"<",
"self",
".",
"_recordsCache",
"[",
"0",
"]",
".",
"ROWID",
":",
"raise",
"HTMPredictionModelInvalidArgument",
"(",
"\"Invalid value. autoDetectWaitRecord \"",
"\"value must be valid record within output stream. Current minimum \"",
"\" ROWID in output stream is %d.\"",
"%",
"(",
"self",
".",
"_recordsCache",
"[",
"0",
"]",
".",
"ROWID",
")",
")",
"self",
".",
"trainRecords",
"=",
"value",
"# Remove any labels before the first cached record (wont be used anymore)",
"self",
".",
"_deleteRangeFromKNN",
"(",
"0",
",",
"self",
".",
"_recordsCache",
"[",
"0",
"]",
".",
"ROWID",
")",
"# Reclassify all states",
"self",
".",
"_classifyStates",
"(",
")",
"elif",
"name",
"==",
"\"anomalyThreshold\"",
":",
"if",
"not",
"(",
"isinstance",
"(",
"value",
",",
"float",
")",
"or",
"isinstance",
"(",
"value",
",",
"int",
")",
")",
":",
"raise",
"HTMPredictionModelInvalidArgument",
"(",
"\"Invalid argument type \\'%s\\'. threshold \"",
"\"must be a number.\"",
"%",
"(",
"type",
"(",
"value",
")",
")",
")",
"self",
".",
"anomalyThreshold",
"=",
"value",
"self",
".",
"_classifyStates",
"(",
")",
"elif",
"name",
"==",
"\"classificationMaxDist\"",
":",
"if",
"not",
"(",
"isinstance",
"(",
"value",
",",
"float",
")",
"or",
"isinstance",
"(",
"value",
",",
"int",
")",
")",
":",
"raise",
"HTMPredictionModelInvalidArgument",
"(",
"\"Invalid argument type \\'%s\\'. \"",
"\"classificationMaxDist must be a number.\"",
"%",
"(",
"type",
"(",
"value",
")",
")",
")",
"self",
".",
"_classificationMaxDist",
"=",
"value",
"self",
".",
"_classifyStates",
"(",
")",
"elif",
"name",
"==",
"\"activeColumnCount\"",
":",
"self",
".",
"_activeColumnCount",
"=",
"value",
"else",
":",
"return",
"PyRegion",
".",
"setParameter",
"(",
"self",
",",
"name",
",",
"index",
",",
"value",
")"
] | Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.setParameter`. | [
"Overrides",
":",
"meth",
":",
"nupic",
".",
"bindings",
".",
"regions",
".",
"PyRegion",
".",
"PyRegion",
".",
"setParameter",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/regions/knn_anomaly_classifier_region.py#L278-L314 | valid |
numenta/nupic | src/nupic/regions/knn_anomaly_classifier_region.py | KNNAnomalyClassifierRegion.compute | def compute(self, inputs, outputs):
"""
Process one input sample.
This method is called by the runtime engine.
"""
record = self._constructClassificationRecord(inputs)
#Classify this point after waiting the classification delay
if record.ROWID >= self.getParameter('trainRecords'):
self._classifyState(record)
#Save new classification record and keep history as moving window
self._recordsCache.append(record)
while len(self._recordsCache) > self.cacheSize:
self._recordsCache.pop(0)
self.labelResults = record.anomalyLabel
self._iteration += 1 | python | def compute(self, inputs, outputs):
"""
Process one input sample.
This method is called by the runtime engine.
"""
record = self._constructClassificationRecord(inputs)
#Classify this point after waiting the classification delay
if record.ROWID >= self.getParameter('trainRecords'):
self._classifyState(record)
#Save new classification record and keep history as moving window
self._recordsCache.append(record)
while len(self._recordsCache) > self.cacheSize:
self._recordsCache.pop(0)
self.labelResults = record.anomalyLabel
self._iteration += 1 | [
"def",
"compute",
"(",
"self",
",",
"inputs",
",",
"outputs",
")",
":",
"record",
"=",
"self",
".",
"_constructClassificationRecord",
"(",
"inputs",
")",
"#Classify this point after waiting the classification delay",
"if",
"record",
".",
"ROWID",
">=",
"self",
".",
"getParameter",
"(",
"'trainRecords'",
")",
":",
"self",
".",
"_classifyState",
"(",
"record",
")",
"#Save new classification record and keep history as moving window",
"self",
".",
"_recordsCache",
".",
"append",
"(",
"record",
")",
"while",
"len",
"(",
"self",
".",
"_recordsCache",
")",
">",
"self",
".",
"cacheSize",
":",
"self",
".",
"_recordsCache",
".",
"pop",
"(",
"0",
")",
"self",
".",
"labelResults",
"=",
"record",
".",
"anomalyLabel",
"self",
".",
"_iteration",
"+=",
"1"
] | Process one input sample.
This method is called by the runtime engine. | [
"Process",
"one",
"input",
"sample",
".",
"This",
"method",
"is",
"called",
"by",
"the",
"runtime",
"engine",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/regions/knn_anomaly_classifier_region.py#L317-L335 | valid |
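For orientation, the compute() record above scores each input with anomaly.computeRawAnomalyScore(activeSPColumns, self._prevPredictedColumns). Below is a minimal standalone sketch of that score — the fraction of currently active SP columns that were not predicted on the previous step — assuming only NumPy; the helper name and the zero-activity convention are illustrative, not NuPIC's exact code.

import numpy as np

def raw_anomaly_score(active_columns, prev_predicted_columns):
    # Fraction of active columns that were not predicted on the previous step.
    active = np.asarray(active_columns)
    if active.size == 0:
        return 0.0  # no active columns: treat the step as fully predicted
    unpredicted = np.setdiff1d(active, np.asarray(prev_predicted_columns))
    return float(unpredicted.size) / float(active.size)

# Example: 4 active columns, 3 of them were predicted -> score 0.25
print(raw_anomaly_score([2, 5, 7, 9], [2, 5, 9, 11]))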
numenta/nupic | src/nupic/regions/knn_anomaly_classifier_region.py | KNNAnomalyClassifierRegion._classifyState | def _classifyState(self, state):
"""
Reclassifies given state.
"""
    # Record is before the wait period; do not classify
if state.ROWID < self.getParameter('trainRecords'):
if not state.setByUser:
state.anomalyLabel = []
self._deleteRecordsFromKNN([state])
return
label = KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL
autoLabel = label + KNNAnomalyClassifierRegion.AUTO_TAG
# Update the label based on classifications
newCategory = self._recomputeRecordFromKNN(state)
labelList = self._categoryToLabelList(newCategory)
if state.setByUser:
if label in state.anomalyLabel:
state.anomalyLabel.remove(label)
if autoLabel in state.anomalyLabel:
state.anomalyLabel.remove(autoLabel)
labelList.extend(state.anomalyLabel)
# Add threshold classification label if above threshold, else if
# classified to add the auto threshold classification.
if state.anomalyScore >= self.getParameter('anomalyThreshold'):
labelList.append(label)
elif label in labelList:
ind = labelList.index(label)
labelList[ind] = autoLabel
# Make all entries unique
labelList = list(set(labelList))
# If both above threshold and auto classified above - remove auto label
if label in labelList and autoLabel in labelList:
labelList.remove(autoLabel)
if state.anomalyLabel == labelList:
return
# Update state's labeling
state.anomalyLabel = labelList
# Update KNN Classifier with new labeling
if state.anomalyLabel == []:
self._deleteRecordsFromKNN([state])
else:
self._addRecordToKNN(state) | python | def _classifyState(self, state):
"""
Reclassifies given state.
"""
    # Record is before the wait period; do not classify
if state.ROWID < self.getParameter('trainRecords'):
if not state.setByUser:
state.anomalyLabel = []
self._deleteRecordsFromKNN([state])
return
label = KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL
autoLabel = label + KNNAnomalyClassifierRegion.AUTO_TAG
# Update the label based on classifications
newCategory = self._recomputeRecordFromKNN(state)
labelList = self._categoryToLabelList(newCategory)
if state.setByUser:
if label in state.anomalyLabel:
state.anomalyLabel.remove(label)
if autoLabel in state.anomalyLabel:
state.anomalyLabel.remove(autoLabel)
labelList.extend(state.anomalyLabel)
# Add threshold classification label if above threshold, else if
# classified to add the auto threshold classification.
if state.anomalyScore >= self.getParameter('anomalyThreshold'):
labelList.append(label)
elif label in labelList:
ind = labelList.index(label)
labelList[ind] = autoLabel
# Make all entries unique
labelList = list(set(labelList))
# If both above threshold and auto classified above - remove auto label
if label in labelList and autoLabel in labelList:
labelList.remove(autoLabel)
if state.anomalyLabel == labelList:
return
# Update state's labeling
state.anomalyLabel = labelList
# Update KNN Classifier with new labeling
if state.anomalyLabel == []:
self._deleteRecordsFromKNN([state])
else:
self._addRecordToKNN(state) | [
"def",
"_classifyState",
"(",
"self",
",",
"state",
")",
":",
"# Record is before wait period do not classifiy",
"if",
"state",
".",
"ROWID",
"<",
"self",
".",
"getParameter",
"(",
"'trainRecords'",
")",
":",
"if",
"not",
"state",
".",
"setByUser",
":",
"state",
".",
"anomalyLabel",
"=",
"[",
"]",
"self",
".",
"_deleteRecordsFromKNN",
"(",
"[",
"state",
"]",
")",
"return",
"label",
"=",
"KNNAnomalyClassifierRegion",
".",
"AUTO_THRESHOLD_CLASSIFIED_LABEL",
"autoLabel",
"=",
"label",
"+",
"KNNAnomalyClassifierRegion",
".",
"AUTO_TAG",
"# Update the label based on classifications",
"newCategory",
"=",
"self",
".",
"_recomputeRecordFromKNN",
"(",
"state",
")",
"labelList",
"=",
"self",
".",
"_categoryToLabelList",
"(",
"newCategory",
")",
"if",
"state",
".",
"setByUser",
":",
"if",
"label",
"in",
"state",
".",
"anomalyLabel",
":",
"state",
".",
"anomalyLabel",
".",
"remove",
"(",
"label",
")",
"if",
"autoLabel",
"in",
"state",
".",
"anomalyLabel",
":",
"state",
".",
"anomalyLabel",
".",
"remove",
"(",
"autoLabel",
")",
"labelList",
".",
"extend",
"(",
"state",
".",
"anomalyLabel",
")",
"# Add threshold classification label if above threshold, else if",
"# classified to add the auto threshold classification.",
"if",
"state",
".",
"anomalyScore",
">=",
"self",
".",
"getParameter",
"(",
"'anomalyThreshold'",
")",
":",
"labelList",
".",
"append",
"(",
"label",
")",
"elif",
"label",
"in",
"labelList",
":",
"ind",
"=",
"labelList",
".",
"index",
"(",
"label",
")",
"labelList",
"[",
"ind",
"]",
"=",
"autoLabel",
"# Make all entries unique",
"labelList",
"=",
"list",
"(",
"set",
"(",
"labelList",
")",
")",
"# If both above threshold and auto classified above - remove auto label",
"if",
"label",
"in",
"labelList",
"and",
"autoLabel",
"in",
"labelList",
":",
"labelList",
".",
"remove",
"(",
"autoLabel",
")",
"if",
"state",
".",
"anomalyLabel",
"==",
"labelList",
":",
"return",
"# Update state's labeling",
"state",
".",
"anomalyLabel",
"=",
"labelList",
"# Update KNN Classifier with new labeling",
"if",
"state",
".",
"anomalyLabel",
"==",
"[",
"]",
":",
"self",
".",
"_deleteRecordsFromKNN",
"(",
"[",
"state",
"]",
")",
"else",
":",
"self",
".",
"_addRecordToKNN",
"(",
"state",
")"
] | Reclassifies given state. | [
"Reclassifies",
"given",
"state",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/regions/knn_anomaly_classifier_region.py#L355-L405 | valid |
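The labeling rule in _classifyState() above is easy to lose in the branches; here is a small pure-function sketch of the path for records not set by the user. The label strings are hypothetical stand-ins — the real values of AUTO_THRESHOLD_CLASSIFIED_LABEL and AUTO_TAG are not shown in this record.

THRESHOLD_LABEL = "thresholdLabel"        # stand-in for AUTO_THRESHOLD_CLASSIFIED_LABEL
AUTO_LABEL = THRESHOLD_LABEL + " (auto)"  # stand-in for THRESHOLD_LABEL + AUTO_TAG

def resolve_labels(knn_labels, anomaly_score, threshold):
    # Start from whatever the KNN classifier recalled for this state.
    labels = list(knn_labels)
    if anomaly_score >= threshold:
        labels.append(THRESHOLD_LABEL)    # above threshold: hard label
    elif THRESHOLD_LABEL in labels:
        # Below threshold, but the KNN recalled it as anomalous: downgrade to the auto label.
        labels[labels.index(THRESHOLD_LABEL)] = AUTO_LABEL
    labels = list(set(labels))            # make entries unique
    if THRESHOLD_LABEL in labels and AUTO_LABEL in labels:
        labels.remove(AUTO_LABEL)         # the hard label wins over the auto label
    return labels

print(resolve_labels(["thresholdLabel"], anomaly_score=0.2, threshold=0.9))
# -> ['thresholdLabel (auto)']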
numenta/nupic | src/nupic/regions/knn_anomaly_classifier_region.py | KNNAnomalyClassifierRegion._constructClassificationRecord | def _constructClassificationRecord(self, inputs):
"""
Construct a _HTMClassificationRecord based on the state of the model
passed in through the inputs.
Types for self.classificationVectorType:
1 - TM active cells in learn state
2 - SP columns concatenated with error from TM column predictions and SP
"""
# Count the number of unpredicted columns
allSPColumns = inputs["spBottomUpOut"]
activeSPColumns = allSPColumns.nonzero()[0]
score = anomaly.computeRawAnomalyScore(activeSPColumns,
self._prevPredictedColumns)
spSize = len(allSPColumns)
allTPCells = inputs['tpTopDownOut']
tpSize = len(inputs['tpLrnActiveStateT'])
classificationVector = numpy.array([])
if self.classificationVectorType == 1:
# Classification Vector: [---TM Cells---]
classificationVector = numpy.zeros(tpSize)
activeCellMatrix = inputs["tpLrnActiveStateT"].reshape(tpSize, 1)
activeCellIdx = numpy.where(activeCellMatrix > 0)[0]
if activeCellIdx.shape[0] > 0:
classificationVector[numpy.array(activeCellIdx, dtype=numpy.uint16)] = 1
elif self.classificationVectorType == 2:
      # Classification Vector: [---SP---|---(TM-SP)----]
classificationVector = numpy.zeros(spSize+spSize)
if activeSPColumns.shape[0] > 0:
classificationVector[activeSPColumns] = 1.0
errorColumns = numpy.setdiff1d(self._prevPredictedColumns,
activeSPColumns)
if errorColumns.shape[0] > 0:
errorColumnIndexes = ( numpy.array(errorColumns, dtype=numpy.uint16) +
spSize )
classificationVector[errorColumnIndexes] = 1.0
else:
raise TypeError("Classification vector type must be either 'tpc' or"
" 'sp_tpe', current value is %s" % (self.classificationVectorType))
# Store the state for next time step
numPredictedCols = len(self._prevPredictedColumns)
predictedColumns = allTPCells.nonzero()[0]
self._prevPredictedColumns = copy.deepcopy(predictedColumns)
if self._anomalyVectorLength is None:
self._anomalyVectorLength = len(classificationVector)
result = _CLAClassificationRecord(
ROWID=self._iteration, #__numRunCalls called
#at beginning of model.run
anomalyScore=score,
anomalyVector=classificationVector.nonzero()[0].tolist(),
anomalyLabel=[]
)
return result | python | def _constructClassificationRecord(self, inputs):
"""
Construct a _HTMClassificationRecord based on the state of the model
passed in through the inputs.
Types for self.classificationVectorType:
1 - TM active cells in learn state
2 - SP columns concatenated with error from TM column predictions and SP
"""
# Count the number of unpredicted columns
allSPColumns = inputs["spBottomUpOut"]
activeSPColumns = allSPColumns.nonzero()[0]
score = anomaly.computeRawAnomalyScore(activeSPColumns,
self._prevPredictedColumns)
spSize = len(allSPColumns)
allTPCells = inputs['tpTopDownOut']
tpSize = len(inputs['tpLrnActiveStateT'])
classificationVector = numpy.array([])
if self.classificationVectorType == 1:
# Classification Vector: [---TM Cells---]
classificationVector = numpy.zeros(tpSize)
activeCellMatrix = inputs["tpLrnActiveStateT"].reshape(tpSize, 1)
activeCellIdx = numpy.where(activeCellMatrix > 0)[0]
if activeCellIdx.shape[0] > 0:
classificationVector[numpy.array(activeCellIdx, dtype=numpy.uint16)] = 1
elif self.classificationVectorType == 2:
      # Classification Vector: [---SP---|---(TM-SP)----]
classificationVector = numpy.zeros(spSize+spSize)
if activeSPColumns.shape[0] > 0:
classificationVector[activeSPColumns] = 1.0
errorColumns = numpy.setdiff1d(self._prevPredictedColumns,
activeSPColumns)
if errorColumns.shape[0] > 0:
errorColumnIndexes = ( numpy.array(errorColumns, dtype=numpy.uint16) +
spSize )
classificationVector[errorColumnIndexes] = 1.0
else:
raise TypeError("Classification vector type must be either 'tpc' or"
" 'sp_tpe', current value is %s" % (self.classificationVectorType))
# Store the state for next time step
numPredictedCols = len(self._prevPredictedColumns)
predictedColumns = allTPCells.nonzero()[0]
self._prevPredictedColumns = copy.deepcopy(predictedColumns)
if self._anomalyVectorLength is None:
self._anomalyVectorLength = len(classificationVector)
result = _CLAClassificationRecord(
ROWID=self._iteration, #__numRunCalls called
#at beginning of model.run
anomalyScore=score,
anomalyVector=classificationVector.nonzero()[0].tolist(),
anomalyLabel=[]
)
return result | [
"def",
"_constructClassificationRecord",
"(",
"self",
",",
"inputs",
")",
":",
"# Count the number of unpredicted columns",
"allSPColumns",
"=",
"inputs",
"[",
"\"spBottomUpOut\"",
"]",
"activeSPColumns",
"=",
"allSPColumns",
".",
"nonzero",
"(",
")",
"[",
"0",
"]",
"score",
"=",
"anomaly",
".",
"computeRawAnomalyScore",
"(",
"activeSPColumns",
",",
"self",
".",
"_prevPredictedColumns",
")",
"spSize",
"=",
"len",
"(",
"allSPColumns",
")",
"allTPCells",
"=",
"inputs",
"[",
"'tpTopDownOut'",
"]",
"tpSize",
"=",
"len",
"(",
"inputs",
"[",
"'tpLrnActiveStateT'",
"]",
")",
"classificationVector",
"=",
"numpy",
".",
"array",
"(",
"[",
"]",
")",
"if",
"self",
".",
"classificationVectorType",
"==",
"1",
":",
"# Classification Vector: [---TM Cells---]",
"classificationVector",
"=",
"numpy",
".",
"zeros",
"(",
"tpSize",
")",
"activeCellMatrix",
"=",
"inputs",
"[",
"\"tpLrnActiveStateT\"",
"]",
".",
"reshape",
"(",
"tpSize",
",",
"1",
")",
"activeCellIdx",
"=",
"numpy",
".",
"where",
"(",
"activeCellMatrix",
">",
"0",
")",
"[",
"0",
"]",
"if",
"activeCellIdx",
".",
"shape",
"[",
"0",
"]",
">",
"0",
":",
"classificationVector",
"[",
"numpy",
".",
"array",
"(",
"activeCellIdx",
",",
"dtype",
"=",
"numpy",
".",
"uint16",
")",
"]",
"=",
"1",
"elif",
"self",
".",
"classificationVectorType",
"==",
"2",
":",
"# Classification Vecotr: [---SP---|---(TM-SP)----]",
"classificationVector",
"=",
"numpy",
".",
"zeros",
"(",
"spSize",
"+",
"spSize",
")",
"if",
"activeSPColumns",
".",
"shape",
"[",
"0",
"]",
">",
"0",
":",
"classificationVector",
"[",
"activeSPColumns",
"]",
"=",
"1.0",
"errorColumns",
"=",
"numpy",
".",
"setdiff1d",
"(",
"self",
".",
"_prevPredictedColumns",
",",
"activeSPColumns",
")",
"if",
"errorColumns",
".",
"shape",
"[",
"0",
"]",
">",
"0",
":",
"errorColumnIndexes",
"=",
"(",
"numpy",
".",
"array",
"(",
"errorColumns",
",",
"dtype",
"=",
"numpy",
".",
"uint16",
")",
"+",
"spSize",
")",
"classificationVector",
"[",
"errorColumnIndexes",
"]",
"=",
"1.0",
"else",
":",
"raise",
"TypeError",
"(",
"\"Classification vector type must be either 'tpc' or\"",
"\" 'sp_tpe', current value is %s\"",
"%",
"(",
"self",
".",
"classificationVectorType",
")",
")",
"# Store the state for next time step",
"numPredictedCols",
"=",
"len",
"(",
"self",
".",
"_prevPredictedColumns",
")",
"predictedColumns",
"=",
"allTPCells",
".",
"nonzero",
"(",
")",
"[",
"0",
"]",
"self",
".",
"_prevPredictedColumns",
"=",
"copy",
".",
"deepcopy",
"(",
"predictedColumns",
")",
"if",
"self",
".",
"_anomalyVectorLength",
"is",
"None",
":",
"self",
".",
"_anomalyVectorLength",
"=",
"len",
"(",
"classificationVector",
")",
"result",
"=",
"_CLAClassificationRecord",
"(",
"ROWID",
"=",
"self",
".",
"_iteration",
",",
"#__numRunCalls called",
"#at beginning of model.run",
"anomalyScore",
"=",
"score",
",",
"anomalyVector",
"=",
"classificationVector",
".",
"nonzero",
"(",
")",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
",",
"anomalyLabel",
"=",
"[",
"]",
")",
"return",
"result"
] | Construct a _HTMClassificationRecord based on the state of the model
passed in through the inputs.
Types for self.classificationVectorType:
1 - TM active cells in learn state
2 - SP columns concatenated with error from TM column predictions and SP | [
"Construct",
"a",
"_HTMClassificationRecord",
"based",
"on",
"the",
"state",
"of",
"the",
"model",
"passed",
"in",
"through",
"the",
"inputs",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/regions/knn_anomaly_classifier_region.py#L408-L470 | valid |
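A sketch of the type-2 ('sp_tpe') classification vector laid out above — SP active bits followed by TM-prediction-error bits — using only NumPy and a hypothetical 8-column SP. It mirrors the logic in the record but is not the region's own code.

import numpy as np

def build_sp_tpe_vector(active_sp_columns, prev_predicted_columns, sp_size):
    # Layout: [ SP active bits | prediction-error bits ], total length 2 * sp_size.
    vector = np.zeros(2 * sp_size)
    active = np.asarray(active_sp_columns, dtype=np.int64)
    if active.size > 0:
        vector[active] = 1.0
    # Columns that were predicted last step but did not become active now.
    error_cols = np.setdiff1d(np.asarray(prev_predicted_columns, dtype=np.int64), active)
    if error_cols.size > 0:
        vector[error_cols + sp_size] = 1.0
    return vector

print(build_sp_tpe_vector([1, 3], [3, 6], sp_size=8).nonzero()[0])  # -> [ 1  3 14]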
numenta/nupic | src/nupic/regions/knn_anomaly_classifier_region.py | KNNAnomalyClassifierRegion._addRecordToKNN | def _addRecordToKNN(self, record):
"""
Adds the record to the KNN classifier.
"""
knn = self._knnclassifier._knn
prototype_idx = self._knnclassifier.getParameter('categoryRecencyList')
category = self._labelListToCategoryNumber(record.anomalyLabel)
# If record is already in the classifier, overwrite its labeling
if record.ROWID in prototype_idx:
knn.prototypeSetCategory(record.ROWID, category)
return
# Learn this pattern in the knn
pattern = self._getStateAnomalyVector(record)
rowID = record.ROWID
knn.learn(pattern, category, rowID=rowID) | python | def _addRecordToKNN(self, record):
"""
Adds the record to the KNN classifier.
"""
knn = self._knnclassifier._knn
prototype_idx = self._knnclassifier.getParameter('categoryRecencyList')
category = self._labelListToCategoryNumber(record.anomalyLabel)
# If record is already in the classifier, overwrite its labeling
if record.ROWID in prototype_idx:
knn.prototypeSetCategory(record.ROWID, category)
return
# Learn this pattern in the knn
pattern = self._getStateAnomalyVector(record)
rowID = record.ROWID
knn.learn(pattern, category, rowID=rowID) | [
"def",
"_addRecordToKNN",
"(",
"self",
",",
"record",
")",
":",
"knn",
"=",
"self",
".",
"_knnclassifier",
".",
"_knn",
"prototype_idx",
"=",
"self",
".",
"_knnclassifier",
".",
"getParameter",
"(",
"'categoryRecencyList'",
")",
"category",
"=",
"self",
".",
"_labelListToCategoryNumber",
"(",
"record",
".",
"anomalyLabel",
")",
"# If record is already in the classifier, overwrite its labeling",
"if",
"record",
".",
"ROWID",
"in",
"prototype_idx",
":",
"knn",
".",
"prototypeSetCategory",
"(",
"record",
".",
"ROWID",
",",
"category",
")",
"return",
"# Learn this pattern in the knn",
"pattern",
"=",
"self",
".",
"_getStateAnomalyVector",
"(",
"record",
")",
"rowID",
"=",
"record",
".",
"ROWID",
"knn",
".",
"learn",
"(",
"pattern",
",",
"category",
",",
"rowID",
"=",
"rowID",
")"
] | Adds the record to the KNN classifier. | [
"Adds",
"the",
"record",
"to",
"the",
"KNN",
"classifier",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/regions/knn_anomaly_classifier_region.py#L473-L490 | valid |
numenta/nupic | src/nupic/regions/knn_anomaly_classifier_region.py | KNNAnomalyClassifierRegion._deleteRecordsFromKNN | def _deleteRecordsFromKNN(self, recordsToDelete):
"""
Removes the given records from the classifier.
parameters
------------
    recordsToDelete - list of records to delete from the classifier
"""
prototype_idx = self._knnclassifier.getParameter('categoryRecencyList')
idsToDelete = ([r.ROWID for r in recordsToDelete if
not r.setByUser and r.ROWID in prototype_idx])
nProtos = self._knnclassifier._knn._numPatterns
self._knnclassifier._knn.removeIds(idsToDelete)
assert self._knnclassifier._knn._numPatterns == nProtos - len(idsToDelete) | python | def _deleteRecordsFromKNN(self, recordsToDelete):
"""
Removes the given records from the classifier.
parameters
------------
    recordsToDelete - list of records to delete from the classifier
"""
prototype_idx = self._knnclassifier.getParameter('categoryRecencyList')
idsToDelete = ([r.ROWID for r in recordsToDelete if
not r.setByUser and r.ROWID in prototype_idx])
nProtos = self._knnclassifier._knn._numPatterns
self._knnclassifier._knn.removeIds(idsToDelete)
assert self._knnclassifier._knn._numPatterns == nProtos - len(idsToDelete) | [
"def",
"_deleteRecordsFromKNN",
"(",
"self",
",",
"recordsToDelete",
")",
":",
"prototype_idx",
"=",
"self",
".",
"_knnclassifier",
".",
"getParameter",
"(",
"'categoryRecencyList'",
")",
"idsToDelete",
"=",
"(",
"[",
"r",
".",
"ROWID",
"for",
"r",
"in",
"recordsToDelete",
"if",
"not",
"r",
".",
"setByUser",
"and",
"r",
".",
"ROWID",
"in",
"prototype_idx",
"]",
")",
"nProtos",
"=",
"self",
".",
"_knnclassifier",
".",
"_knn",
".",
"_numPatterns",
"self",
".",
"_knnclassifier",
".",
"_knn",
".",
"removeIds",
"(",
"idsToDelete",
")",
"assert",
"self",
".",
"_knnclassifier",
".",
"_knn",
".",
"_numPatterns",
"==",
"nProtos",
"-",
"len",
"(",
"idsToDelete",
")"
] | Removes the given records from the classifier.
parameters
------------
recordsToDelete - list of records to delete from the classifier | [
"Removes",
"the",
"given",
"records",
"from",
"the",
"classifier",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/regions/knn_anomaly_classifier_region.py#L494-L509 | valid |
numenta/nupic | src/nupic/regions/knn_anomaly_classifier_region.py | KNNAnomalyClassifierRegion._deleteRangeFromKNN | def _deleteRangeFromKNN(self, start=0, end=None):
"""
Removes any stored records within the range from start to
end. Noninclusive of end.
parameters
------------
start - integer representing the ROWID of the start of the deletion range,
end - integer representing the ROWID of the end of the deletion range,
if None, it will default to end.
"""
prototype_idx = numpy.array(
self._knnclassifier.getParameter('categoryRecencyList'))
if end is None:
end = prototype_idx.max() + 1
idsIdxToDelete = numpy.logical_and(prototype_idx >= start,
prototype_idx < end)
idsToDelete = prototype_idx[idsIdxToDelete]
nProtos = self._knnclassifier._knn._numPatterns
self._knnclassifier._knn.removeIds(idsToDelete.tolist())
assert self._knnclassifier._knn._numPatterns == nProtos - len(idsToDelete) | python | def _deleteRangeFromKNN(self, start=0, end=None):
"""
Removes any stored records within the range from start to
end. Noninclusive of end.
parameters
------------
start - integer representing the ROWID of the start of the deletion range,
end - integer representing the ROWID of the end of the deletion range,
if None, it will default to end.
"""
prototype_idx = numpy.array(
self._knnclassifier.getParameter('categoryRecencyList'))
if end is None:
end = prototype_idx.max() + 1
idsIdxToDelete = numpy.logical_and(prototype_idx >= start,
prototype_idx < end)
idsToDelete = prototype_idx[idsIdxToDelete]
nProtos = self._knnclassifier._knn._numPatterns
self._knnclassifier._knn.removeIds(idsToDelete.tolist())
assert self._knnclassifier._knn._numPatterns == nProtos - len(idsToDelete) | [
"def",
"_deleteRangeFromKNN",
"(",
"self",
",",
"start",
"=",
"0",
",",
"end",
"=",
"None",
")",
":",
"prototype_idx",
"=",
"numpy",
".",
"array",
"(",
"self",
".",
"_knnclassifier",
".",
"getParameter",
"(",
"'categoryRecencyList'",
")",
")",
"if",
"end",
"is",
"None",
":",
"end",
"=",
"prototype_idx",
".",
"max",
"(",
")",
"+",
"1",
"idsIdxToDelete",
"=",
"numpy",
".",
"logical_and",
"(",
"prototype_idx",
">=",
"start",
",",
"prototype_idx",
"<",
"end",
")",
"idsToDelete",
"=",
"prototype_idx",
"[",
"idsIdxToDelete",
"]",
"nProtos",
"=",
"self",
".",
"_knnclassifier",
".",
"_knn",
".",
"_numPatterns",
"self",
".",
"_knnclassifier",
".",
"_knn",
".",
"removeIds",
"(",
"idsToDelete",
".",
"tolist",
"(",
")",
")",
"assert",
"self",
".",
"_knnclassifier",
".",
"_knn",
".",
"_numPatterns",
"==",
"nProtos",
"-",
"len",
"(",
"idsToDelete",
")"
] | Removes any stored records within the range from start to
end. Noninclusive of end.
parameters
------------
start - integer representing the ROWID of the start of the deletion range,
end - integer representing the ROWID of the end of the deletion range,
if None, it will default to end. | [
"Removes",
"any",
"stored",
"records",
"within",
"the",
"range",
"from",
"start",
"to",
"end",
".",
"Noninclusive",
"of",
"end",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/regions/knn_anomaly_classifier_region.py#L512-L535 | valid |
numenta/nupic | src/nupic/regions/knn_anomaly_classifier_region.py | KNNAnomalyClassifierRegion._recomputeRecordFromKNN | def _recomputeRecordFromKNN(self, record):
"""
returns the classified labeling of record
"""
inputs = {
"categoryIn": [None],
"bottomUpIn": self._getStateAnomalyVector(record),
}
outputs = {"categoriesOut": numpy.zeros((1,)),
"bestPrototypeIndices":numpy.zeros((1,)),
"categoryProbabilitiesOut":numpy.zeros((1,))}
# Only use points before record to classify and after the wait period.
classifier_indexes = numpy.array(
self._knnclassifier.getParameter('categoryRecencyList'))
valid_idx = numpy.where(
(classifier_indexes >= self.getParameter('trainRecords')) &
(classifier_indexes < record.ROWID)
)[0].tolist()
if len(valid_idx) == 0:
return None
self._knnclassifier.setParameter('inferenceMode', None, True)
self._knnclassifier.setParameter('learningMode', None, False)
self._knnclassifier.compute(inputs, outputs)
self._knnclassifier.setParameter('learningMode', None, True)
classifier_distances = self._knnclassifier.getLatestDistances()
valid_distances = classifier_distances[valid_idx]
if valid_distances.min() <= self._classificationMaxDist:
classifier_indexes_prev = classifier_indexes[valid_idx]
rowID = classifier_indexes_prev[valid_distances.argmin()]
indexID = numpy.where(classifier_indexes == rowID)[0][0]
category = self._knnclassifier.getCategoryList()[indexID]
return category
return None | python | def _recomputeRecordFromKNN(self, record):
"""
returns the classified labeling of record
"""
inputs = {
"categoryIn": [None],
"bottomUpIn": self._getStateAnomalyVector(record),
}
outputs = {"categoriesOut": numpy.zeros((1,)),
"bestPrototypeIndices":numpy.zeros((1,)),
"categoryProbabilitiesOut":numpy.zeros((1,))}
# Only use points before record to classify and after the wait period.
classifier_indexes = numpy.array(
self._knnclassifier.getParameter('categoryRecencyList'))
valid_idx = numpy.where(
(classifier_indexes >= self.getParameter('trainRecords')) &
(classifier_indexes < record.ROWID)
)[0].tolist()
if len(valid_idx) == 0:
return None
self._knnclassifier.setParameter('inferenceMode', None, True)
self._knnclassifier.setParameter('learningMode', None, False)
self._knnclassifier.compute(inputs, outputs)
self._knnclassifier.setParameter('learningMode', None, True)
classifier_distances = self._knnclassifier.getLatestDistances()
valid_distances = classifier_distances[valid_idx]
if valid_distances.min() <= self._classificationMaxDist:
classifier_indexes_prev = classifier_indexes[valid_idx]
rowID = classifier_indexes_prev[valid_distances.argmin()]
indexID = numpy.where(classifier_indexes == rowID)[0][0]
category = self._knnclassifier.getCategoryList()[indexID]
return category
return None | [
"def",
"_recomputeRecordFromKNN",
"(",
"self",
",",
"record",
")",
":",
"inputs",
"=",
"{",
"\"categoryIn\"",
":",
"[",
"None",
"]",
",",
"\"bottomUpIn\"",
":",
"self",
".",
"_getStateAnomalyVector",
"(",
"record",
")",
",",
"}",
"outputs",
"=",
"{",
"\"categoriesOut\"",
":",
"numpy",
".",
"zeros",
"(",
"(",
"1",
",",
")",
")",
",",
"\"bestPrototypeIndices\"",
":",
"numpy",
".",
"zeros",
"(",
"(",
"1",
",",
")",
")",
",",
"\"categoryProbabilitiesOut\"",
":",
"numpy",
".",
"zeros",
"(",
"(",
"1",
",",
")",
")",
"}",
"# Only use points before record to classify and after the wait period.",
"classifier_indexes",
"=",
"numpy",
".",
"array",
"(",
"self",
".",
"_knnclassifier",
".",
"getParameter",
"(",
"'categoryRecencyList'",
")",
")",
"valid_idx",
"=",
"numpy",
".",
"where",
"(",
"(",
"classifier_indexes",
">=",
"self",
".",
"getParameter",
"(",
"'trainRecords'",
")",
")",
"&",
"(",
"classifier_indexes",
"<",
"record",
".",
"ROWID",
")",
")",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
"if",
"len",
"(",
"valid_idx",
")",
"==",
"0",
":",
"return",
"None",
"self",
".",
"_knnclassifier",
".",
"setParameter",
"(",
"'inferenceMode'",
",",
"None",
",",
"True",
")",
"self",
".",
"_knnclassifier",
".",
"setParameter",
"(",
"'learningMode'",
",",
"None",
",",
"False",
")",
"self",
".",
"_knnclassifier",
".",
"compute",
"(",
"inputs",
",",
"outputs",
")",
"self",
".",
"_knnclassifier",
".",
"setParameter",
"(",
"'learningMode'",
",",
"None",
",",
"True",
")",
"classifier_distances",
"=",
"self",
".",
"_knnclassifier",
".",
"getLatestDistances",
"(",
")",
"valid_distances",
"=",
"classifier_distances",
"[",
"valid_idx",
"]",
"if",
"valid_distances",
".",
"min",
"(",
")",
"<=",
"self",
".",
"_classificationMaxDist",
":",
"classifier_indexes_prev",
"=",
"classifier_indexes",
"[",
"valid_idx",
"]",
"rowID",
"=",
"classifier_indexes_prev",
"[",
"valid_distances",
".",
"argmin",
"(",
")",
"]",
"indexID",
"=",
"numpy",
".",
"where",
"(",
"classifier_indexes",
"==",
"rowID",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"category",
"=",
"self",
".",
"_knnclassifier",
".",
"getCategoryList",
"(",
")",
"[",
"indexID",
"]",
"return",
"category",
"return",
"None"
] | returns the classified labeling of record | [
"returns",
"the",
"classified",
"labeling",
"of",
"record"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/regions/knn_anomaly_classifier_region.py#L538-L575 | valid |
numenta/nupic | src/nupic/regions/knn_anomaly_classifier_region.py | KNNAnomalyClassifierRegion._labelToCategoryNumber | def _labelToCategoryNumber(self, label):
"""
Since the KNN Classifier stores categories as numbers, we must store each
label as a number. This method converts from a label to a unique number.
Each label is assigned a unique bit so multiple labels may be assigned to
a single record.
"""
if label not in self.saved_categories:
self.saved_categories.append(label)
return pow(2, self.saved_categories.index(label)) | python | def _labelToCategoryNumber(self, label):
"""
Since the KNN Classifier stores categories as numbers, we must store each
label as a number. This method converts from a label to a unique number.
Each label is assigned a unique bit so multiple labels may be assigned to
a single record.
"""
if label not in self.saved_categories:
self.saved_categories.append(label)
return pow(2, self.saved_categories.index(label)) | [
"def",
"_labelToCategoryNumber",
"(",
"self",
",",
"label",
")",
":",
"if",
"label",
"not",
"in",
"self",
".",
"saved_categories",
":",
"self",
".",
"saved_categories",
".",
"append",
"(",
"label",
")",
"return",
"pow",
"(",
"2",
",",
"self",
".",
"saved_categories",
".",
"index",
"(",
"label",
")",
")"
] | Since the KNN Classifier stores categories as numbers, we must store each
label as a number. This method converts from a label to a unique number.
Each label is assigned a unique bit so multiple labels may be assigned to
a single record. | [
"Since",
"the",
"KNN",
"Classifier",
"stores",
"categories",
"as",
"numbers",
"we",
"must",
"store",
"each",
"label",
"as",
"a",
"number",
".",
"This",
"method",
"converts",
"from",
"a",
"label",
"to",
"a",
"unique",
"number",
".",
"Each",
"label",
"is",
"assigned",
"a",
"unique",
"bit",
"so",
"multiple",
"labels",
"may",
"be",
"assigned",
"to",
"a",
"single",
"record",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/regions/knn_anomaly_classifier_region.py#L578-L587 | valid |
numenta/nupic | src/nupic/regions/knn_anomaly_classifier_region.py | KNNAnomalyClassifierRegion._labelListToCategoryNumber | def _labelListToCategoryNumber(self, labelList):
"""
This method takes a list of labels and returns a unique category number.
This enables this class to store a list of categories for each point since
the KNN classifier only stores a single number category for each record.
"""
categoryNumber = 0
for label in labelList:
categoryNumber += self._labelToCategoryNumber(label)
return categoryNumber | python | def _labelListToCategoryNumber(self, labelList):
"""
This method takes a list of labels and returns a unique category number.
This enables this class to store a list of categories for each point since
the KNN classifier only stores a single number category for each record.
"""
categoryNumber = 0
for label in labelList:
categoryNumber += self._labelToCategoryNumber(label)
return categoryNumber | [
"def",
"_labelListToCategoryNumber",
"(",
"self",
",",
"labelList",
")",
":",
"categoryNumber",
"=",
"0",
"for",
"label",
"in",
"labelList",
":",
"categoryNumber",
"+=",
"self",
".",
"_labelToCategoryNumber",
"(",
"label",
")",
"return",
"categoryNumber"
] | This method takes a list of labels and returns a unique category number.
This enables this class to store a list of categories for each point since
the KNN classifier only stores a single number category for each record. | [
"This",
"method",
"takes",
"a",
"list",
"of",
"labels",
"and",
"returns",
"a",
"unique",
"category",
"number",
".",
"This",
"enables",
"this",
"class",
"to",
"store",
"a",
"list",
"of",
"categories",
"for",
"each",
"point",
"since",
"the",
"KNN",
"classifier",
"only",
"stores",
"a",
"single",
"number",
"category",
"for",
"each",
"record",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/regions/knn_anomaly_classifier_region.py#L589-L598 | valid |
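The two encoding helpers above give every distinct label its own bit, so a whole set of labels packs into a single integer for the KNN classifier. A self-contained sketch with made-up label names:

def label_to_category_number(label, saved_categories):
    # The first time a label is seen it is appended, which fixes its bit position.
    if label not in saved_categories:
        saved_categories.append(label)
    return 2 ** saved_categories.index(label)

def label_list_to_category_number(labels, saved_categories):
    # Distinct labels combine additively, i.e. a bitwise OR of their bits.
    return sum(label_to_category_number(label, saved_categories) for label in labels)

cats = []
print(label_list_to_category_number(["spike", "drift"], cats))  # -> 3 (bits 0 and 1)
print(cats)                                                     # -> ['spike', 'drift']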
numenta/nupic | src/nupic/regions/knn_anomaly_classifier_region.py | KNNAnomalyClassifierRegion._categoryToLabelList | def _categoryToLabelList(self, category):
"""
Converts a category number into a list of labels
"""
if category is None:
return []
labelList = []
labelNum = 0
while category > 0:
if category % 2 == 1:
labelList.append(self.saved_categories[labelNum])
labelNum += 1
category = category >> 1
return labelList | python | def _categoryToLabelList(self, category):
"""
Converts a category number into a list of labels
"""
if category is None:
return []
labelList = []
labelNum = 0
while category > 0:
if category % 2 == 1:
labelList.append(self.saved_categories[labelNum])
labelNum += 1
category = category >> 1
return labelList | [
"def",
"_categoryToLabelList",
"(",
"self",
",",
"category",
")",
":",
"if",
"category",
"is",
"None",
":",
"return",
"[",
"]",
"labelList",
"=",
"[",
"]",
"labelNum",
"=",
"0",
"while",
"category",
">",
"0",
":",
"if",
"category",
"%",
"2",
"==",
"1",
":",
"labelList",
".",
"append",
"(",
"self",
".",
"saved_categories",
"[",
"labelNum",
"]",
")",
"labelNum",
"+=",
"1",
"category",
"=",
"category",
">>",
"1",
"return",
"labelList"
] | Converts a category number into a list of labels | [
"Converts",
"a",
"category",
"number",
"into",
"a",
"list",
"of",
"labels"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/regions/knn_anomaly_classifier_region.py#L601-L615 | valid |
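And the inverse direction, sketched the same way: walk the bits of the packed category number and collect the matching label names (labels again made up):

def category_to_label_list(category, saved_categories):
    labels = []
    bit = 0
    while category > 0:
        if category % 2 == 1:              # this bit is set, so its label applies
            labels.append(saved_categories[bit])
        bit += 1
        category >>= 1                     # move to the next bit
    return labels

print(category_to_label_list(5, ["spike", "drift", "flatline"]))  # -> ['spike', 'flatline']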
numenta/nupic | src/nupic/regions/knn_anomaly_classifier_region.py | KNNAnomalyClassifierRegion._getStateAnomalyVector | def _getStateAnomalyVector(self, state):
"""
    Returns a state's anomaly vector, converting it from sparse to dense
"""
vector = numpy.zeros(self._anomalyVectorLength)
vector[state.anomalyVector] = 1
return vector | python | def _getStateAnomalyVector(self, state):
"""
    Returns a state's anomaly vector, converting it from sparse to dense
"""
vector = numpy.zeros(self._anomalyVectorLength)
vector[state.anomalyVector] = 1
return vector | [
"def",
"_getStateAnomalyVector",
"(",
"self",
",",
"state",
")",
":",
"vector",
"=",
"numpy",
".",
"zeros",
"(",
"self",
".",
"_anomalyVectorLength",
")",
"vector",
"[",
"state",
".",
"anomalyVector",
"]",
"=",
"1",
"return",
"vector"
] | Returns a state's anomaly vector, converting it from sparse to dense | [
"Returns",
"a",
"state",
"s",
"anomaly",
"vertor",
"converting",
"it",
"from",
"spare",
"to",
"dense"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/regions/knn_anomaly_classifier_region.py#L618-L624 | valid |
numenta/nupic | src/nupic/regions/knn_anomaly_classifier_region.py | KNNAnomalyClassifierRegion.getLabels | def getLabels(self, start=None, end=None):
"""
Get the labels on classified points within range start to end. Not inclusive
of end.
:returns: (dict) with format:
::
{
'isProcessing': boolean,
'recordLabels': list of results
}
``isProcessing`` - currently always false as recalculation blocks; used if
reprocessing of records is still being performed;
Each item in ``recordLabels`` is of format:
::
{
'ROWID': id of the row,
'labels': list of strings
}
"""
if len(self._recordsCache) == 0:
return {
'isProcessing': False,
'recordLabels': []
}
try:
start = int(start)
except Exception:
start = 0
try:
end = int(end)
except Exception:
end = self._recordsCache[-1].ROWID
if end <= start:
raise HTMPredictionModelInvalidRangeError("Invalid supplied range for 'getLabels'.",
debugInfo={
'requestRange': {
'startRecordID': start,
'endRecordID': end
},
'numRecordsStored': len(self._recordsCache)
})
results = {
'isProcessing': False,
'recordLabels': []
}
ROWIDX = numpy.array(
self._knnclassifier.getParameter('categoryRecencyList'))
validIdx = numpy.where((ROWIDX >= start) & (ROWIDX < end))[0].tolist()
categories = self._knnclassifier.getCategoryList()
for idx in validIdx:
row = dict(
ROWID=int(ROWIDX[idx]),
labels=self._categoryToLabelList(categories[idx]))
results['recordLabels'].append(row)
return results | python | def getLabels(self, start=None, end=None):
"""
Get the labels on classified points within range start to end. Not inclusive
of end.
:returns: (dict) with format:
::
{
'isProcessing': boolean,
'recordLabels': list of results
}
``isProcessing`` - currently always false as recalculation blocks; used if
reprocessing of records is still being performed;
Each item in ``recordLabels`` is of format:
::
{
'ROWID': id of the row,
'labels': list of strings
}
"""
if len(self._recordsCache) == 0:
return {
'isProcessing': False,
'recordLabels': []
}
try:
start = int(start)
except Exception:
start = 0
try:
end = int(end)
except Exception:
end = self._recordsCache[-1].ROWID
if end <= start:
raise HTMPredictionModelInvalidRangeError("Invalid supplied range for 'getLabels'.",
debugInfo={
'requestRange': {
'startRecordID': start,
'endRecordID': end
},
'numRecordsStored': len(self._recordsCache)
})
results = {
'isProcessing': False,
'recordLabels': []
}
ROWIDX = numpy.array(
self._knnclassifier.getParameter('categoryRecencyList'))
validIdx = numpy.where((ROWIDX >= start) & (ROWIDX < end))[0].tolist()
categories = self._knnclassifier.getCategoryList()
for idx in validIdx:
row = dict(
ROWID=int(ROWIDX[idx]),
labels=self._categoryToLabelList(categories[idx]))
results['recordLabels'].append(row)
return results | [
"def",
"getLabels",
"(",
"self",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
")",
":",
"if",
"len",
"(",
"self",
".",
"_recordsCache",
")",
"==",
"0",
":",
"return",
"{",
"'isProcessing'",
":",
"False",
",",
"'recordLabels'",
":",
"[",
"]",
"}",
"try",
":",
"start",
"=",
"int",
"(",
"start",
")",
"except",
"Exception",
":",
"start",
"=",
"0",
"try",
":",
"end",
"=",
"int",
"(",
"end",
")",
"except",
"Exception",
":",
"end",
"=",
"self",
".",
"_recordsCache",
"[",
"-",
"1",
"]",
".",
"ROWID",
"if",
"end",
"<=",
"start",
":",
"raise",
"HTMPredictionModelInvalidRangeError",
"(",
"\"Invalid supplied range for 'getLabels'.\"",
",",
"debugInfo",
"=",
"{",
"'requestRange'",
":",
"{",
"'startRecordID'",
":",
"start",
",",
"'endRecordID'",
":",
"end",
"}",
",",
"'numRecordsStored'",
":",
"len",
"(",
"self",
".",
"_recordsCache",
")",
"}",
")",
"results",
"=",
"{",
"'isProcessing'",
":",
"False",
",",
"'recordLabels'",
":",
"[",
"]",
"}",
"ROWIDX",
"=",
"numpy",
".",
"array",
"(",
"self",
".",
"_knnclassifier",
".",
"getParameter",
"(",
"'categoryRecencyList'",
")",
")",
"validIdx",
"=",
"numpy",
".",
"where",
"(",
"(",
"ROWIDX",
">=",
"start",
")",
"&",
"(",
"ROWIDX",
"<",
"end",
")",
")",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
"categories",
"=",
"self",
".",
"_knnclassifier",
".",
"getCategoryList",
"(",
")",
"for",
"idx",
"in",
"validIdx",
":",
"row",
"=",
"dict",
"(",
"ROWID",
"=",
"int",
"(",
"ROWIDX",
"[",
"idx",
"]",
")",
",",
"labels",
"=",
"self",
".",
"_categoryToLabelList",
"(",
"categories",
"[",
"idx",
"]",
")",
")",
"results",
"[",
"'recordLabels'",
"]",
".",
"append",
"(",
"row",
")",
"return",
"results"
] | Get the labels on classified points within range start to end. Not inclusive
of end.
:returns: (dict) with format:
::
{
'isProcessing': boolean,
'recordLabels': list of results
}
``isProcessing`` - currently always false as recalculation blocks; used if
reprocessing of records is still being performed;
Each item in ``recordLabels`` is of format:
::
{
'ROWID': id of the row,
'labels': list of strings
} | [
"Get",
"the",
"labels",
"on",
"classified",
"points",
"within",
"range",
"start",
"to",
"end",
".",
"Not",
"inclusive",
"of",
"end",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/regions/knn_anomaly_classifier_region.py#L627-L694 | valid |
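The docstring above fixes the return shape of getLabels(); here is a tiny runnable sketch of consuming that shape, with made-up ROWIDs and label names rather than output from a real region:

result = {
    'isProcessing': False,
    'recordLabels': [
        {'ROWID': 1003, 'labels': ['spike']},
        {'ROWID': 1017, 'labels': ['spike', 'drift']},
    ],
}

if not result['isProcessing']:
    for entry in result['recordLabels']:
        print(entry['ROWID'], ', '.join(entry['labels']))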
numenta/nupic | src/nupic/regions/knn_anomaly_classifier_region.py | KNNAnomalyClassifierRegion.addLabel | def addLabel(self, start, end, labelName):
"""
Add the label labelName to each record with record ROWID in range from
``start`` to ``end``, noninclusive of end.
This will recalculate all points from end to the last record stored in the
internal cache of this classifier.
:param start: (int) start index
:param end: (int) end index (noninclusive)
:param labelName: (string) label name
"""
if len(self._recordsCache) == 0:
raise HTMPredictionModelInvalidRangeError("Invalid supplied range for 'addLabel'. "
"Model has no saved records.")
try:
start = int(start)
except Exception:
start = 0
try:
end = int(end)
except Exception:
end = int(self._recordsCache[-1].ROWID)
startID = self._recordsCache[0].ROWID
clippedStart = max(0, start - startID)
clippedEnd = max(0, min( len( self._recordsCache) , end - startID))
if clippedEnd <= clippedStart:
raise HTMPredictionModelInvalidRangeError("Invalid supplied range for 'addLabel'.",
debugInfo={
'requestRange': {
'startRecordID': start,
'endRecordID': end
},
'clippedRequestRange': {
'startRecordID': clippedStart,
'endRecordID': clippedEnd
},
'validRange': {
'startRecordID': startID,
'endRecordID': self._recordsCache[len(self._recordsCache)-1].ROWID
},
'numRecordsStored': len(self._recordsCache)
})
# Add label to range [clippedStart, clippedEnd)
for state in self._recordsCache[clippedStart:clippedEnd]:
if labelName not in state.anomalyLabel:
state.anomalyLabel.append(labelName)
state.setByUser = True
self._addRecordToKNN(state)
assert len(self.saved_categories) > 0
# Recompute [end, ...)
for state in self._recordsCache[clippedEnd:]:
self._classifyState(state) | python | def addLabel(self, start, end, labelName):
"""
Add the label labelName to each record with record ROWID in range from
``start`` to ``end``, noninclusive of end.
This will recalculate all points from end to the last record stored in the
internal cache of this classifier.
:param start: (int) start index
:param end: (int) end index (noninclusive)
:param labelName: (string) label name
"""
if len(self._recordsCache) == 0:
raise HTMPredictionModelInvalidRangeError("Invalid supplied range for 'addLabel'. "
"Model has no saved records.")
try:
start = int(start)
except Exception:
start = 0
try:
end = int(end)
except Exception:
end = int(self._recordsCache[-1].ROWID)
startID = self._recordsCache[0].ROWID
clippedStart = max(0, start - startID)
clippedEnd = max(0, min( len( self._recordsCache) , end - startID))
if clippedEnd <= clippedStart:
raise HTMPredictionModelInvalidRangeError("Invalid supplied range for 'addLabel'.",
debugInfo={
'requestRange': {
'startRecordID': start,
'endRecordID': end
},
'clippedRequestRange': {
'startRecordID': clippedStart,
'endRecordID': clippedEnd
},
'validRange': {
'startRecordID': startID,
'endRecordID': self._recordsCache[len(self._recordsCache)-1].ROWID
},
'numRecordsStored': len(self._recordsCache)
})
# Add label to range [clippedStart, clippedEnd)
for state in self._recordsCache[clippedStart:clippedEnd]:
if labelName not in state.anomalyLabel:
state.anomalyLabel.append(labelName)
state.setByUser = True
self._addRecordToKNN(state)
assert len(self.saved_categories) > 0
# Recompute [end, ...)
for state in self._recordsCache[clippedEnd:]:
self._classifyState(state) | [
"def",
"addLabel",
"(",
"self",
",",
"start",
",",
"end",
",",
"labelName",
")",
":",
"if",
"len",
"(",
"self",
".",
"_recordsCache",
")",
"==",
"0",
":",
"raise",
"HTMPredictionModelInvalidRangeError",
"(",
"\"Invalid supplied range for 'addLabel'. \"",
"\"Model has no saved records.\"",
")",
"try",
":",
"start",
"=",
"int",
"(",
"start",
")",
"except",
"Exception",
":",
"start",
"=",
"0",
"try",
":",
"end",
"=",
"int",
"(",
"end",
")",
"except",
"Exception",
":",
"end",
"=",
"int",
"(",
"self",
".",
"_recordsCache",
"[",
"-",
"1",
"]",
".",
"ROWID",
")",
"startID",
"=",
"self",
".",
"_recordsCache",
"[",
"0",
"]",
".",
"ROWID",
"clippedStart",
"=",
"max",
"(",
"0",
",",
"start",
"-",
"startID",
")",
"clippedEnd",
"=",
"max",
"(",
"0",
",",
"min",
"(",
"len",
"(",
"self",
".",
"_recordsCache",
")",
",",
"end",
"-",
"startID",
")",
")",
"if",
"clippedEnd",
"<=",
"clippedStart",
":",
"raise",
"HTMPredictionModelInvalidRangeError",
"(",
"\"Invalid supplied range for 'addLabel'.\"",
",",
"debugInfo",
"=",
"{",
"'requestRange'",
":",
"{",
"'startRecordID'",
":",
"start",
",",
"'endRecordID'",
":",
"end",
"}",
",",
"'clippedRequestRange'",
":",
"{",
"'startRecordID'",
":",
"clippedStart",
",",
"'endRecordID'",
":",
"clippedEnd",
"}",
",",
"'validRange'",
":",
"{",
"'startRecordID'",
":",
"startID",
",",
"'endRecordID'",
":",
"self",
".",
"_recordsCache",
"[",
"len",
"(",
"self",
".",
"_recordsCache",
")",
"-",
"1",
"]",
".",
"ROWID",
"}",
",",
"'numRecordsStored'",
":",
"len",
"(",
"self",
".",
"_recordsCache",
")",
"}",
")",
"# Add label to range [clippedStart, clippedEnd)",
"for",
"state",
"in",
"self",
".",
"_recordsCache",
"[",
"clippedStart",
":",
"clippedEnd",
"]",
":",
"if",
"labelName",
"not",
"in",
"state",
".",
"anomalyLabel",
":",
"state",
".",
"anomalyLabel",
".",
"append",
"(",
"labelName",
")",
"state",
".",
"setByUser",
"=",
"True",
"self",
".",
"_addRecordToKNN",
"(",
"state",
")",
"assert",
"len",
"(",
"self",
".",
"saved_categories",
")",
">",
"0",
"# Recompute [end, ...)",
"for",
"state",
"in",
"self",
".",
"_recordsCache",
"[",
"clippedEnd",
":",
"]",
":",
"self",
".",
"_classifyState",
"(",
"state",
")"
] | Add the label labelName to each record with record ROWID in range from
``start`` to ``end``, noninclusive of end.
This will recalculate all points from end to the last record stored in the
internal cache of this classifier.
:param start: (int) start index
:param end: (int) end index (noninclusive)
:param labelName: (string) label name | [
"Add",
"the",
"label",
"labelName",
"to",
"each",
"record",
"with",
"record",
"ROWID",
"in",
"range",
"from",
"start",
"to",
"end",
"noninclusive",
"of",
"end",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/regions/knn_anomaly_classifier_region.py#L697-L757 | valid |
numenta/nupic | src/nupic/regions/knn_anomaly_classifier_region.py | KNNAnomalyClassifierRegion.removeLabels | def removeLabels(self, start=None, end=None, labelFilter=None):
"""
Remove labels from each record with record ROWID in range from
``start`` to ``end``, noninclusive of end. Removes all records if
``labelFilter`` is None, otherwise only removes the labels equal to
``labelFilter``.
This will recalculate all points from end to the last record stored in the
internal cache of this classifier.
:param start: (int) start index
:param end: (int) end index (noninclusive)
:param labelFilter: (string) label filter
"""
if len(self._recordsCache) == 0:
raise HTMPredictionModelInvalidRangeError("Invalid supplied range for "
"'removeLabels'. Model has no saved records.")
try:
start = int(start)
except Exception:
start = 0
try:
end = int(end)
except Exception:
end = self._recordsCache[-1].ROWID
startID = self._recordsCache[0].ROWID
clippedStart = 0 if start is None else max(0, start - startID)
clippedEnd = len(self._recordsCache) if end is None else \
max(0, min( len( self._recordsCache) , end - startID))
if clippedEnd <= clippedStart:
raise HTMPredictionModelInvalidRangeError("Invalid supplied range for "
"'removeLabels'.", debugInfo={
'requestRange': {
'startRecordID': start,
'endRecordID': end
},
'clippedRequestRange': {
'startRecordID': clippedStart,
'endRecordID': clippedEnd
},
'validRange': {
'startRecordID': startID,
'endRecordID': self._recordsCache[len(self._recordsCache)-1].ROWID
},
'numRecordsStored': len(self._recordsCache)
})
# Remove records within the cache
recordsToDelete = []
for state in self._recordsCache[clippedStart:clippedEnd]:
if labelFilter is not None:
if labelFilter in state.anomalyLabel:
state.anomalyLabel.remove(labelFilter)
else:
state.anomalyLabel = []
state.setByUser = False
recordsToDelete.append(state)
self._deleteRecordsFromKNN(recordsToDelete)
# Remove records not in cache
self._deleteRangeFromKNN(start, end)
# Recompute [clippedEnd, ...)
for state in self._recordsCache[clippedEnd:]:
self._classifyState(state) | python | def removeLabels(self, start=None, end=None, labelFilter=None):
"""
Remove labels from each record with record ROWID in range from
``start`` to ``end``, noninclusive of end. Removes all records if
``labelFilter`` is None, otherwise only removes the labels equal to
``labelFilter``.
This will recalculate all points from end to the last record stored in the
internal cache of this classifier.
:param start: (int) start index
:param end: (int) end index (noninclusive)
:param labelFilter: (string) label filter
"""
if len(self._recordsCache) == 0:
raise HTMPredictionModelInvalidRangeError("Invalid supplied range for "
"'removeLabels'. Model has no saved records.")
try:
start = int(start)
except Exception:
start = 0
try:
end = int(end)
except Exception:
end = self._recordsCache[-1].ROWID
startID = self._recordsCache[0].ROWID
clippedStart = 0 if start is None else max(0, start - startID)
clippedEnd = len(self._recordsCache) if end is None else \
max(0, min( len( self._recordsCache) , end - startID))
if clippedEnd <= clippedStart:
raise HTMPredictionModelInvalidRangeError("Invalid supplied range for "
"'removeLabels'.", debugInfo={
'requestRange': {
'startRecordID': start,
'endRecordID': end
},
'clippedRequestRange': {
'startRecordID': clippedStart,
'endRecordID': clippedEnd
},
'validRange': {
'startRecordID': startID,
'endRecordID': self._recordsCache[len(self._recordsCache)-1].ROWID
},
'numRecordsStored': len(self._recordsCache)
})
# Remove records within the cache
recordsToDelete = []
for state in self._recordsCache[clippedStart:clippedEnd]:
if labelFilter is not None:
if labelFilter in state.anomalyLabel:
state.anomalyLabel.remove(labelFilter)
else:
state.anomalyLabel = []
state.setByUser = False
recordsToDelete.append(state)
self._deleteRecordsFromKNN(recordsToDelete)
# Remove records not in cache
self._deleteRangeFromKNN(start, end)
# Recompute [clippedEnd, ...)
for state in self._recordsCache[clippedEnd:]:
self._classifyState(state) | [
"def",
"removeLabels",
"(",
"self",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"labelFilter",
"=",
"None",
")",
":",
"if",
"len",
"(",
"self",
".",
"_recordsCache",
")",
"==",
"0",
":",
"raise",
"HTMPredictionModelInvalidRangeError",
"(",
"\"Invalid supplied range for \"",
"\"'removeLabels'. Model has no saved records.\"",
")",
"try",
":",
"start",
"=",
"int",
"(",
"start",
")",
"except",
"Exception",
":",
"start",
"=",
"0",
"try",
":",
"end",
"=",
"int",
"(",
"end",
")",
"except",
"Exception",
":",
"end",
"=",
"self",
".",
"_recordsCache",
"[",
"-",
"1",
"]",
".",
"ROWID",
"startID",
"=",
"self",
".",
"_recordsCache",
"[",
"0",
"]",
".",
"ROWID",
"clippedStart",
"=",
"0",
"if",
"start",
"is",
"None",
"else",
"max",
"(",
"0",
",",
"start",
"-",
"startID",
")",
"clippedEnd",
"=",
"len",
"(",
"self",
".",
"_recordsCache",
")",
"if",
"end",
"is",
"None",
"else",
"max",
"(",
"0",
",",
"min",
"(",
"len",
"(",
"self",
".",
"_recordsCache",
")",
",",
"end",
"-",
"startID",
")",
")",
"if",
"clippedEnd",
"<=",
"clippedStart",
":",
"raise",
"HTMPredictionModelInvalidRangeError",
"(",
"\"Invalid supplied range for \"",
"\"'removeLabels'.\"",
",",
"debugInfo",
"=",
"{",
"'requestRange'",
":",
"{",
"'startRecordID'",
":",
"start",
",",
"'endRecordID'",
":",
"end",
"}",
",",
"'clippedRequestRange'",
":",
"{",
"'startRecordID'",
":",
"clippedStart",
",",
"'endRecordID'",
":",
"clippedEnd",
"}",
",",
"'validRange'",
":",
"{",
"'startRecordID'",
":",
"startID",
",",
"'endRecordID'",
":",
"self",
".",
"_recordsCache",
"[",
"len",
"(",
"self",
".",
"_recordsCache",
")",
"-",
"1",
"]",
".",
"ROWID",
"}",
",",
"'numRecordsStored'",
":",
"len",
"(",
"self",
".",
"_recordsCache",
")",
"}",
")",
"# Remove records within the cache",
"recordsToDelete",
"=",
"[",
"]",
"for",
"state",
"in",
"self",
".",
"_recordsCache",
"[",
"clippedStart",
":",
"clippedEnd",
"]",
":",
"if",
"labelFilter",
"is",
"not",
"None",
":",
"if",
"labelFilter",
"in",
"state",
".",
"anomalyLabel",
":",
"state",
".",
"anomalyLabel",
".",
"remove",
"(",
"labelFilter",
")",
"else",
":",
"state",
".",
"anomalyLabel",
"=",
"[",
"]",
"state",
".",
"setByUser",
"=",
"False",
"recordsToDelete",
".",
"append",
"(",
"state",
")",
"self",
".",
"_deleteRecordsFromKNN",
"(",
"recordsToDelete",
")",
"# Remove records not in cache",
"self",
".",
"_deleteRangeFromKNN",
"(",
"start",
",",
"end",
")",
"# Recompute [clippedEnd, ...)",
"for",
"state",
"in",
"self",
".",
"_recordsCache",
"[",
"clippedEnd",
":",
"]",
":",
"self",
".",
"_classifyState",
"(",
"state",
")"
] | Remove labels from each record with record ROWID in range from
``start`` to ``end``, noninclusive of end. Removes all records if
``labelFilter`` is None, otherwise only removes the labels equal to
``labelFilter``.
This will recalculate all points from end to the last record stored in the
internal cache of this classifier.
:param start: (int) start index
:param end: (int) end index (noninclusive)
:param labelFilter: (string) label filter | [
"Remove",
"labels",
"from",
"each",
"record",
"with",
"record",
"ROWID",
"in",
"range",
"from",
"start",
"to",
"end",
"noninclusive",
"of",
"end",
".",
"Removes",
"all",
"records",
"if",
"labelFilter",
"is",
"None",
"otherwise",
"only",
"removes",
"the",
"labels",
"equal",
"to",
"labelFilter",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/regions/knn_anomaly_classifier_region.py#L760-L829 | valid |
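addLabel() and removeLabels() both clip the requested ROWID range to what is still held in the records cache before touching the KNN. A small sketch of just that clipping arithmetic (function name and numbers are illustrative):

def clip_range(start, end, first_rowid, cache_len):
    # Convert absolute ROWIDs into cache indexes, clamped to what is actually stored.
    clipped_start = max(0, start - first_rowid)
    clipped_end = max(0, min(cache_len, end - first_rowid))
    return clipped_start, clipped_end

# Cache holds rows 950..1049 (first_rowid=950, 100 records); request rows 900..1000.
print(clip_range(900, 1000, first_rowid=950, cache_len=100))  # -> (0, 50)

On the region itself the corresponding calls would look like region.addLabel(1050, 1060, 'myLabel') and region.removeLabels(1050, 1060, 'myLabel'), both noninclusive of the end row.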
numenta/nupic | src/nupic/data/category_filter.py | CategoryFilter.match | def match(self, record):
'''
Returns True if the record matches any of the provided filters
'''
for field, meta in self.filterDict.iteritems():
index = meta['index']
categories = meta['categories']
for category in categories:
# Record might be blank, handle this
if not record:
continue
if record[index].find(category) != -1:
'''
This field contains the string we're searching for
so we'll keep the records
'''
return True
# None of the categories were found in this record
return False | python | def match(self, record):
'''
Returns True if the record matches any of the provided filters
'''
for field, meta in self.filterDict.iteritems():
index = meta['index']
categories = meta['categories']
for category in categories:
# Record might be blank, handle this
if not record:
continue
if record[index].find(category) != -1:
'''
This field contains the string we're searching for
so we'll keep the records
'''
return True
# None of the categories were found in this record
return False | [
"def",
"match",
"(",
"self",
",",
"record",
")",
":",
"for",
"field",
",",
"meta",
"in",
"self",
".",
"filterDict",
".",
"iteritems",
"(",
")",
":",
"index",
"=",
"meta",
"[",
"'index'",
"]",
"categories",
"=",
"meta",
"[",
"'categories'",
"]",
"for",
"category",
"in",
"categories",
":",
"# Record might be blank, handle this",
"if",
"not",
"record",
":",
"continue",
"if",
"record",
"[",
"index",
"]",
".",
"find",
"(",
"category",
")",
"!=",
"-",
"1",
":",
"'''\n This field contains the string we're searching for\n so we'll keep the records\n '''",
"return",
"True",
"# None of the categories were found in this record",
"return",
"False"
] | Returns True if the record matches any of the provided filters | [
"Returns",
"True",
"if",
"the",
"record",
"matches",
"any",
"of",
"the",
"provided",
"filters"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/category_filter.py#L58-L78 | valid |
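For illustration, the matching rule above restated as a standalone function with a tiny filterDict. The filterDict layout ({'index': ..., 'categories': [...]}) is read off the method body; the CategoryFilter constructor itself is not shown in this record, so it is not used here.

# Standalone restatement of the substring-matching rule above (illustration only).
def record_matches(filterDict, record):
    for meta in filterDict.values():
        if not record:          # blank record: nothing to match
            continue
        for category in meta['categories']:
            if record[meta['index']].find(category) != -1:
                return True     # the field contains the category substring
    return False

filters = {'animal': {'index': 2, 'categories': ['cat', 'dog']}}
print(record_matches(filters, ['2014-01-01', '42.0', 'bobcat']))  # True  ("cat" is a substring)
print(record_matches(filters, ['2014-01-01', '42.0', 'horse']))   # False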
numenta/nupic | src/nupic/algorithms/spatial_pooler.py | _SparseMatrixCorticalColumnAdapter.replace | def replace(self, columnIndex, bitmap):
""" Wraps replaceSparseRow()"""
return super(_SparseMatrixCorticalColumnAdapter, self).replaceSparseRow(
columnIndex, bitmap
) | python | def replace(self, columnIndex, bitmap):
""" Wraps replaceSparseRow()"""
return super(_SparseMatrixCorticalColumnAdapter, self).replaceSparseRow(
columnIndex, bitmap
) | [
"def",
"replace",
"(",
"self",
",",
"columnIndex",
",",
"bitmap",
")",
":",
"return",
"super",
"(",
"_SparseMatrixCorticalColumnAdapter",
",",
"self",
")",
".",
"replaceSparseRow",
"(",
"columnIndex",
",",
"bitmap",
")"
] | Wraps replaceSparseRow() | [
"Wraps",
"replaceSparseRow",
"()"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/spatial_pooler.py#L67-L71 | valid |
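A minimal sketch of the mixin pattern this one-line wrapper supports: pair the adapter with any matrix class exposing replaceSparseRow() and columns can be overwritten from a sparse index list. The stand-in row store below is an assumption for illustration; in nupic the real bases are the SWIG-backed sparse matrices, which are not part of this record.

from nupic.algorithms.spatial_pooler import _SparseMatrixCorticalColumnAdapter

class _RowStore(object):
    """Hypothetical stand-in providing only the replaceSparseRow() call used above."""
    def __init__(self):
        self._rows = {}
    def replaceSparseRow(self, rowIndex, sparseIndices):
        self._rows[rowIndex] = list(sparseIndices)

class _Columns(_SparseMatrixCorticalColumnAdapter, _RowStore):
    pass

cols = _Columns()
cols.replace(3, [0, 5, 9])   # column 3 now maps to inputs 0, 5 and 9
print(cols._rows[3])         # [0, 5, 9]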
numenta/nupic | src/nupic/algorithms/spatial_pooler.py | _SparseMatrixCorticalColumnAdapter.update | def update(self, columnIndex, vector):
""" Wraps setRowFromDense()"""
return super(_SparseMatrixCorticalColumnAdapter, self).setRowFromDense(
columnIndex, vector
) | python | def update(self, columnIndex, vector):
""" Wraps setRowFromDense()"""
return super(_SparseMatrixCorticalColumnAdapter, self).setRowFromDense(
columnIndex, vector
) | [
"def",
"update",
"(",
"self",
",",
"columnIndex",
",",
"vector",
")",
":",
"return",
"super",
"(",
"_SparseMatrixCorticalColumnAdapter",
",",
"self",
")",
".",
"setRowFromDense",
"(",
"columnIndex",
",",
"vector",
")"
] | Wraps setRowFromDense() | [
"Wraps",
"setRowFromDense",
"()"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/spatial_pooler.py#L74-L78 | valid |
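A companion sketch for update(): the same mixin idea, but the forwarded call is setRowFromDense(), so the column is described by a dense vector rather than a list of indices. As above, the stand-in base class is an assumption for illustration only.

import numpy
from nupic.algorithms.spatial_pooler import _SparseMatrixCorticalColumnAdapter

class _DenseRowStore(object):
    """Hypothetical stand-in providing only setRowFromDense()."""
    def setRowFromDense(self, rowIndex, denseRow):
        self.lastRow = (rowIndex, numpy.asarray(denseRow))

class _Permanences(_SparseMatrixCorticalColumnAdapter, _DenseRowStore):
    pass

perms = _Permanences()
perms.update(0, numpy.array([0.0, 0.2, 0.0, 0.7]))  # dense permanence row for column 0
print(perms.lastRow[0])                             # 0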
numenta/nupic | src/nupic/algorithms/spatial_pooler.py | SpatialPooler.setLocalAreaDensity | def setLocalAreaDensity(self, localAreaDensity):
"""
Sets the local area density. Invalidates the 'numActiveColumnsPerInhArea'
parameter
:param localAreaDensity: (float) value to set
"""
assert(localAreaDensity > 0 and localAreaDensity <= 1)
self._localAreaDensity = localAreaDensity
self._numActiveColumnsPerInhArea = 0 | python | def setLocalAreaDensity(self, localAreaDensity):
"""
Sets the local area density. Invalidates the 'numActiveColumnsPerInhArea'
parameter
:param localAreaDensity: (float) value to set
"""
assert(localAreaDensity > 0 and localAreaDensity <= 1)
self._localAreaDensity = localAreaDensity
self._numActiveColumnsPerInhArea = 0 | [
"def",
"setLocalAreaDensity",
"(",
"self",
",",
"localAreaDensity",
")",
":",
"assert",
"(",
"localAreaDensity",
">",
"0",
"and",
"localAreaDensity",
"<=",
"1",
")",
"self",
".",
"_localAreaDensity",
"=",
"localAreaDensity",
"self",
".",
"_numActiveColumnsPerInhArea",
"=",
"0"
] | Sets the local area density. Invalidates the 'numActiveColumnsPerInhArea'
parameter
:param localAreaDensity: (float) value to set | [
"Sets",
"the",
"local",
"area",
"density",
".",
"Invalidates",
"the",
"numActiveColumnsPerInhArea",
"parameter",
":",
"param",
"localAreaDensity",
":",
"(",
"float",
")",
"value",
"to",
"set"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/spatial_pooler.py#L487-L496 | valid |
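A short usage sketch. Only setLocalAreaDensity() and its 0-to-1 bound come from this record; the SpatialPooler constructor keywords and the getter names below are assumptions based on the usual nupic API.

from nupic.algorithms.spatial_pooler import SpatialPooler

# Constructor keywords are assumed typical values, not taken from this record.
sp = SpatialPooler(inputDimensions=(64,), columnDimensions=(128,))

# Ask for roughly 2% of columns active per inhibition area.  Per the method above
# this also zeroes numActiveColumnsPerInhArea, since the two settings are exclusive.
sp.setLocalAreaDensity(0.02)
print(sp.getLocalAreaDensity())            # 0.02
print(sp.getNumActiveColumnsPerInhArea())  # 0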
numenta/nupic | src/nupic/algorithms/spatial_pooler.py | SpatialPooler.getPotential | def getPotential(self, columnIndex, potential):
"""
:param columnIndex: (int) column index to get potential for.
:param potential: (list) will be overwritten with column potentials. Must
match the number of inputs.
"""
assert(columnIndex < self._numColumns)
potential[:] = self._potentialPools[columnIndex] | python | def getPotential(self, columnIndex, potential):
"""
:param columnIndex: (int) column index to get potential for.
:param potential: (list) will be overwritten with column potentials. Must
match the number of inputs.
"""
assert(columnIndex < self._numColumns)
potential[:] = self._potentialPools[columnIndex] | [
"def",
"getPotential",
"(",
"self",
",",
"columnIndex",
",",
"potential",
")",
":",
"assert",
"(",
"columnIndex",
"<",
"self",
".",
"_numColumns",
")",
"potential",
"[",
":",
"]",
"=",
"self",
".",
"_potentialPools",
"[",
"columnIndex",
"]"
] | :param columnIndex: (int) column index to get potential for.
:param potential: (list) will be overwritten with column potentials. Must
match the number of inputs. | [
":",
"param",
"columnIndex",
":",
"(",
"int",
")",
"column",
"index",
"to",
"get",
"potential",
"for",
".",
":",
"param",
"potential",
":",
"(",
"list",
")",
"will",
"be",
"overwritten",
"with",
"column",
"potentials",
".",
"Must",
"match",
"the",
"number",
"of",
"inputs",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/spatial_pooler.py#L792-L799 | valid |
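A usage sketch for reading a column's potential pool. The output buffer must already have one slot per input, since getPotential() overwrites it in place; the constructor keywords and the getNumInputs() call are assumptions from the usual nupic API.

import numpy
from nupic.algorithms.spatial_pooler import SpatialPooler

# Constructor keywords are assumed typical values, not taken from this record.
sp = SpatialPooler(inputDimensions=(64,), columnDimensions=(128,))

potential = numpy.zeros(sp.getNumInputs(), dtype=numpy.uint32)  # one slot per input
sp.getPotential(7, potential)       # overwritten in place for column 7
print(potential.nonzero()[0])       # input indices in column 7's potential pool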
numenta/nupic | src/nupic/algorithms/spatial_pooler.py | SpatialPooler.setPotential | def setPotential(self, columnIndex, potential):
"""
Sets the potential mapping for a given column. ``potential`` size must match
the number of inputs, and must be greater than ``stimulusThreshold``.
:param columnIndex: (int) column index to set potential for.
:param potential: (list) value to set.
"""
assert(columnIndex < self._numColumns)
potentialSparse = numpy.where(potential > 0)[0]
if len(potentialSparse) < self._stimulusThreshold:
raise Exception("This is likely due to a " +
"value of stimulusThreshold that is too large relative " +
"to the input size.")
self._potentialPools.replace(columnIndex, potentialSparse) | python | def setPotential(self, columnIndex, potential):
"""
Sets the potential mapping for a given column. ``potential`` size must match
the number of inputs, and must be greater than ``stimulusThreshold``.
:param columnIndex: (int) column index to set potential for.
:param potential: (list) value to set.
"""
assert(columnIndex < self._numColumns)
potentialSparse = numpy.where(potential > 0)[0]
if len(potentialSparse) < self._stimulusThreshold:
raise Exception("This is likely due to a " +
"value of stimulusThreshold that is too large relative " +
"to the input size.")
self._potentialPools.replace(columnIndex, potentialSparse) | [
"def",
"setPotential",
"(",
"self",
",",
"columnIndex",
",",
"potential",
")",
":",
"assert",
"(",
"columnIndex",
"<",
"self",
".",
"_numColumns",
")",
"potentialSparse",
"=",
"numpy",
".",
"where",
"(",
"potential",
">",
"0",
")",
"[",
"0",
"]",
"if",
"len",
"(",
"potentialSparse",
")",
"<",
"self",
".",
"_stimulusThreshold",
":",
"raise",
"Exception",
"(",
"\"This is likely due to a \"",
"+",
"\"value of stimulusThreshold that is too large relative \"",
"+",
"\"to the input size.\"",
")",
"self",
".",
"_potentialPools",
".",
"replace",
"(",
"columnIndex",
",",
"potentialSparse",
")"
] | Sets the potential mapping for a given column. ``potential`` size must match
the number of inputs, and must be greater than ``stimulusThreshold``.
:param columnIndex: (int) column index to set potential for.
:param potential: (list) value to set. | [
"Sets",
"the",
"potential",
"mapping",
"for",
"a",
"given",
"column",
".",
"potential",
"size",
"must",
"match",
"the",
"number",
"of",
"inputs",
"and",
"must",
"be",
"greater",
"than",
"stimulusThreshold",
".",
":",
"param",
"columnIndex",
":",
"(",
"int",
")",
"column",
"index",
"to",
"set",
"potential",
"for",
".",
":",
"param",
"potential",
":",
"(",
"list",
")",
"value",
"to",
"set",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/spatial_pooler.py#L802-L818 | valid |
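A usage sketch for overriding a column's potential pool by hand. The check in the method body is on the number of on bits, so the pool below keeps at least stimulusThreshold inputs; the constructor keywords are assumptions.

import numpy
from nupic.algorithms.spatial_pooler import SpatialPooler

# Constructor keywords are assumed typical values, not taken from this record.
sp = SpatialPooler(inputDimensions=(64,), columnDimensions=(128,), stimulusThreshold=2)

# Give column 0 a potential pool over inputs 0..9.  The pool has 10 on bits,
# which satisfies the len(potentialSparse) >= stimulusThreshold check above;
# fewer on bits than stimulusThreshold would raise.
potential = numpy.zeros(sp.getNumInputs(), dtype=numpy.uint32)
potential[0:10] = 1
sp.setPotential(0, potential)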
numenta/nupic | src/nupic/algorithms/spatial_pooler.py | SpatialPooler.getPermanence | def getPermanence(self, columnIndex, permanence):
"""
Returns the permanence values for a given column. ``permanence`` size
must match the number of inputs.
:param columnIndex: (int) column index to get permanence for.
:param permanence: (list) will be overwritten with permanences.
"""
assert(columnIndex < self._numColumns)
permanence[:] = self._permanences[columnIndex] | python | def getPermanence(self, columnIndex, permanence):
"""
Returns the permanence values for a given column. ``permanence`` size
must match the number of inputs.
:param columnIndex: (int) column index to get permanence for.
:param permanence: (list) will be overwritten with permanences.
"""
assert(columnIndex < self._numColumns)
permanence[:] = self._permanences[columnIndex] | [
"def",
"getPermanence",
"(",
"self",
",",
"columnIndex",
",",
"permanence",
")",
":",
"assert",
"(",
"columnIndex",
"<",
"self",
".",
"_numColumns",
")",
"permanence",
"[",
":",
"]",
"=",
"self",
".",
"_permanences",
"[",
"columnIndex",
"]"
] | Returns the permanence values for a given column. ``permanence`` size
must match the number of inputs.
:param columnIndex: (int) column index to get permanence for.
:param permanence: (list) will be overwritten with permanences. | [
"Returns",
"the",
"permanence",
"values",
"for",
"a",
"given",
"column",
".",
"permanence",
"size",
"must",
"match",
"the",
"number",
"of",
"inputs",
".",
":",
"param",
"columnIndex",
":",
"(",
"int",
")",
"column",
"index",
"to",
"get",
"permanence",
"for",
".",
":",
"param",
"permanence",
":",
"(",
"list",
")",
"will",
"be",
"overwritten",
"with",
"permanences",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/spatial_pooler.py#L821-L830 | valid |
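A usage sketch for reading back a column's permanences; like getPotential(), the buffer is overwritten in place and must have one entry per input. Constructor keywords are assumptions.

import numpy
from nupic.algorithms.spatial_pooler import SpatialPooler

# Constructor keywords are assumed typical values, not taken from this record.
sp = SpatialPooler(inputDimensions=(64,), columnDimensions=(128,))

perm = numpy.zeros(sp.getNumInputs(), dtype=numpy.float32)  # one permanence per input
sp.getPermanence(3, perm)     # overwritten in place for column 3
print(perm.max())             # strongest synapse on column 3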
numenta/nupic | src/nupic/algorithms/spatial_pooler.py | SpatialPooler.setPermanence | def setPermanence(self, columnIndex, permanence):
"""
Sets the permanence values for a given column. ``permanence`` size must
match the number of inputs.
:param columnIndex: (int) column index to set permanence for.
:param permanence: (list) value to set.
"""
assert(columnIndex < self._numColumns)
self._updatePermanencesForColumn(permanence, columnIndex, raisePerm=False) | python | def setPermanence(self, columnIndex, permanence):
"""
Sets the permanence values for a given column. ``permanence`` size must
match the number of inputs.
:param columnIndex: (int) column index to set permanence for.
:param permanence: (list) value to set.
"""
assert(columnIndex < self._numColumns)
self._updatePermanencesForColumn(permanence, columnIndex, raisePerm=False) | [
"def",
"setPermanence",
"(",
"self",
",",
"columnIndex",
",",
"permanence",
")",
":",
"assert",
"(",
"columnIndex",
"<",
"self",
".",
"_numColumns",
")",
"self",
".",
"_updatePermanencesForColumn",
"(",
"permanence",
",",
"columnIndex",
",",
"raisePerm",
"=",
"False",
")"
] | Sets the permanence values for a given column. ``permanence`` size must
match the number of inputs.
:param columnIndex: (int) column index to set permanence for.
:param permanence: (list) value to set. | [
"Sets",
"the",
"permanence",
"values",
"for",
"a",
"given",
"column",
".",
"permanence",
"size",
"must",
"match",
"the",
"number",
"of",
"inputs",
".",
":",
"param",
"columnIndex",
":",
"(",
"int",
")",
"column",
"index",
"to",
"set",
"permanence",
"for",
".",
":",
"param",
"permanence",
":",
"(",
"list",
")",
"value",
"to",
"set",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/spatial_pooler.py#L833-L842 | valid |
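A closing sketch for the matching setter. setPermanence() routes through _updatePermanencesForColumn() with raisePerm=False, so the supplied values are installed largely as-is and the connected-synapse bookkeeping is refreshed; the constructor keywords, including synPermConnected, are assumptions.

import numpy
from nupic.algorithms.spatial_pooler import SpatialPooler

# Constructor keywords are assumed typical values, not taken from this record.
sp = SpatialPooler(inputDimensions=(64,), columnDimensions=(128,), synPermConnected=0.1)

# Overwrite column 3's permanences wholesale; entries at or above synPermConnected
# count as connected synapses once the pooler refreshes its bookkeeping.
perm = numpy.zeros(sp.getNumInputs(), dtype=numpy.float32)
perm[0:8] = 0.25
sp.setPermanence(3, perm)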