Dataset columns, with the observed minimum and maximum value lengths:

| column | type | min length | max length |
|---|---|---|---|
| nwo | string | 5 | 106 |
| sha | string | 40 | 40 |
| path | string | 4 | 174 |
| language | string (1 distinct value) | | |
| identifier | string | 1 | 140 |
| parameters | string | 0 | 87.7k |
| argument_list | string (1 distinct value) | | |
| return_statement | string | 0 | 426k |
| docstring | string | 0 | 64.3k |
| docstring_summary | string | 0 | 26.3k |
| docstring_tokens | sequence | | |
| function | string | 18 | 4.83M |
| function_tokens | sequence | | |
| url | string | 83 | 304 |
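
The records below (one per extracted function) are easier to work with programmatically than to read in this flattened preview. A minimal sketch of iterating over them follows; it assumes the records are stored as JSON Lines with exactly the columns listed above, and the file name is a placeholder, neither of which is stated by this dump.

```python
import json

# Assumption: one JSON object per line, with the columns from the table above.
# The file name is a placeholder, not given by this preview.
DATA_FILE = "python_functions.jsonl"

def iter_records(path):
    """Yield one record dict per non-empty line of a JSON Lines file."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            if line.strip():
                yield json.loads(line)

documented = 0
for row in iter_records(DATA_FILE):
    # 'nwo' is the "name with owner" repository slug, e.g. "google-research/rigl".
    repo, file_path, name = row["nwo"], row["path"], row["identifier"]
    # 'function' holds the raw source text; 'function_tokens' is its token list.
    n_tokens = len(row["function_tokens"])
    # Count functions that actually carry a docstring.
    if row["docstring"].strip():
        documented += 1
print("functions with a docstring:", documented)
```

Note that the `url` field alone is enough to recover the original context of a record, since it encodes the repository, commit sha, file path, and line range on GitHub.
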
google-research/rigl | f18abc7d82ae3acc6736068408a0186c9efa575c | rigl/experimental/jax/pruning/masked.py | python | _PerNeuronShuffle.__init__ | (self, init_rng, sparsity) | Creates the per-neuron shuffle class, with initial RNG state.
Args:
init_rng: The initial random number generator state to use.
sparsity: The per-layer sparsity of the mask (i.e. % of zeroes), 1.0 will
mask all weights, while 0 will mask none. | Creates the per-neuron shuffle class, with initial RNG state. | [
"Creates",
"the",
"per",
"-",
"neuron",
"shuffle",
"class",
"with",
"initial",
"RNG",
"state",
"."
] | def __init__(self, init_rng, sparsity):
"""Creates the per-neuron shuffle class, with initial RNG state.
Args:
init_rng: The initial random number generator state to use.
sparsity: The per-layer sparsity of the mask (i.e. % of zeroes), 1.0 will
mask all weights, while 0 will mask none.
"""
self._rng = init_rng
self._sparsity = sparsity | [
"def",
"__init__",
"(",
"self",
",",
"init_rng",
",",
"sparsity",
")",
":",
"self",
".",
"_rng",
"=",
"init_rng",
"self",
".",
"_sparsity",
"=",
"sparsity"
] | https://github.com/google-research/rigl/blob/f18abc7d82ae3acc6736068408a0186c9efa575c/rigl/experimental/jax/pruning/masked.py#L380-L389 |
||
facebookresearch/pytorch_GAN_zoo | b75dee40918caabb4fe7ec561522717bf096a8cb | models/trainer/DCGAN_trainer.py | python | DCGANTrainer.initModel | (self) | [] | def initModel(self):
self.model = DCGAN(useGPU=self.useGPU,
**vars(self.modelConfig)) | [
"def",
"initModel",
"(",
"self",
")",
":",
"self",
".",
"model",
"=",
"DCGAN",
"(",
"useGPU",
"=",
"self",
".",
"useGPU",
",",
"*",
"*",
"vars",
"(",
"self",
".",
"modelConfig",
")",
")"
] | https://github.com/facebookresearch/pytorch_GAN_zoo/blob/b75dee40918caabb4fe7ec561522717bf096a8cb/models/trainer/DCGAN_trainer.py#L33-L35 |
||||
biopython/biopython | 2dd97e71762af7b046d7f7f8a4f1e38db6b06c86 | Bio/SearchIO/HmmerIO/hmmer3_tab.py | python | Hmmer3TabWriter.write_file | (self, qresults) | return qresult_counter, hit_counter, hsp_counter, frag_counter | Write to the handle.
Returns a tuple of how many QueryResult, Hit, and HSP objects were written. | Write to the handle. | [
"Write",
"to",
"the",
"handle",
"."
] | def write_file(self, qresults):
"""Write to the handle.
Returns a tuple of how many QueryResult, Hit, and HSP objects were written.
"""
handle = self.handle
qresult_counter, hit_counter, hsp_counter, frag_counter = 0, 0, 0, 0
try:
first_qresult = next(qresults)
except StopIteration:
handle.write(self._build_header())
else:
# write header
handle.write(self._build_header(first_qresult))
# and then the qresults
for qresult in chain([first_qresult], qresults):
if qresult:
handle.write(self._build_row(qresult))
qresult_counter += 1
hit_counter += len(qresult)
hsp_counter += sum(len(hit) for hit in qresult)
frag_counter += sum(len(hit.fragments) for hit in qresult)
return qresult_counter, hit_counter, hsp_counter, frag_counter | [
"def",
"write_file",
"(",
"self",
",",
"qresults",
")",
":",
"handle",
"=",
"self",
".",
"handle",
"qresult_counter",
",",
"hit_counter",
",",
"hsp_counter",
",",
"frag_counter",
"=",
"0",
",",
"0",
",",
"0",
",",
"0",
"try",
":",
"first_qresult",
"=",
"next",
"(",
"qresults",
")",
"except",
"StopIteration",
":",
"handle",
".",
"write",
"(",
"self",
".",
"_build_header",
"(",
")",
")",
"else",
":",
"# write header",
"handle",
".",
"write",
"(",
"self",
".",
"_build_header",
"(",
"first_qresult",
")",
")",
"# and then the qresults",
"for",
"qresult",
"in",
"chain",
"(",
"[",
"first_qresult",
"]",
",",
"qresults",
")",
":",
"if",
"qresult",
":",
"handle",
".",
"write",
"(",
"self",
".",
"_build_row",
"(",
"qresult",
")",
")",
"qresult_counter",
"+=",
"1",
"hit_counter",
"+=",
"len",
"(",
"qresult",
")",
"hsp_counter",
"+=",
"sum",
"(",
"len",
"(",
"hit",
")",
"for",
"hit",
"in",
"qresult",
")",
"frag_counter",
"+=",
"sum",
"(",
"len",
"(",
"hit",
".",
"fragments",
")",
"for",
"hit",
"in",
"qresult",
")",
"return",
"qresult_counter",
",",
"hit_counter",
",",
"hsp_counter",
",",
"frag_counter"
] | https://github.com/biopython/biopython/blob/2dd97e71762af7b046d7f7f8a4f1e38db6b06c86/Bio/SearchIO/HmmerIO/hmmer3_tab.py#L222-L247 |
|
nansencenter/nansat | 5700ec673fbf522c19b8dedcb01cc15f7cd29a6a | nansat/vrt.py | python | VRT.fix_global_metadata | (self, rm_metadata) | Remove unwanted global metadata and escape special characters | Remove unwanted global metadata and escape special characters | [
"Remove",
"unwanted",
"global",
"metadata",
"and",
"escape",
"special",
"characters"
] | def fix_global_metadata(self, rm_metadata):
"""Remove unwanted global metadata and escape special characters"""
metadata = remove_keys(self.dataset.GetMetadata(), rm_metadata)
# Apply escaping to metadata strings to preserve special characters (in XML/HTML format)
metadata_escaped = {}
for key, val in list(metadata.items()):
# Keys not escaped - this may be changed if needed...
metadata_escaped[key] = gdal.EscapeString(val, gdal.CPLES_XML)
self.dataset.SetMetadata(metadata_escaped)
self.dataset.FlushCache() | [
"def",
"fix_global_metadata",
"(",
"self",
",",
"rm_metadata",
")",
":",
"metadata",
"=",
"remove_keys",
"(",
"self",
".",
"dataset",
".",
"GetMetadata",
"(",
")",
",",
"rm_metadata",
")",
"# Apply escaping to metadata strings to preserve special characters (in XML/HTML format)",
"metadata_escaped",
"=",
"{",
"}",
"for",
"key",
",",
"val",
"in",
"list",
"(",
"metadata",
".",
"items",
"(",
")",
")",
":",
"# Keys not escaped - this may be changed if needed...",
"metadata_escaped",
"[",
"key",
"]",
"=",
"gdal",
".",
"EscapeString",
"(",
"val",
",",
"gdal",
".",
"CPLES_XML",
")",
"self",
".",
"dataset",
".",
"SetMetadata",
"(",
"metadata_escaped",
")",
"self",
".",
"dataset",
".",
"FlushCache",
"(",
")"
] | https://github.com/nansencenter/nansat/blob/5700ec673fbf522c19b8dedcb01cc15f7cd29a6a/nansat/vrt.py#L801-L810 |
||
zhl2008/awd-platform | 0416b31abea29743387b10b3914581fbe8e7da5e | web_flaskbb/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/_structures.py | python | Infinity.__eq__ | (self, other) | return isinstance(other, self.__class__) | [] | def __eq__(self, other):
return isinstance(other, self.__class__) | [
"def",
"__eq__",
"(",
"self",
",",
"other",
")",
":",
"return",
"isinstance",
"(",
"other",
",",
"self",
".",
"__class__",
")"
] | https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/_structures.py#L21-L22 |
|||
TencentCloud/tencentcloud-sdk-python | 3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2 | tencentcloud/tci/v20190318/models.py | python | ModifyLibraryResponse.__init__ | (self) | r"""
:param LibraryId: 人员库唯一标识符
:type LibraryId: str
:param LibraryName: 人员库名称
:type LibraryName: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str | r"""
:param LibraryId: 人员库唯一标识符
:type LibraryId: str
:param LibraryName: 人员库名称
:type LibraryName: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str | [
"r",
":",
"param",
"LibraryId",
":",
"人员库唯一标识符",
":",
"type",
"LibraryId",
":",
"str",
":",
"param",
"LibraryName",
":",
"人员库名称",
":",
"type",
"LibraryName",
":",
"str",
":",
"param",
"RequestId",
":",
"唯一请求",
"ID,每次请求都会返回。定位问题时需要提供该次请求的",
"RequestId。",
":",
"type",
"RequestId",
":",
"str"
] | def __init__(self):
r"""
:param LibraryId: 人员库唯一标识符
:type LibraryId: str
:param LibraryName: 人员库名称
:type LibraryName: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.LibraryId = None
self.LibraryName = None
self.RequestId = None | [
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"LibraryId",
"=",
"None",
"self",
".",
"LibraryName",
"=",
"None",
"self",
".",
"RequestId",
"=",
"None"
] | https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/tci/v20190318/models.py#L3152-L3163 |
||
phonopy/phonopy | 816586d0ba8177482ecf40e52f20cbdee2260d51 | phonopy/api_phonopy.py | python | Phonopy._shape_supercell_matrix | (self, smat) | return shape_supercell_matrix(smat) | [] | def _shape_supercell_matrix(self, smat):
return shape_supercell_matrix(smat) | [
"def",
"_shape_supercell_matrix",
"(",
"self",
",",
"smat",
")",
":",
"return",
"shape_supercell_matrix",
"(",
"smat",
")"
] | https://github.com/phonopy/phonopy/blob/816586d0ba8177482ecf40e52f20cbdee2260d51/phonopy/api_phonopy.py#L3543-L3544 |
|||
openshift/openshift-tools | 1188778e728a6e4781acf728123e5b356380fe6f | openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_openshift/library/oc_adm_policy_group.py | python | SecurityContextConstraints.groups | (self) | return self._groups | groups property getter | groups property getter | [
"groups",
"property",
"getter"
] | def groups(self):
''' groups property getter '''
if self._groups is None:
self._groups = self.get_groups()
return self._groups | [
"def",
"groups",
"(",
"self",
")",
":",
"if",
"self",
".",
"_groups",
"is",
"None",
":",
"self",
".",
"_groups",
"=",
"self",
".",
"get_groups",
"(",
")",
"return",
"self",
".",
"_groups"
] | https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_openshift/library/oc_adm_policy_group.py#L1872-L1876 |
|
cournape/Bento | 37de23d784407a7c98a4a15770ffc570d5f32d70 | bento/private/version.py | python | NormalizedVersion.__eq__ | (self, other) | return self.parts == other.parts | [] | def __eq__(self, other):
if not isinstance(other, NormalizedVersion):
self._cannot_compare(other)
return self.parts == other.parts | [
"def",
"__eq__",
"(",
"self",
",",
"other",
")",
":",
"if",
"not",
"isinstance",
"(",
"other",
",",
"NormalizedVersion",
")",
":",
"self",
".",
"_cannot_compare",
"(",
"other",
")",
"return",
"self",
".",
"parts",
"==",
"other",
".",
"parts"
] | https://github.com/cournape/Bento/blob/37de23d784407a7c98a4a15770ffc570d5f32d70/bento/private/version.py#L197-L200 |
|||
david8862/keras-YOLOv3-model-set | e9f0f94109430973525219e66eeafe8a2f51363d | common/backbones/shufflenet.py | python | ShuffleNet | (include_top=True,
input_tensor=None,
scale_factor=1.0,
pooling=None,
input_shape=None,
groups=1,
weights='imagenet',
num_shuffle_units=[3, 7, 3],
bottleneck_ratio=0.25,
classes=1000,
**kwargs) | return model | ShuffleNet implementation for Keras 2
ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices
Xiangyu Zhang, Xinyu Zhou, Mengxiao Lin, Jian Sun
https://arxiv.org/pdf/1707.01083.pdf
Note that only TensorFlow is supported for now, therefore it only works
with the data format `image_data_format='channels_last'` in your Keras
config at `~/.keras/keras.json`.
Parameters
----------
include_top: bool(True)
whether to include the fully-connected layer at the top of the network.
input_tensor:
optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model.
scale_factor:
scales the number of output channels
input_shape:
pooling:
Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
groups: int
number of groups per channel
num_shuffle_units: list([3,7,3])
number of stages (list length) and the number of shufflenet units in a
stage beginning with stage 2 because stage 1 is fixed
e.g. idx 0 contains 3 + 1 (first shuffle unit in each stage differs) shufflenet units for stage 2
idx 1 contains 7 + 1 Shufflenet Units for stage 3 and
idx 2 contains 3 + 1 Shufflenet Units
bottleneck_ratio:
bottleneck ratio implies the ratio of bottleneck channels to output channels.
For example, bottleneck ratio = 1 : 4 means the output feature map is 4 times
the width of the bottleneck feature map.
classes: int(1000)
number of classes to predict
Returns
-------
A Keras model instance
References
----------
- [ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices]
(http://www.arxiv.org/pdf/1707.01083.pdf) | ShuffleNet implementation for Keras 2
ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices
Xiangyu Zhang, Xinyu Zhou, Mengxiao Lin, Jian Sun
https://arxiv.org/pdf/1707.01083.pdf
Note that only TensorFlow is supported for now, therefore it only works
with the data format `image_data_format='channels_last'` in your Keras
config at `~/.keras/keras.json`.
Parameters
----------
include_top: bool(True)
whether to include the fully-connected layer at the top of the network.
input_tensor:
optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model.
scale_factor:
scales the number of output channels
input_shape:
pooling:
Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
groups: int
number of groups per channel
num_shuffle_units: list([3,7,3])
number of stages (list length) and the number of shufflenet units in a
stage beginning with stage 2 because stage 1 is fixed
e.g. idx 0 contains 3 + 1 (first shuffle unit in each stage differs) shufflenet units for stage 2
idx 1 contains 7 + 1 Shufflenet Units for stage 3 and
idx 2 contains 3 + 1 Shufflenet Units
bottleneck_ratio:
bottleneck ratio implies the ratio of bottleneck channels to output channels.
For example, bottleneck ratio = 1 : 4 means the output feature map is 4 times
the width of the bottleneck feature map.
classes: int(1000)
number of classes to predict
Returns
-------
A Keras model instance
References
----------
- [ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices]
(http://www.arxiv.org/pdf/1707.01083.pdf) | [
"ShuffleNet",
"implementation",
"for",
"Keras",
"2",
"ShuffleNet",
":",
"An",
"Extremely",
"Efficient",
"Convolutional",
"Neural",
"Network",
"for",
"Mobile",
"Devices",
"Xiangyu",
"Zhang",
"Xinyu",
"Zhou",
"Mengxiao",
"Lin",
"Jian",
"Sun",
"https",
":",
"//",
"arxiv",
".",
"org",
"/",
"pdf",
"/",
"1707",
".",
"01083",
".",
"pdf",
"Note",
"that",
"only",
"TensorFlow",
"is",
"supported",
"for",
"now",
"therefore",
"it",
"only",
"works",
"with",
"the",
"data",
"format",
"image_data_format",
"=",
"channels_last",
"in",
"your",
"Keras",
"config",
"at",
"~",
"/",
".",
"keras",
"/",
"keras",
".",
"json",
".",
"Parameters",
"----------",
"include_top",
":",
"bool",
"(",
"True",
")",
"whether",
"to",
"include",
"the",
"fully",
"-",
"connected",
"layer",
"at",
"the",
"top",
"of",
"the",
"network",
".",
"input_tensor",
":",
"optional",
"Keras",
"tensor",
"(",
"i",
".",
"e",
".",
"output",
"of",
"layers",
".",
"Input",
"()",
")",
"to",
"use",
"as",
"image",
"input",
"for",
"the",
"model",
".",
"scale_factor",
":",
"scales",
"the",
"number",
"of",
"output",
"channels",
"input_shape",
":",
"pooling",
":",
"Optional",
"pooling",
"mode",
"for",
"feature",
"extraction",
"when",
"include_top",
"is",
"False",
".",
"-",
"None",
"means",
"that",
"the",
"output",
"of",
"the",
"model",
"will",
"be",
"the",
"4D",
"tensor",
"output",
"of",
"the",
"last",
"convolutional",
"layer",
".",
"-",
"avg",
"means",
"that",
"global",
"average",
"pooling",
"will",
"be",
"applied",
"to",
"the",
"output",
"of",
"the",
"last",
"convolutional",
"layer",
"and",
"thus",
"the",
"output",
"of",
"the",
"model",
"will",
"be",
"a",
"2D",
"tensor",
".",
"-",
"max",
"means",
"that",
"global",
"max",
"pooling",
"will",
"be",
"applied",
".",
"groups",
":",
"int",
"number",
"of",
"groups",
"per",
"channel",
"num_shuffle_units",
":",
"list",
"(",
"[",
"3",
"7",
"3",
"]",
")",
"number",
"of",
"stages",
"(",
"list",
"length",
")",
"and",
"the",
"number",
"of",
"shufflenet",
"units",
"in",
"a",
"stage",
"beginning",
"with",
"stage",
"2",
"because",
"stage",
"1",
"is",
"fixed",
"e",
".",
"g",
".",
"idx",
"0",
"contains",
"3",
"+",
"1",
"(",
"first",
"shuffle",
"unit",
"in",
"each",
"stage",
"differs",
")",
"shufflenet",
"units",
"for",
"stage",
"2",
"idx",
"1",
"contains",
"7",
"+",
"1",
"Shufflenet",
"Units",
"for",
"stage",
"3",
"and",
"idx",
"2",
"contains",
"3",
"+",
"1",
"Shufflenet",
"Units",
"bottleneck_ratio",
":",
"bottleneck",
"ratio",
"implies",
"the",
"ratio",
"of",
"bottleneck",
"channels",
"to",
"output",
"channels",
".",
"For",
"example",
"bottleneck",
"ratio",
"=",
"1",
":",
"4",
"means",
"the",
"output",
"feature",
"map",
"is",
"4",
"times",
"the",
"width",
"of",
"the",
"bottleneck",
"feature",
"map",
".",
"classes",
":",
"int",
"(",
"1000",
")",
"number",
"of",
"classes",
"to",
"predict",
"Returns",
"-------",
"A",
"Keras",
"model",
"instance",
"References",
"----------",
"-",
"[",
"ShuffleNet",
":",
"An",
"Extremely",
"Efficient",
"Convolutional",
"Neural",
"Network",
"for",
"Mobile",
"Devices",
"]",
"(",
"http",
":",
"//",
"www",
".",
"arxiv",
".",
"org",
"/",
"pdf",
"/",
"1707",
".",
"01083",
".",
"pdf",
")"
] | def ShuffleNet(include_top=True,
input_tensor=None,
scale_factor=1.0,
pooling=None,
input_shape=None,
groups=1,
weights='imagenet',
num_shuffle_units=[3, 7, 3],
bottleneck_ratio=0.25,
classes=1000,
**kwargs):
"""
ShuffleNet implementation for Keras 2
ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices
Xiangyu Zhang, Xinyu Zhou, Mengxiao Lin, Jian Sun
https://arxiv.org/pdf/1707.01083.pdf
Note that only TensorFlow is supported for now, therefore it only works
with the data format `image_data_format='channels_last'` in your Keras
config at `~/.keras/keras.json`.
Parameters
----------
include_top: bool(True)
whether to include the fully-connected layer at the top of the network.
input_tensor:
optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model.
scale_factor:
scales the number of output channels
input_shape:
pooling:
Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
groups: int
number of groups per channel
num_shuffle_units: list([3,7,3])
number of stages (list length) and the number of shufflenet units in a
stage beginning with stage 2 because stage 1 is fixed
e.g. idx 0 contains 3 + 1 (first shuffle unit in each stage differs) shufflenet units for stage 2
idx 1 contains 7 + 1 Shufflenet Units for stage 3 and
idx 2 contains 3 + 1 Shufflenet Units
bottleneck_ratio:
bottleneck ratio implies the ratio of bottleneck channels to output channels.
For example, bottleneck ratio = 1 : 4 means the output feature map is 4 times
the width of the bottleneck feature map.
classes: int(1000)
number of classes to predict
Returns
-------
A Keras model instance
References
----------
- [ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices]
(http://www.arxiv.org/pdf/1707.01083.pdf)
"""
if K.backend() != 'tensorflow':
raise RuntimeError('Only TensorFlow backend is currently supported, '
'as other backends do not support ')
name = "ShuffleNet_%.2gX_g%d_br_%.2g_%s" % (scale_factor, groups, bottleneck_ratio, "".join([str(x) for x in num_shuffle_units]))
input_shape = _obtain_input_shape(input_shape,
default_size=224,
min_size=28,
require_flatten=include_top,
data_format=K.image_data_format())
out_dim_stage_two = {1: 144, 2: 200, 3: 240, 4: 272, 8: 384}
if groups not in out_dim_stage_two:
raise ValueError("Invalid number of groups.")
if pooling not in ['max','avg', None]:
raise ValueError("Invalid value for pooling.")
if not (float(scale_factor) * 4).is_integer():
raise ValueError("Invalid value for scale_factor. Should be x over 4.")
exp = np.insert(np.arange(0, len(num_shuffle_units), dtype=np.float32), 0, 0)
out_channels_in_stage = 2 ** exp
out_channels_in_stage *= out_dim_stage_two[groups] # calculate output channels for each stage
out_channels_in_stage[0] = 24 # first stage has always 24 output channels
out_channels_in_stage *= scale_factor
out_channels_in_stage = out_channels_in_stage.astype(int)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
#if not K.is_keras_tensor(input_tensor):
#img_input = Input(tensor=input_tensor, shape=input_shape)
#else:
#img_input = input_tensor
img_input = input_tensor
# create shufflenet architecture
x = YoloConv2D(filters=out_channels_in_stage[0], kernel_size=(3, 3), padding='same',
use_bias=False, strides=(2, 2), activation="relu", name="conv1")(img_input)
x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same', name="maxpool1")(x)
# create stages containing shufflenet units beginning at stage 2
for stage in range(0, len(num_shuffle_units)):
repeat = num_shuffle_units[stage]
x = _block(x, out_channels_in_stage, repeat=repeat,
bottleneck_ratio=bottleneck_ratio,
groups=groups, stage=stage + 2)
if include_top:
#x = Dense(units=classes, name="fc")(x)
#x = Activation('softmax', name='softmax')(x)
x = GlobalAveragePooling2D(name='global_avg_pool')(x)
x = Dense(units=classes, activation='softmax',
use_bias=True, name='Logits')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D(name='global_avg_pool')(x)
elif pooling == 'max':
x = GlobalMaxPooling2D(name='global_max_pool')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs=inputs, outputs=x, name=name)
# Load weights.
if weights == 'imagenet':
if K.image_data_format() == 'channels_first':
raise ValueError('Weights for "channels_first" format '
'are not available.')
if include_top:
model_name = ('shufflenet_weights_tf_dim_ordering_tf_kernels_' +
str(alpha) + '_' + str(rows) + '.h5')
weigh_path = BASE_WEIGHT_PATH + model_name
weights_path = get_file(
model_name, weigh_path, cache_subdir='models')
else:
model_name = ('shufflenet_weights_tf_dim_ordering_tf_kernels_' +
str(alpha) + '_' + str(rows) + '_no_top' + '.h5')
weigh_path = BASE_WEIGHT_PATH + model_name
weights_path = get_file(
model_name, weigh_path, cache_subdir='models')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model | [
"def",
"ShuffleNet",
"(",
"include_top",
"=",
"True",
",",
"input_tensor",
"=",
"None",
",",
"scale_factor",
"=",
"1.0",
",",
"pooling",
"=",
"None",
",",
"input_shape",
"=",
"None",
",",
"groups",
"=",
"1",
",",
"weights",
"=",
"'imagenet'",
",",
"num_shuffle_units",
"=",
"[",
"3",
",",
"7",
",",
"3",
"]",
",",
"bottleneck_ratio",
"=",
"0.25",
",",
"classes",
"=",
"1000",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"K",
".",
"backend",
"(",
")",
"!=",
"'tensorflow'",
":",
"raise",
"RuntimeError",
"(",
"'Only TensorFlow backend is currently supported, '",
"'as other backends do not support '",
")",
"name",
"=",
"\"ShuffleNet_%.2gX_g%d_br_%.2g_%s\"",
"%",
"(",
"scale_factor",
",",
"groups",
",",
"bottleneck_ratio",
",",
"\"\"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"num_shuffle_units",
"]",
")",
")",
"input_shape",
"=",
"_obtain_input_shape",
"(",
"input_shape",
",",
"default_size",
"=",
"224",
",",
"min_size",
"=",
"28",
",",
"require_flatten",
"=",
"include_top",
",",
"data_format",
"=",
"K",
".",
"image_data_format",
"(",
")",
")",
"out_dim_stage_two",
"=",
"{",
"1",
":",
"144",
",",
"2",
":",
"200",
",",
"3",
":",
"240",
",",
"4",
":",
"272",
",",
"8",
":",
"384",
"}",
"if",
"groups",
"not",
"in",
"out_dim_stage_two",
":",
"raise",
"ValueError",
"(",
"\"Invalid number of groups.\"",
")",
"if",
"pooling",
"not",
"in",
"[",
"'max'",
",",
"'avg'",
",",
"None",
"]",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for pooling.\"",
")",
"if",
"not",
"(",
"float",
"(",
"scale_factor",
")",
"*",
"4",
")",
".",
"is_integer",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for scale_factor. Should be x over 4.\"",
")",
"exp",
"=",
"np",
".",
"insert",
"(",
"np",
".",
"arange",
"(",
"0",
",",
"len",
"(",
"num_shuffle_units",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
",",
"0",
",",
"0",
")",
"out_channels_in_stage",
"=",
"2",
"**",
"exp",
"out_channels_in_stage",
"*=",
"out_dim_stage_two",
"[",
"groups",
"]",
"# calculate output channels for each stage",
"out_channels_in_stage",
"[",
"0",
"]",
"=",
"24",
"# first stage has always 24 output channels",
"out_channels_in_stage",
"*=",
"scale_factor",
"out_channels_in_stage",
"=",
"out_channels_in_stage",
".",
"astype",
"(",
"int",
")",
"if",
"input_tensor",
"is",
"None",
":",
"img_input",
"=",
"Input",
"(",
"shape",
"=",
"input_shape",
")",
"else",
":",
"#if not K.is_keras_tensor(input_tensor):",
"#img_input = Input(tensor=input_tensor, shape=input_shape)",
"#else:",
"#img_input = input_tensor",
"img_input",
"=",
"input_tensor",
"# create shufflenet architecture",
"x",
"=",
"YoloConv2D",
"(",
"filters",
"=",
"out_channels_in_stage",
"[",
"0",
"]",
",",
"kernel_size",
"=",
"(",
"3",
",",
"3",
")",
",",
"padding",
"=",
"'same'",
",",
"use_bias",
"=",
"False",
",",
"strides",
"=",
"(",
"2",
",",
"2",
")",
",",
"activation",
"=",
"\"relu\"",
",",
"name",
"=",
"\"conv1\"",
")",
"(",
"img_input",
")",
"x",
"=",
"MaxPooling2D",
"(",
"pool_size",
"=",
"(",
"3",
",",
"3",
")",
",",
"strides",
"=",
"(",
"2",
",",
"2",
")",
",",
"padding",
"=",
"'same'",
",",
"name",
"=",
"\"maxpool1\"",
")",
"(",
"x",
")",
"# create stages containing shufflenet units beginning at stage 2",
"for",
"stage",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"num_shuffle_units",
")",
")",
":",
"repeat",
"=",
"num_shuffle_units",
"[",
"stage",
"]",
"x",
"=",
"_block",
"(",
"x",
",",
"out_channels_in_stage",
",",
"repeat",
"=",
"repeat",
",",
"bottleneck_ratio",
"=",
"bottleneck_ratio",
",",
"groups",
"=",
"groups",
",",
"stage",
"=",
"stage",
"+",
"2",
")",
"if",
"include_top",
":",
"#x = Dense(units=classes, name=\"fc\")(x)",
"#x = Activation('softmax', name='softmax')(x)",
"x",
"=",
"GlobalAveragePooling2D",
"(",
"name",
"=",
"'global_avg_pool'",
")",
"(",
"x",
")",
"x",
"=",
"Dense",
"(",
"units",
"=",
"classes",
",",
"activation",
"=",
"'softmax'",
",",
"use_bias",
"=",
"True",
",",
"name",
"=",
"'Logits'",
")",
"(",
"x",
")",
"else",
":",
"if",
"pooling",
"==",
"'avg'",
":",
"x",
"=",
"GlobalAveragePooling2D",
"(",
"name",
"=",
"'global_avg_pool'",
")",
"(",
"x",
")",
"elif",
"pooling",
"==",
"'max'",
":",
"x",
"=",
"GlobalMaxPooling2D",
"(",
"name",
"=",
"'global_max_pool'",
")",
"(",
"x",
")",
"# Ensure that the model takes into account",
"# any potential predecessors of `input_tensor`.",
"if",
"input_tensor",
"is",
"not",
"None",
":",
"inputs",
"=",
"get_source_inputs",
"(",
"input_tensor",
")",
"else",
":",
"inputs",
"=",
"img_input",
"# Create model.",
"model",
"=",
"Model",
"(",
"inputs",
"=",
"inputs",
",",
"outputs",
"=",
"x",
",",
"name",
"=",
"name",
")",
"# Load weights.",
"if",
"weights",
"==",
"'imagenet'",
":",
"if",
"K",
".",
"image_data_format",
"(",
")",
"==",
"'channels_first'",
":",
"raise",
"ValueError",
"(",
"'Weights for \"channels_first\" format '",
"'are not available.'",
")",
"if",
"include_top",
":",
"model_name",
"=",
"(",
"'shufflenet_weights_tf_dim_ordering_tf_kernels_'",
"+",
"str",
"(",
"alpha",
")",
"+",
"'_'",
"+",
"str",
"(",
"rows",
")",
"+",
"'.h5'",
")",
"weigh_path",
"=",
"BASE_WEIGHT_PATH",
"+",
"model_name",
"weights_path",
"=",
"get_file",
"(",
"model_name",
",",
"weigh_path",
",",
"cache_subdir",
"=",
"'models'",
")",
"else",
":",
"model_name",
"=",
"(",
"'shufflenet_weights_tf_dim_ordering_tf_kernels_'",
"+",
"str",
"(",
"alpha",
")",
"+",
"'_'",
"+",
"str",
"(",
"rows",
")",
"+",
"'_no_top'",
"+",
"'.h5'",
")",
"weigh_path",
"=",
"BASE_WEIGHT_PATH",
"+",
"model_name",
"weights_path",
"=",
"get_file",
"(",
"model_name",
",",
"weigh_path",
",",
"cache_subdir",
"=",
"'models'",
")",
"model",
".",
"load_weights",
"(",
"weights_path",
")",
"elif",
"weights",
"is",
"not",
"None",
":",
"model",
".",
"load_weights",
"(",
"weights",
")",
"return",
"model"
] | https://github.com/david8862/keras-YOLOv3-model-set/blob/e9f0f94109430973525219e66eeafe8a2f51363d/common/backbones/shufflenet.py#L23-L181 |
|
tp4a/teleport | 1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad | server/www/packages/packages-darwin/x64/PIL/ImageDraw.py | python | _color_diff | (rgb1, rgb2) | return abs(rgb1[0]-rgb2[0]) + abs(rgb1[1]-rgb2[1]) + abs(rgb1[2]-rgb2[2]) | Uses 1-norm distance to calculate difference between two rgb values. | Uses 1-norm distance to calculate difference between two rgb values. | [
"Uses",
"1",
"-",
"norm",
"distance",
"to",
"calculate",
"difference",
"between",
"two",
"rgb",
"values",
"."
] | def _color_diff(rgb1, rgb2):
"""
Uses 1-norm distance to calculate difference between two rgb values.
"""
return abs(rgb1[0]-rgb2[0]) + abs(rgb1[1]-rgb2[1]) + abs(rgb1[2]-rgb2[2]) | [
"def",
"_color_diff",
"(",
"rgb1",
",",
"rgb2",
")",
":",
"return",
"abs",
"(",
"rgb1",
"[",
"0",
"]",
"-",
"rgb2",
"[",
"0",
"]",
")",
"+",
"abs",
"(",
"rgb1",
"[",
"1",
"]",
"-",
"rgb2",
"[",
"1",
"]",
")",
"+",
"abs",
"(",
"rgb1",
"[",
"2",
"]",
"-",
"rgb2",
"[",
"2",
"]",
")"
] | https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-darwin/x64/PIL/ImageDraw.py#L383-L387 |
|
intel/IntelSEAPI | 7997a782fd3fa5621e275bd31060f9795564e6ca | runtool/exporters/DGML.py | python | DGML.get_targets | (self) | return [self.args.output + ".dgml"] | [] | def get_targets(self):
return [self.args.output + ".dgml"] | [
"def",
"get_targets",
"(",
"self",
")",
":",
"return",
"[",
"self",
".",
"args",
".",
"output",
"+",
"\".dgml\"",
"]"
] | https://github.com/intel/IntelSEAPI/blob/7997a782fd3fa5621e275bd31060f9795564e6ca/runtool/exporters/DGML.py#L15-L16 |
|||
pwnieexpress/pwn_plug_sources | 1a23324f5dc2c3de20f9c810269b6a29b2758cad | src/wifitap/scapy.py | python | PacketListField.do_copy | (self, x) | return map(lambda p:p.copy(), x) | [] | def do_copy(self, x):
return map(lambda p:p.copy(), x) | [
"def",
"do_copy",
"(",
"self",
",",
"x",
")",
":",
"return",
"map",
"(",
"lambda",
"p",
":",
"p",
".",
"copy",
"(",
")",
",",
"x",
")"
] | https://github.com/pwnieexpress/pwn_plug_sources/blob/1a23324f5dc2c3de20f9c810269b6a29b2758cad/src/wifitap/scapy.py#L4029-L4030 |
|||
boto/boto | b2a6f08122b2f1b89888d2848e730893595cd001 | boto/gs/bucket.py | python | Bucket.set_def_xml_acl | (self, acl_str, headers=None) | return self.set_xml_acl(acl_str, '', headers,
query_args=DEF_OBJ_ACL) | Sets a bucket's default ACL to an XML string.
:type acl_str: string
:param acl_str: A string containing the ACL XML.
:type headers: dict
:param headers: Additional headers to set during the request. | Sets a bucket's default ACL to an XML string. | [
"Sets",
"a",
"bucket",
"s",
"default",
"ACL",
"to",
"an",
"XML",
"string",
"."
] | def set_def_xml_acl(self, acl_str, headers=None):
"""Sets a bucket's default ACL to an XML string.
:type acl_str: string
:param acl_str: A string containing the ACL XML.
:type headers: dict
:param headers: Additional headers to set during the request.
"""
return self.set_xml_acl(acl_str, '', headers,
query_args=DEF_OBJ_ACL) | [
"def",
"set_def_xml_acl",
"(",
"self",
",",
"acl_str",
",",
"headers",
"=",
"None",
")",
":",
"return",
"self",
".",
"set_xml_acl",
"(",
"acl_str",
",",
"''",
",",
"headers",
",",
"query_args",
"=",
"DEF_OBJ_ACL",
")"
] | https://github.com/boto/boto/blob/b2a6f08122b2f1b89888d2848e730893595cd001/boto/gs/bucket.py#L574-L584 |
|
twilio/twilio-python | 6e1e811ea57a1edfadd5161ace87397c563f6915 | twilio/rest/api/v2010/account/usage/record/__init__.py | python | RecordInstance.uri | (self) | return self._properties['uri'] | :returns: The URI of the resource, relative to `https://api.twilio.com`
:rtype: unicode | :returns: The URI of the resource, relative to `https://api.twilio.com`
:rtype: unicode | [
":",
"returns",
":",
"The",
"URI",
"of",
"the",
"resource",
"relative",
"to",
"https",
":",
"//",
"api",
".",
"twilio",
".",
"com",
":",
"rtype",
":",
"unicode"
] | def uri(self):
"""
:returns: The URI of the resource, relative to `https://api.twilio.com`
:rtype: unicode
"""
return self._properties['uri'] | [
"def",
"uri",
"(",
"self",
")",
":",
"return",
"self",
".",
"_properties",
"[",
"'uri'",
"]"
] | https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/api/v2010/account/usage/record/__init__.py#L687-L692 |
|
Bitmessage/PyBitmessage | 97612b049e0453867d6d90aa628f8e7b007b4d85 | src/network/tcp.py | python | TCPConnection.antiIntersectionDelay | (self, initial=False) | This is a defense against the so called intersection attacks.
It is called when you notice peer is requesting non-existing
objects, or right after the connection is established. It will
estimate how long an object will take to propagate across the
network, and skip processing "getdata" requests until then. This
means an attacker only has one shot per IP to perform the attack. | This is a defense against the so called intersection attacks. | [
"This",
"is",
"a",
"defense",
"against",
"the",
"so",
"called",
"intersection",
"attacks",
"."
] | def antiIntersectionDelay(self, initial=False):
"""
This is a defense against the so called intersection attacks.
It is called when you notice peer is requesting non-existing
objects, or right after the connection is established. It will
estimate how long an object will take to propagate across the
network, and skip processing "getdata" requests until then. This
means an attacker only has one shot per IP to perform the attack.
"""
# estimated time for a small object to propagate across the
# whole network
max_known_nodes = max(
len(knownnodes.knownNodes[x]) for x in knownnodes.knownNodes)
delay = math.ceil(math.log(max_known_nodes + 2, 20)) * (
0.2 + invQueue.queueCount / 2.0)
# take the stream with maximum amount of nodes
# +2 is to avoid problems with log(0) and log(1)
# 20 is avg connected nodes count
# 0.2 is avg message transmission time
if delay > 0:
if initial:
self.skipUntil = self.connectedAt + delay
if self.skipUntil > time.time():
logger.debug(
'Initial skipping processing getdata for %.2fs',
self.skipUntil - time.time())
else:
logger.debug(
'Skipping processing getdata due to missing object'
' for %.2fs', delay)
self.skipUntil = time.time() + delay | [
"def",
"antiIntersectionDelay",
"(",
"self",
",",
"initial",
"=",
"False",
")",
":",
"# estimated time for a small object to propagate across the",
"# whole network",
"max_known_nodes",
"=",
"max",
"(",
"len",
"(",
"knownnodes",
".",
"knownNodes",
"[",
"x",
"]",
")",
"for",
"x",
"in",
"knownnodes",
".",
"knownNodes",
")",
"delay",
"=",
"math",
".",
"ceil",
"(",
"math",
".",
"log",
"(",
"max_known_nodes",
"+",
"2",
",",
"20",
")",
")",
"*",
"(",
"0.2",
"+",
"invQueue",
".",
"queueCount",
"/",
"2.0",
")",
"# take the stream with maximum amount of nodes",
"# +2 is to avoid problems with log(0) and log(1)",
"# 20 is avg connected nodes count",
"# 0.2 is avg message transmission time",
"if",
"delay",
">",
"0",
":",
"if",
"initial",
":",
"self",
".",
"skipUntil",
"=",
"self",
".",
"connectedAt",
"+",
"delay",
"if",
"self",
".",
"skipUntil",
">",
"time",
".",
"time",
"(",
")",
":",
"logger",
".",
"debug",
"(",
"'Initial skipping processing getdata for %.2fs'",
",",
"self",
".",
"skipUntil",
"-",
"time",
".",
"time",
"(",
")",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"'Skipping processing getdata due to missing object'",
"' for %.2fs'",
",",
"delay",
")",
"self",
".",
"skipUntil",
"=",
"time",
".",
"time",
"(",
")",
"+",
"delay"
] | https://github.com/Bitmessage/PyBitmessage/blob/97612b049e0453867d6d90aa628f8e7b007b4d85/src/network/tcp.py#L96-L127 |
||
Axelrod-Python/Axelrod | 00e18323c1b1af74df873773e44f31e1b9a299c6 | axelrod/strategy_transformers.py | python | mixed_reclassifier | (original_classifier, probability, m_player) | return original_classifier | Function to reclassify the strategy | Function to reclassify the strategy | [
"Function",
"to",
"reclassify",
"the",
"strategy"
] | def mixed_reclassifier(original_classifier, probability, m_player):
"""Function to reclassify the strategy"""
# If a single probability, player is passed
if isinstance(probability, float) or isinstance(probability, int):
m_player = [m_player]
probability = [probability]
if min(probability) == max(probability) == 0: # No probability given
return original_classifier
if 1 in probability: # If all probability given to one player
player = m_player[probability.index(1)]
original_classifier["stochastic"] = player.classifier["stochastic"]
return original_classifier
# Otherwise: stochastic.
original_classifier["stochastic"] = True
return original_classifier | [
"def",
"mixed_reclassifier",
"(",
"original_classifier",
",",
"probability",
",",
"m_player",
")",
":",
"# If a single probability, player is passed",
"if",
"isinstance",
"(",
"probability",
",",
"float",
")",
"or",
"isinstance",
"(",
"probability",
",",
"int",
")",
":",
"m_player",
"=",
"[",
"m_player",
"]",
"probability",
"=",
"[",
"probability",
"]",
"if",
"min",
"(",
"probability",
")",
"==",
"max",
"(",
"probability",
")",
"==",
"0",
":",
"# No probability given",
"return",
"original_classifier",
"if",
"1",
"in",
"probability",
":",
"# If all probability given to one player",
"player",
"=",
"m_player",
"[",
"probability",
".",
"index",
"(",
"1",
")",
"]",
"original_classifier",
"[",
"\"stochastic\"",
"]",
"=",
"player",
".",
"classifier",
"[",
"\"stochastic\"",
"]",
"return",
"original_classifier",
"# Otherwise: stochastic.",
"original_classifier",
"[",
"\"stochastic\"",
"]",
"=",
"True",
"return",
"original_classifier"
] | https://github.com/Axelrod-Python/Axelrod/blob/00e18323c1b1af74df873773e44f31e1b9a299c6/axelrod/strategy_transformers.py#L621-L638 |
|
google/grr | 8ad8a4d2c5a93c92729206b7771af19d92d4f915 | grr/server/grr_response_server/gui/api_labels_restricted_call_router.py | python | ApiLabelsRestrictedCallRouter.ListClientApprovals | (self, args, context=None) | return self.delegate.ListClientApprovals(args, context=context) | [] | def ListClientApprovals(self, args, context=None):
# Everybody can list their own user client approvals.
return self.delegate.ListClientApprovals(args, context=context) | [
"def",
"ListClientApprovals",
"(",
"self",
",",
"args",
",",
"context",
"=",
"None",
")",
":",
"# Everybody can list their own user client approvals.",
"return",
"self",
".",
"delegate",
".",
"ListClientApprovals",
"(",
"args",
",",
"context",
"=",
"context",
")"
] | https://github.com/google/grr/blob/8ad8a4d2c5a93c92729206b7771af19d92d4f915/grr/server/grr_response_server/gui/api_labels_restricted_call_router.py#L265-L268 |
|||
makelove/OpenCV-Python-Tutorial | e428d648f7aa50d6a0fb4f4d0fb1bd1a600fef41 | cv-Tkinter-GUI/kivy-GUI/kivy_cv1.py | python | KivyCamera.__init__ | (self, capture, fps, **kwargs) | [] | def __init__(self, capture, fps, **kwargs):
super(KivyCamera, self).__init__(**kwargs)
self.capture = capture
Clock.schedule_interval(self.update, 1.0 / fps) | [
"def",
"__init__",
"(",
"self",
",",
"capture",
",",
"fps",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
"KivyCamera",
",",
"self",
")",
".",
"__init__",
"(",
"*",
"*",
"kwargs",
")",
"self",
".",
"capture",
"=",
"capture",
"Clock",
".",
"schedule_interval",
"(",
"self",
".",
"update",
",",
"1.0",
"/",
"fps",
")"
] | https://github.com/makelove/OpenCV-Python-Tutorial/blob/e428d648f7aa50d6a0fb4f4d0fb1bd1a600fef41/cv-Tkinter-GUI/kivy-GUI/kivy_cv1.py#L27-L30 |
||||
gkrizek/bash-lambda-layer | 703b0ade8174022d44779d823172ab7ac33a5505 | bin/docutils/utils/math/math2html.py | python | Newline.process | (self) | Process contents | Process contents | [
"Process",
"contents"
] | def process(self):
"Process contents"
self.html = ['<br/>\n'] | [
"def",
"process",
"(",
"self",
")",
":",
"self",
".",
"html",
"=",
"[",
"'<br/>\\n'",
"]"
] | https://github.com/gkrizek/bash-lambda-layer/blob/703b0ade8174022d44779d823172ab7ac33a5505/bin/docutils/utils/math/math2html.py#L3718-L3720 |
||
gpodder/mygpo | 7a028ad621d05d4ca0d58fd22fb92656c8835e43 | mygpo/search/index.py | python | search_podcasts | (query) | return results | Search for podcasts according to 'query | Search for podcasts according to 'query | [
"Search",
"for",
"podcasts",
"according",
"to",
"query"
] | def search_podcasts(query):
"""Search for podcasts according to 'query'"""
if is_query_too_short(query):
logger.debug('Found no podcasts for "{query}". Query is too short', query=query)
return Podcast.objects.none()
logger.debug('Searching for "{query}" podcasts"', query=query)
query = SearchQuery(query)
results = (
Podcast.objects.annotate(rank=SearchRank(F("search_vector"), query))
.annotate(
order=ExpressionWrapper(
F("rank") * F("subscribers"), output_field=FloatField()
)
)
.filter(rank__gte=SEARCH_CUTOFF)
.order_by("-order")[:100]
.prefetch_related("slugs")
)
logger.debug(
'Found {count} podcasts for "{query}"', count=len(results), query=query
)
return results | [
"def",
"search_podcasts",
"(",
"query",
")",
":",
"if",
"is_query_too_short",
"(",
"query",
")",
":",
"logger",
".",
"debug",
"(",
"'Found no podcasts for \"{query}\". Query is too short'",
",",
"query",
"=",
"query",
")",
"return",
"Podcast",
".",
"objects",
".",
"none",
"(",
")",
"logger",
".",
"debug",
"(",
"'Searching for \"{query}\" podcasts\"'",
",",
"query",
"=",
"query",
")",
"query",
"=",
"SearchQuery",
"(",
"query",
")",
"results",
"=",
"(",
"Podcast",
".",
"objects",
".",
"annotate",
"(",
"rank",
"=",
"SearchRank",
"(",
"F",
"(",
"\"search_vector\"",
")",
",",
"query",
")",
")",
".",
"annotate",
"(",
"order",
"=",
"ExpressionWrapper",
"(",
"F",
"(",
"\"rank\"",
")",
"*",
"F",
"(",
"\"subscribers\"",
")",
",",
"output_field",
"=",
"FloatField",
"(",
")",
")",
")",
".",
"filter",
"(",
"rank__gte",
"=",
"SEARCH_CUTOFF",
")",
".",
"order_by",
"(",
"\"-order\"",
")",
"[",
":",
"100",
"]",
".",
"prefetch_related",
"(",
"\"slugs\"",
")",
")",
"logger",
".",
"debug",
"(",
"'Found {count} podcasts for \"{query}\"'",
",",
"count",
"=",
"len",
"(",
"results",
")",
",",
"query",
"=",
"query",
")",
"return",
"results"
] | https://github.com/gpodder/mygpo/blob/7a028ad621d05d4ca0d58fd22fb92656c8835e43/mygpo/search/index.py#L24-L50 |
|
n1nj4sec/pupy | a5d766ea81fdfe3bc2c38c9bdaf10e9b75af3b39 | pupy/network/lib/rpc/core/protocol.py | python | Connection._handle_getattr | (self, oid, name) | return self._access_attr(oid, name, (), "_rpyc_getattr", "allow_getattr", getattr) | [] | def _handle_getattr(self, oid, name):
return self._access_attr(oid, name, (), "_rpyc_getattr", "allow_getattr", getattr) | [
"def",
"_handle_getattr",
"(",
"self",
",",
"oid",
",",
"name",
")",
":",
"return",
"self",
".",
"_access_attr",
"(",
"oid",
",",
"name",
",",
"(",
")",
",",
"\"_rpyc_getattr\"",
",",
"\"allow_getattr\"",
",",
"getattr",
")"
] | https://github.com/n1nj4sec/pupy/blob/a5d766ea81fdfe3bc2c38c9bdaf10e9b75af3b39/pupy/network/lib/rpc/core/protocol.py#L662-L663 |
|||
zhanghe06/python | a678ce38a3770c91ad12e617810bf9f5ccf7898b | fuck/pconline.py | python | get_link | (url, token) | return down_link | 组装下载链接 | 组装下载链接 | [
"组装下载链接"
] | def get_link(url, token):
"""
组装下载链接
"""
file_name = url.split('/')[-1]
print file_name
print token
down_link = url.rstrip(file_name)+token+'/'+file_name
print down_link
return down_link | [
"def",
"get_link",
"(",
"url",
",",
"token",
")",
":",
"file_name",
"=",
"url",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"print",
"file_name",
"print",
"token",
"down_link",
"=",
"url",
".",
"rstrip",
"(",
"file_name",
")",
"+",
"token",
"+",
"'/'",
"+",
"file_name",
"print",
"down_link",
"return",
"down_link"
] | https://github.com/zhanghe06/python/blob/a678ce38a3770c91ad12e617810bf9f5ccf7898b/fuck/pconline.py#L43-L52 |
|
sagemath/sage | f9b2db94f675ff16963ccdefba4f1a3393b3fe0d | src/sage/geometry/polyhedron/plot.py | python | Projection.tikz | (self, view=[0, 0, 1], angle=0, scale=1,
edge_color='blue!95!black', facet_color='blue!95!black',
opacity=0.8, vertex_color='green', axis=False) | r"""
Return a string ``tikz_pic`` consisting of a tikz picture of ``self``
according to a projection ``view`` and an angle ``angle``
obtained via Jmol through the current state property.
INPUT:
- ``view`` - list (default: [0,0,1]) representing the rotation axis (see note below).
- ``angle`` - integer (default: 0) angle of rotation in degree from 0 to 360 (see note
below).
- ``scale`` - integer (default: 1) specifying the scaling of the tikz picture.
- ``edge_color`` - string (default: 'blue!95!black') representing colors which tikz
recognize.
- ``facet_color`` - string (default: 'blue!95!black') representing colors which tikz
recognize.
- ``vertex_color`` - string (default: 'green') representing colors which tikz
recognize.
- ``opacity`` - real number (default: 0.8) between 0 and 1 giving the opacity of
the front facets.
- ``axis`` - Boolean (default: False) draw the axes at the origin or not.
OUTPUT:
- LatexExpr -- containing the TikZ picture.
.. NOTE::
The inputs ``view`` and ``angle`` can be obtained by visualizing it
using ``.show(aspect_ratio=1)``. This will open an interactive view
in your default browser, where you can rotate the polytope. Once
the desired view angle is found, click on the information icon in
the lower right-hand corner and select *Get Viewpoint*. This will
copy a string of the form '[x,y,z],angle' to your local clipboard.
Go back to Sage and type ``Img = P.projection().tikz([x,y,z],angle)``.
The inputs ``view`` and ``angle`` can also be obtained from the
viewer Jmol::
1) Right click on the image
2) Select ``Console``
3) Select the tab ``State``
4) Scroll to the line ``moveto``
It reads something like::
moveto 0.0 {x y z angle} Scale
The ``view`` is then [x,y,z] and ``angle`` is angle.
The following number is the scale.
Jmol performs a rotation of ``angle`` degrees along the
vector [x,y,z] and show the result from the z-axis.
EXAMPLES::
sage: P1 = polytopes.small_rhombicuboctahedron()
sage: Image1 = P1.projection().tikz([1,3,5], 175, scale=4)
sage: type(Image1)
<class 'sage.misc.latex.LatexExpr'>
sage: print('\n'.join(Image1.splitlines()[:4]))
\begin{tikzpicture}%
[x={(-0.939161cm, 0.244762cm)},
y={(0.097442cm, -0.482887cm)},
z={(0.329367cm, 0.840780cm)},
sage: with open('polytope-tikz1.tex', 'w') as f: # not tested
....: _ = f.write(Image1)
sage: P2 = Polyhedron(vertices=[[1, 1],[1, 2],[2, 1]])
sage: Image2 = P2.projection().tikz(scale=3, edge_color='blue!95!black', facet_color='orange!95!black', opacity=0.4, vertex_color='yellow', axis=True)
sage: type(Image2)
<class 'sage.misc.latex.LatexExpr'>
sage: print('\n'.join(Image2.splitlines()[:4]))
\begin{tikzpicture}%
[scale=3.000000,
back/.style={loosely dotted, thin},
edge/.style={color=blue!95!black, thick},
sage: with open('polytope-tikz2.tex', 'w') as f: # not tested
....: _ = f.write(Image2)
sage: P3 = Polyhedron(vertices=[[-1, -1, 2],[-1, 2, -1],[2, -1, -1]])
sage: P3
A 2-dimensional polyhedron in ZZ^3 defined as the convex hull of 3 vertices
sage: Image3 = P3.projection().tikz([0.5,-1,-0.1], 55, scale=3, edge_color='blue!95!black',facet_color='orange!95!black', opacity=0.7, vertex_color='yellow', axis=True)
sage: print('\n'.join(Image3.splitlines()[:4]))
\begin{tikzpicture}%
[x={(0.658184cm, -0.242192cm)},
y={(-0.096240cm, 0.912008cm)},
z={(-0.746680cm, -0.331036cm)},
sage: with open('polytope-tikz3.tex', 'w') as f: # not tested
....: _ = f.write(Image3)
sage: P = Polyhedron(vertices=[[1,1,0,0],[1,2,0,0],[2,1,0,0],[0,0,1,0],[0,0,0,1]])
sage: P
A 4-dimensional polyhedron in ZZ^4 defined as the convex hull of 5 vertices
sage: P.projection().tikz()
Traceback (most recent call last):
...
NotImplementedError: The polytope has to live in 2 or 3 dimensions.
.. TODO::
Make it possible to draw Schlegel diagram for 4-polytopes. ::
sage: P=Polyhedron(vertices=[[1,1,0,0],[1,2,0,0],[2,1,0,0],[0,0,1,0],[0,0,0,1]])
sage: P
A 4-dimensional polyhedron in ZZ^4 defined as the convex hull of 5 vertices
sage: P.projection().tikz()
Traceback (most recent call last):
...
NotImplementedError: The polytope has to live in 2 or 3 dimensions.
Make it possible to draw 3-polytopes living in higher dimension. | r"""
Return a string ``tikz_pic`` consisting of a tikz picture of ``self``
according to a projection ``view`` and an angle ``angle``
obtained via Jmol through the current state property. | [
"r",
"Return",
"a",
"string",
"tikz_pic",
"consisting",
"of",
"a",
"tikz",
"picture",
"of",
"self",
"according",
"to",
"a",
"projection",
"view",
"and",
"an",
"angle",
"angle",
"obtained",
"via",
"Jmol",
"through",
"the",
"current",
"state",
"property",
"."
] | def tikz(self, view=[0, 0, 1], angle=0, scale=1,
edge_color='blue!95!black', facet_color='blue!95!black',
opacity=0.8, vertex_color='green', axis=False):
r"""
Return a string ``tikz_pic`` consisting of a tikz picture of ``self``
according to a projection ``view`` and an angle ``angle``
obtained via Jmol through the current state property.
INPUT:
- ``view`` - list (default: [0,0,1]) representing the rotation axis (see note below).
- ``angle`` - integer (default: 0) angle of rotation in degree from 0 to 360 (see note
below).
- ``scale`` - integer (default: 1) specifying the scaling of the tikz picture.
- ``edge_color`` - string (default: 'blue!95!black') representing colors which tikz
recognize.
- ``facet_color`` - string (default: 'blue!95!black') representing colors which tikz
recognize.
- ``vertex_color`` - string (default: 'green') representing colors which tikz
recognize.
- ``opacity`` - real number (default: 0.8) between 0 and 1 giving the opacity of
the front facets.
- ``axis`` - Boolean (default: False) draw the axes at the origin or not.
OUTPUT:
- LatexExpr -- containing the TikZ picture.
.. NOTE::
The inputs ``view`` and ``angle`` can be obtained by visualizing it
using ``.show(aspect_ratio=1)``. This will open an interactive view
in your default browser, where you can rotate the polytope. Once
the desired view angle is found, click on the information icon in
the lower right-hand corner and select *Get Viewpoint*. This will
copy a string of the form '[x,y,z],angle' to your local clipboard.
Go back to Sage and type ``Img = P.projection().tikz([x,y,z],angle)``.
The inputs ``view`` and ``angle`` can also be obtained from the
viewer Jmol::
1) Right click on the image
2) Select ``Console``
3) Select the tab ``State``
4) Scroll to the line ``moveto``
It reads something like::
moveto 0.0 {x y z angle} Scale
The ``view`` is then [x,y,z] and ``angle`` is angle.
The following number is the scale.
Jmol performs a rotation of ``angle`` degrees along the
vector [x,y,z] and show the result from the z-axis.
EXAMPLES::
sage: P1 = polytopes.small_rhombicuboctahedron()
sage: Image1 = P1.projection().tikz([1,3,5], 175, scale=4)
sage: type(Image1)
<class 'sage.misc.latex.LatexExpr'>
sage: print('\n'.join(Image1.splitlines()[:4]))
\begin{tikzpicture}%
[x={(-0.939161cm, 0.244762cm)},
y={(0.097442cm, -0.482887cm)},
z={(0.329367cm, 0.840780cm)},
sage: with open('polytope-tikz1.tex', 'w') as f: # not tested
....: _ = f.write(Image1)
sage: P2 = Polyhedron(vertices=[[1, 1],[1, 2],[2, 1]])
sage: Image2 = P2.projection().tikz(scale=3, edge_color='blue!95!black', facet_color='orange!95!black', opacity=0.4, vertex_color='yellow', axis=True)
sage: type(Image2)
<class 'sage.misc.latex.LatexExpr'>
sage: print('\n'.join(Image2.splitlines()[:4]))
\begin{tikzpicture}%
[scale=3.000000,
back/.style={loosely dotted, thin},
edge/.style={color=blue!95!black, thick},
sage: with open('polytope-tikz2.tex', 'w') as f: # not tested
....: _ = f.write(Image2)
sage: P3 = Polyhedron(vertices=[[-1, -1, 2],[-1, 2, -1],[2, -1, -1]])
sage: P3
A 2-dimensional polyhedron in ZZ^3 defined as the convex hull of 3 vertices
sage: Image3 = P3.projection().tikz([0.5,-1,-0.1], 55, scale=3, edge_color='blue!95!black',facet_color='orange!95!black', opacity=0.7, vertex_color='yellow', axis=True)
sage: print('\n'.join(Image3.splitlines()[:4]))
\begin{tikzpicture}%
[x={(0.658184cm, -0.242192cm)},
y={(-0.096240cm, 0.912008cm)},
z={(-0.746680cm, -0.331036cm)},
sage: with open('polytope-tikz3.tex', 'w') as f: # not tested
....: _ = f.write(Image3)
sage: P = Polyhedron(vertices=[[1,1,0,0],[1,2,0,0],[2,1,0,0],[0,0,1,0],[0,0,0,1]])
sage: P
A 4-dimensional polyhedron in ZZ^4 defined as the convex hull of 5 vertices
sage: P.projection().tikz()
Traceback (most recent call last):
...
NotImplementedError: The polytope has to live in 2 or 3 dimensions.
.. TODO::
Make it possible to draw Schlegel diagram for 4-polytopes. ::
sage: P=Polyhedron(vertices=[[1,1,0,0],[1,2,0,0],[2,1,0,0],[0,0,1,0],[0,0,0,1]])
sage: P
A 4-dimensional polyhedron in ZZ^4 defined as the convex hull of 5 vertices
sage: P.projection().tikz()
Traceback (most recent call last):
...
NotImplementedError: The polytope has to live in 2 or 3 dimensions.
Make it possible to draw 3-polytopes living in higher dimension.
"""
if self.polyhedron_ambient_dim > 3 or self.polyhedron_ambient_dim < 2:
raise NotImplementedError("The polytope has to live in 2 or 3 dimensions.")
elif self.polyhedron_dim < 2 or self.polyhedron_dim > 3:
raise NotImplementedError("The polytope has to be 2 or 3-dimensional.")
elif self.polyhedron_ambient_dim == 2: # self is a polygon in 2-space
return self._tikz_2d(scale, edge_color, facet_color, opacity,
vertex_color, axis)
elif self.polyhedron_dim == 2: # self is a polygon in 3-space
return self._tikz_2d_in_3d(view, angle, scale, edge_color,
facet_color, opacity, vertex_color, axis)
else: # self is a 3-polytope in 3-space
return self._tikz_3d_in_3d(view, angle, scale, edge_color,
facet_color, opacity, vertex_color, axis) | [
"def",
"tikz",
"(",
"self",
",",
"view",
"=",
"[",
"0",
",",
"0",
",",
"1",
"]",
",",
"angle",
"=",
"0",
",",
"scale",
"=",
"1",
",",
"edge_color",
"=",
"'blue!95!black'",
",",
"facet_color",
"=",
"'blue!95!black'",
",",
"opacity",
"=",
"0.8",
",",
"vertex_color",
"=",
"'green'",
",",
"axis",
"=",
"False",
")",
":",
"if",
"self",
".",
"polyhedron_ambient_dim",
">",
"3",
"or",
"self",
".",
"polyhedron_ambient_dim",
"<",
"2",
":",
"raise",
"NotImplementedError",
"(",
"\"The polytope has to live in 2 or 3 dimensions.\"",
")",
"elif",
"self",
".",
"polyhedron_dim",
"<",
"2",
"or",
"self",
".",
"polyhedron_dim",
">",
"3",
":",
"raise",
"NotImplementedError",
"(",
"\"The polytope has to be 2 or 3-dimensional.\"",
")",
"elif",
"self",
".",
"polyhedron_ambient_dim",
"==",
"2",
":",
"# self is a polygon in 2-space",
"return",
"self",
".",
"_tikz_2d",
"(",
"scale",
",",
"edge_color",
",",
"facet_color",
",",
"opacity",
",",
"vertex_color",
",",
"axis",
")",
"elif",
"self",
".",
"polyhedron_dim",
"==",
"2",
":",
"# self is a polygon in 3-space",
"return",
"self",
".",
"_tikz_2d_in_3d",
"(",
"view",
",",
"angle",
",",
"scale",
",",
"edge_color",
",",
"facet_color",
",",
"opacity",
",",
"vertex_color",
",",
"axis",
")",
"else",
":",
"# self is a 3-polytope in 3-space",
"return",
"self",
".",
"_tikz_3d_in_3d",
"(",
"view",
",",
"angle",
",",
"scale",
",",
"edge_color",
",",
"facet_color",
",",
"opacity",
",",
"vertex_color",
",",
"axis",
")"
] | https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/geometry/polyhedron/plot.py#L1181-L1309 |
||
SymbiFlow/prjxray | 5349556bc2c230801d6df0cf11bccb9cfd171639 | prjxray/tile_segbits.py | python | parsebit | (val) | return Bit(
word_column=int(seg_word_column),
word_bit=int(word_bit_n),
isset=isset,
) | Return "!012_23" => (12, 23, False) | Return "!012_23" => (12, 23, False) | [
"Return",
"!012_23",
"=",
">",
"(",
"12",
"23",
"False",
")"
] | def parsebit(val):
'''Return "!012_23" => (12, 23, False)'''
isset = True
# Default is 0. Skip explicit call outs
if val[0] == '!':
isset = False
val = val[1:]
# 28_05 => 28, 05
parts = val.split('_')
assert len(parts) == 2, val
seg_word_column, word_bit_n = parts
return Bit(
word_column=int(seg_word_column),
word_bit=int(word_bit_n),
isset=isset,
) | [
"def",
"parsebit",
"(",
"val",
")",
":",
"isset",
"=",
"True",
"# Default is 0. Skip explicit call outs",
"if",
"val",
"[",
"0",
"]",
"==",
"'!'",
":",
"isset",
"=",
"False",
"val",
"=",
"val",
"[",
"1",
":",
"]",
"# 28_05 => 28, 05",
"parts",
"=",
"val",
".",
"split",
"(",
"'_'",
")",
"assert",
"len",
"(",
"parts",
")",
"==",
"2",
",",
"val",
"seg_word_column",
",",
"word_bit_n",
"=",
"parts",
"return",
"Bit",
"(",
"word_column",
"=",
"int",
"(",
"seg_word_column",
")",
",",
"word_bit",
"=",
"int",
"(",
"word_bit_n",
")",
",",
"isset",
"=",
"isset",
",",
")"
] | https://github.com/SymbiFlow/prjxray/blob/5349556bc2c230801d6df0cf11bccb9cfd171639/prjxray/tile_segbits.py#L41-L57 |
|
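A quick sanity check of parsebit, using only the behaviour its docstring and body document; the import path is inferred from the record's file path, and Bit is the helper class defined elsewhere in prjxray/tile_segbits.py:

from prjxray.tile_segbits import parsebit

parsebit("!28_05")  # -> Bit(word_column=28, word_bit=5, isset=False); a leading '!' marks the bit as cleared
parsebit("28_05")   # -> Bit(word_column=28, word_bit=5, isset=True); no '!' means the bit is set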
researchmm/tasn | 5dba8ccc096cedc63913730eeea14a9647911129 | tasn-mxnet/python/mxnet/symbol/symbol.py | python | Symbol.broadcast_like | (self, *args, **kwargs) | return op.broadcast_like(self, *args, **kwargs) | Convenience fluent method for :py:func:`broadcast_like`.
The arguments are the same as for :py:func:`broadcast_like`, with
this array as data. | Convenience fluent method for :py:func:`broadcast_like`. | [
"Convenience",
"fluent",
"method",
"for",
":",
"py",
":",
"func",
":",
"broadcast_like",
"."
] | def broadcast_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`broadcast_like`.
The arguments are the same as for :py:func:`broadcast_like`, with
this array as data.
"""
return op.broadcast_like(self, *args, **kwargs) | [
"def",
"broadcast_like",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"op",
".",
"broadcast_like",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/researchmm/tasn/blob/5dba8ccc096cedc63913730eeea14a9647911129/tasn-mxnet/python/mxnet/symbol/symbol.py#L2018-L2024 |
|
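A minimal sketch of the fluent Symbol.broadcast_like call above, assuming the bundled tasn-mxnet build behaves like upstream MXNet's symbolic API:

import mxnet as mx

lhs = mx.sym.Variable('lhs')   # e.g. a (1, 3) symbol
rhs = mx.sym.Variable('rhs')   # e.g. a (2, 3) symbol
out = lhs.broadcast_like(rhs)  # fluent form of mx.sym.broadcast_like(lhs, rhs): lhs is broadcast to rhs's shape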
miyakogi/pyppeteer | f5313d0e7f973c57ed31fa443cea1834e223a96c | pyppeteer/dialog.py | python | Dialog.accept | (self, promptText: str = '') | Accept the dialog.
* ``promptText`` (str): A text to enter in prompt. If the dialog's type
is not prompt, this does not cause any effect. | Accept the dialog. | [
"Accept",
"the",
"dialog",
"."
] | async def accept(self, promptText: str = '') -> None:
"""Accept the dialog.
* ``promptText`` (str): A text to enter in prompt. If the dialog's type
is not prompt, this does not cause any effect.
"""
self._handled = True
await self._client.send('Page.handleJavaScriptDialog', {
'accept': True,
'promptText': promptText,
}) | [
"async",
"def",
"accept",
"(",
"self",
",",
"promptText",
":",
"str",
"=",
"''",
")",
"->",
"None",
":",
"self",
".",
"_handled",
"=",
"True",
"await",
"self",
".",
"_client",
".",
"send",
"(",
"'Page.handleJavaScriptDialog'",
",",
"{",
"'accept'",
":",
"True",
",",
"'promptText'",
":",
"promptText",
",",
"}",
")"
] | https://github.com/miyakogi/pyppeteer/blob/f5313d0e7f973c57ed31fa443cea1834e223a96c/pyppeteer/dialog.py#L71-L81 |
||
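A hedged usage sketch for Dialog.accept; the page.on('dialog', ...) hookup follows pyppeteer's usual event pattern, and the prompt text is purely illustrative:

import asyncio
from pyppeteer import launch

async def main():
    browser = await launch()
    page = await browser.newPage()
    # Accept any JavaScript dialog; the text is only used when the dialog is a prompt.
    page.on('dialog', lambda dialog: asyncio.ensure_future(dialog.accept('hello')))
    await page.evaluate('() => prompt("Say something")')
    await browser.close()

asyncio.get_event_loop().run_until_complete(main())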
AI-ON/Multitask-and-Transfer-Learning | 31e0798d436e314ddbc64c4a6b935df1b2160e50 | architectures/chainer/models/predictive_autoencoder.py | python | normalize_2d | (x) | return exp / denominator | [] | def normalize_2d(x):
exp = F.exp(x[0])
sums = F.sum(F.sum(exp, axis=-1), axis=-1)
expanded = F.expand_dims(F.expand_dims(sums, axis=-1), axis=-1)
denominator = F.tile(expanded, (1, 160, 210))
return exp / denominator | [
"def",
"normalize_2d",
"(",
"x",
")",
":",
"exp",
"=",
"F",
".",
"exp",
"(",
"x",
"[",
"0",
"]",
")",
"sums",
"=",
"F",
".",
"sum",
"(",
"F",
".",
"sum",
"(",
"exp",
",",
"axis",
"=",
"-",
"1",
")",
",",
"axis",
"=",
"-",
"1",
")",
"expanded",
"=",
"F",
".",
"expand_dims",
"(",
"F",
".",
"expand_dims",
"(",
"sums",
",",
"axis",
"=",
"-",
"1",
")",
",",
"axis",
"=",
"-",
"1",
")",
"denominator",
"=",
"F",
".",
"tile",
"(",
"expanded",
",",
"(",
"1",
",",
"160",
",",
"210",
")",
")",
"return",
"exp",
"/",
"denominator"
] | https://github.com/AI-ON/Multitask-and-Transfer-Learning/blob/31e0798d436e314ddbc64c4a6b935df1b2160e50/architectures/chainer/models/predictive_autoencoder.py#L247-L252 |
|||
Tencent/bk-bcs-saas | 2b437bf2f5fd5ce2078f7787c3a12df609f7679d | bcs-app/backend/container_service/clusters/views/node.py | python | NodeLabelQueryCreateViewSet.create_node_labels | (self, request, project_id) | return Response({"code": 0, "message": _("创建成功!")}) | 添加节点标签 | 添加节点标签 | [
"添加节点标签"
] | def create_node_labels(self, request, project_id):
"""添加节点标签"""
# 解析参数
node_id_list, node_label_info = self.get_create_label_params(request)
# 校验label中key和value
self.label_regex(node_label_info)
# 获取数据库中节点的label
# NOTE: 节点为正常状态时,才允许设置标签
project_node_info = self.get_node_list(request, project_id, None).get('results') or []
if not project_node_info:
raise error_codes.APIError(_("当前项目下节点为空,请确认"))
all_node_id_list = []
all_node_id_ip_map = {}
for info in project_node_info:
all_node_id_list.append(info["id"])
all_node_id_ip_map[info["id"]] = {"inner_ip": info["inner_ip"], "cluster_id": info["cluster_id"]}
if info['id'] in node_id_list and info['status'] != CommonStatus.Normal:
raise error_codes.CheckFailed(_("节点不是正常状态时,不允许设置标签"))
diff_node_id_list = set(node_id_list) - set(all_node_id_list)
if diff_node_id_list:
raise error_codes.CheckFailed(_("节点ID [{}] 不属于当前项目,请确认").format(",".join(diff_node_id_list)))
# 校验权限
self.check_perm(request, project_id, all_node_id_ip_map, node_id_list)
# 匹配数据
pre_node_labels = self.get_labels_by_node(request, project_id, node_id_list)
label_operation_map = self.get_label_operation(
pre_node_labels, node_label_info, node_id_list, all_node_id_ip_map
)
# k8s 是以节点为维度
self.create_node_label_via_k8s(request, project_id, label_operation_map)
# 写入数据库
self.create_or_update(request, project_id, label_operation_map)
client.ContextActivityLogClient(
project_id=project_id,
user=request.user.username,
resource_type="node",
resource=str(node_id_list),
resource_id=str(node_id_list),
extra=json.dumps(node_label_info),
description=_("节点打标签"),
).log_add(activity_status="succeed")
return Response({"code": 0, "message": _("创建成功!")}) | [
"def",
"create_node_labels",
"(",
"self",
",",
"request",
",",
"project_id",
")",
":",
"# 解析参数",
"node_id_list",
",",
"node_label_info",
"=",
"self",
".",
"get_create_label_params",
"(",
"request",
")",
"# 校验label中key和value",
"self",
".",
"label_regex",
"(",
"node_label_info",
")",
"# 获取数据库中节点的label",
"# NOTE: 节点为正常状态时,才允许设置标签",
"project_node_info",
"=",
"self",
".",
"get_node_list",
"(",
"request",
",",
"project_id",
",",
"None",
")",
".",
"get",
"(",
"'results'",
")",
"or",
"[",
"]",
"if",
"not",
"project_node_info",
":",
"raise",
"error_codes",
".",
"APIError",
"(",
"_",
"(",
"\"当前项目下节点为空,请确认\"))",
"",
"",
"all_node_id_list",
"=",
"[",
"]",
"all_node_id_ip_map",
"=",
"{",
"}",
"for",
"info",
"in",
"project_node_info",
":",
"all_node_id_list",
".",
"append",
"(",
"info",
"[",
"\"id\"",
"]",
")",
"all_node_id_ip_map",
"[",
"info",
"[",
"\"id\"",
"]",
"]",
"=",
"{",
"\"inner_ip\"",
":",
"info",
"[",
"\"inner_ip\"",
"]",
",",
"\"cluster_id\"",
":",
"info",
"[",
"\"cluster_id\"",
"]",
"}",
"if",
"info",
"[",
"'id'",
"]",
"in",
"node_id_list",
"and",
"info",
"[",
"'status'",
"]",
"!=",
"CommonStatus",
".",
"Normal",
":",
"raise",
"error_codes",
".",
"CheckFailed",
"(",
"_",
"(",
"\"节点不是正常状态时,不允许设置标签\"))",
"",
"",
"diff_node_id_list",
"=",
"set",
"(",
"node_id_list",
")",
"-",
"set",
"(",
"all_node_id_list",
")",
"if",
"diff_node_id_list",
":",
"raise",
"error_codes",
".",
"CheckFailed",
"(",
"_",
"(",
"\"节点ID [{}] 不属于当前项目,请确认\").format(\",\".join(diff_nod",
"e",
"_",
"id_lis",
"t",
")))",
"",
"",
"",
"",
"",
"",
"",
"# 校验权限",
"self",
".",
"check_perm",
"(",
"request",
",",
"project_id",
",",
"all_node_id_ip_map",
",",
"node_id_list",
")",
"# 匹配数据",
"pre_node_labels",
"=",
"self",
".",
"get_labels_by_node",
"(",
"request",
",",
"project_id",
",",
"node_id_list",
")",
"label_operation_map",
"=",
"self",
".",
"get_label_operation",
"(",
"pre_node_labels",
",",
"node_label_info",
",",
"node_id_list",
",",
"all_node_id_ip_map",
")",
"# k8s 是以节点为维度",
"self",
".",
"create_node_label_via_k8s",
"(",
"request",
",",
"project_id",
",",
"label_operation_map",
")",
"# 写入数据库",
"self",
".",
"create_or_update",
"(",
"request",
",",
"project_id",
",",
"label_operation_map",
")",
"client",
".",
"ContextActivityLogClient",
"(",
"project_id",
"=",
"project_id",
",",
"user",
"=",
"request",
".",
"user",
".",
"username",
",",
"resource_type",
"=",
"\"node\"",
",",
"resource",
"=",
"str",
"(",
"node_id_list",
")",
",",
"resource_id",
"=",
"str",
"(",
"node_id_list",
")",
",",
"extra",
"=",
"json",
".",
"dumps",
"(",
"node_label_info",
")",
",",
"description",
"=",
"_",
"(",
"\"节点打标签\"),",
"",
"",
")",
".",
"log_add",
"(",
"activity_status",
"=",
"\"succeed\"",
")",
"return",
"Response",
"(",
"{",
"\"code\"",
":",
"0",
",",
"\"message\"",
":",
"_",
"(",
"\"创建成功!\")})",
"",
"",
""
] | https://github.com/Tencent/bk-bcs-saas/blob/2b437bf2f5fd5ce2078f7787c3a12df609f7679d/bcs-app/backend/container_service/clusters/views/node.py#L725-L767 |
|
guildai/guildai | 1665985a3d4d788efc1a3180ca51cc417f71ca78 | guild/external/setuptools/command/sdist.py | python | sdist.make_distribution | (self) | Workaround for #516 | Workaround for #516 | [
"Workaround",
"for",
"#516"
] | def make_distribution(self):
"""
Workaround for #516
"""
with self._remove_os_link():
orig.sdist.make_distribution(self) | [
"def",
"make_distribution",
"(",
"self",
")",
":",
"with",
"self",
".",
"_remove_os_link",
"(",
")",
":",
"orig",
".",
"sdist",
".",
"make_distribution",
"(",
"self",
")"
] | https://github.com/guildai/guildai/blob/1665985a3d4d788efc1a3180ca51cc417f71ca78/guild/external/setuptools/command/sdist.py#L73-L78 |
||
jython/frozen-mirror | b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99 | lib-python/2.7/hotshot/__init__.py | python | Profile.stop | (self) | Stop the profiler. | Stop the profiler. | [
"Stop",
"the",
"profiler",
"."
] | def stop(self):
"""Stop the profiler."""
self._prof.stop() | [
"def",
"stop",
"(",
"self",
")",
":",
"self",
".",
"_prof",
".",
"stop",
"(",
")"
] | https://github.com/jython/frozen-mirror/blob/b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99/lib-python/2.7/hotshot/__init__.py#L38-L40 |
||
fossasia/x-mario-center | fe67afe28d995dcf4e2498e305825a4859566172 | build/lib.linux-i686-2.7/softwarecenter/ui/gtk3/app.py | python | SoftwareCenterAppGtk3.show_available_packages | (self, packages) | Show packages given as arguments in the available_pane
If the list of packages is only one element long show that,
otherwise turn it into a comma separated search | Show packages given as arguments in the available_pane
If the list of packages is only one element long show that,
otherwise turn it into a comma separated search | [
"Show",
"packages",
"given",
"as",
"arguments",
"in",
"the",
"available_pane",
"If",
"the",
"list",
"of",
"packages",
"is",
"only",
"one",
"element",
"long",
"show",
"that",
"otherwise",
"turn",
"it",
"into",
"a",
"comma",
"seperated",
"search"
] | def show_available_packages(self, packages):
""" Show packages given as arguments in the available_pane
If the list of packages is only one element long show that,
otherwise turn it into a comma separated search
"""
try:
search_text, app = parse_packages_args(packages)
except DebFileOpenError as e:
LOG.exception("show_available_packages: can not open %r, error:",
packages)
dialogs.error(None,
_("Error"),
_("The file “%s” could not be opened.") % e.path)
search_text = app = None
LOG.info('show_available_packages: search_text is %r, app is %r.',
search_text, app)
if search_text:
self.available_pane.init_view()
self.available_pane.searchentry.set_text(search_text)
elif app is not None:
self.show_app(app)
else:
# normal startup, show the lobby (it will have a spinner when
# its not ready yet) - it will also initialize the view
self.view_manager.set_active_view(ViewPages.AVAILABLE) | [
"def",
"show_available_packages",
"(",
"self",
",",
"packages",
")",
":",
"try",
":",
"search_text",
",",
"app",
"=",
"parse_packages_args",
"(",
"packages",
")",
"except",
"DebFileOpenError",
"as",
"e",
":",
"LOG",
".",
"exception",
"(",
"\"show_available_packages: can not open %r, error:\"",
",",
"packages",
")",
"dialogs",
".",
"error",
"(",
"None",
",",
"_",
"(",
"\"Error\"",
")",
",",
"_",
"(",
"\"The file “%s” could not be opened.\") % ",
"e",
"p",
"t",
"h",
")",
"",
"search_text",
"=",
"app",
"=",
"None",
"LOG",
".",
"info",
"(",
"'show_available_packages: search_text is %r, app is %r.'",
",",
"search_text",
",",
"app",
")",
"if",
"search_text",
":",
"self",
".",
"available_pane",
".",
"init_view",
"(",
")",
"self",
".",
"available_pane",
".",
"searchentry",
".",
"set_text",
"(",
"search_text",
")",
"elif",
"app",
"is",
"not",
"None",
":",
"self",
".",
"show_app",
"(",
"app",
")",
"else",
":",
"# normal startup, show the lobby (it will have a spinner when",
"# its not ready yet) - it will also initialize the view",
"self",
".",
"view_manager",
".",
"set_active_view",
"(",
"ViewPages",
".",
"AVAILABLE",
")"
] | https://github.com/fossasia/x-mario-center/blob/fe67afe28d995dcf4e2498e305825a4859566172/build/lib.linux-i686-2.7/softwarecenter/ui/gtk3/app.py#L1324-L1350 |
||
NervanaSystems/ngraph-python | ac032c83c7152b615a9ad129d54d350f9d6a2986 | ngraph/transformers/exop.py | python | TensorViewDecl.key | (self) | return self.tensor_description.parameter_key | Returns: A tuple unique to this view of the tensor. | Returns: A tuple unique to this view of the tensor. | [
"Returns",
":",
"A",
"tuple",
"unique",
"to",
"this",
"view",
"of",
"the",
"tensor",
"."
] | def key(self):
"""
Returns: A tuple unique to this view of the tensor.
"""
return self.tensor_description.parameter_key | [
"def",
"key",
"(",
"self",
")",
":",
"return",
"self",
".",
"tensor_description",
".",
"parameter_key"
] | https://github.com/NervanaSystems/ngraph-python/blob/ac032c83c7152b615a9ad129d54d350f9d6a2986/ngraph/transformers/exop.py#L1175-L1180 |
|
Tautulli/Tautulli | 2410eb33805aaac4bd1c5dad0f71e4f15afaf742 | lib/html5lib/treebuilders/base.py | python | TreeBuilder.elementInActiveFormattingElements | (self, name) | return False | Check if an element exists between the end of the active
formatting elements and the last marker. If it does, return it, else
return false | Check if an element exists between the end of the active
formatting elements and the last marker. If it does, return it, else
return false | [
"Check",
"if",
"an",
"element",
"exists",
"between",
"the",
"end",
"of",
"the",
"active",
"formatting",
"elements",
"and",
"the",
"last",
"marker",
".",
"If",
"it",
"does",
"return",
"it",
"else",
"return",
"false"
] | def elementInActiveFormattingElements(self, name):
"""Check if an element exists between the end of the active
formatting elements and the last marker. If it does, return it, else
return false"""
for item in self.activeFormattingElements[::-1]:
# Check for Marker first because if it's a Marker it doesn't have a
# name attribute.
if item == Marker:
break
elif item.name == name:
return item
return False | [
"def",
"elementInActiveFormattingElements",
"(",
"self",
",",
"name",
")",
":",
"for",
"item",
"in",
"self",
".",
"activeFormattingElements",
"[",
":",
":",
"-",
"1",
"]",
":",
"# Check for Marker first because if it's a Marker it doesn't have a",
"# name attribute.",
"if",
"item",
"==",
"Marker",
":",
"break",
"elif",
"item",
".",
"name",
"==",
"name",
":",
"return",
"item",
"return",
"False"
] | https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/html5lib/treebuilders/base.py#L269-L281 |
|
ducksboard/libsaas | 615981a3336f65be9d51ae95a48aed9ad3bd1c3c | libsaas/services/bitbucket/issues.py | python | RepoIssues.filter | (self, filters) | return http.Request('GET', url), parsers.parse_json | Search through the issues applying filters.
Look at https://confluence.atlassian.com/display/BITBUCKET/Issues
to get a complete list of possible filters.
:var filters: A dictionary of filters. Keys are strings corresponding
to the filter names and values are either string filter values or
tuples, in which case their conditions are implicitly ORed. For
example, {"title": ("~one", "~two")} would mean issues with the
title containing either "one" or "two"
:vartype filters: dict of str to str or tuple of str | Search through the issues applying filters. | [
"Search",
"through",
"the",
"issues",
"applying",
"filters",
"."
] | def filter(self, filters):
"""
Search through the issues applying filters.
Look at https://confluence.atlassian.com/display/BITBUCKET/Issues
to get a complete list of possible filters.
:var filters: A dictionary of filters. Keys are strings corresponding
to the filter names and values are either string filter values or
tuples, in which case their conditions are implicitly ORed. For
example, {"title": ("~one", "~two")} would mean issues with the
title containing either "one" or "two"
:vartype filters: dict of str to str or tuple of str
"""
# because http.Request needs params to be a dict of strings to strings
# (roughly) and since BitBucket wants repeated parameters to express
# OR, we'll do the quoting by hand ourselves
def flatten_conditions(filters):
for key, val in filters.items():
if isinstance(val, (list, tuple)):
for v in val:
yield (port.to_b(key), port.to_b(v))
else:
yield (port.to_b(key), port.to_b(val))
to_encode = tuple(flatten_conditions(filters))
qs = port.urlencode(to_encode)
url = '{0}/?{1}'.format(self.get_url(), qs)
return http.Request('GET', url), parsers.parse_json | [
"def",
"filter",
"(",
"self",
",",
"filters",
")",
":",
"# because http.Request needs params to be a dict of strings to strings",
"# (roughly) and since BitBucket wants repeated parameters to express",
"# OR, we'll do the quoting by hand ourselves",
"def",
"flatten_conditions",
"(",
"filters",
")",
":",
"for",
"key",
",",
"val",
"in",
"filters",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"for",
"v",
"in",
"val",
":",
"yield",
"(",
"port",
".",
"to_b",
"(",
"key",
")",
",",
"port",
".",
"to_b",
"(",
"v",
")",
")",
"else",
":",
"yield",
"(",
"port",
".",
"to_b",
"(",
"key",
")",
",",
"port",
".",
"to_b",
"(",
"val",
")",
")",
"to_encode",
"=",
"tuple",
"(",
"flatten_conditions",
"(",
"filters",
")",
")",
"qs",
"=",
"port",
".",
"urlencode",
"(",
"to_encode",
")",
"url",
"=",
"'{0}/?{1}'",
".",
"format",
"(",
"self",
".",
"get_url",
"(",
")",
",",
"qs",
")",
"return",
"http",
".",
"Request",
"(",
"'GET'",
",",
"url",
")",
",",
"parsers",
".",
"parse_json"
] | https://github.com/ducksboard/libsaas/blob/615981a3336f65be9d51ae95a48aed9ad3bd1c3c/libsaas/services/bitbucket/issues.py#L128-L157 |
|
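To make the OR semantics of RepoIssues.filter concrete: tuple values expand into repeated query parameters, plain strings into single ones. repo_issues below stands for an already-constructed RepoIssues resource (building it via the libsaas Bitbucket service is outside this record), and what the call ultimately returns depends on the executor libsaas is configured with:

filters = {
    'title': ('~one', '~two'),  # title contains "one" OR "two"
    'status': 'new',            # AND status is "new"
}
result = repo_issues.filter(filters)  # repo_issues: assumed, pre-built RepoIssues instance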
openstack/cinder | 23494a6d6c51451688191e1847a458f1d3cdcaa5 | cinder/zonemanager/utils.py | python | get_formatted_wwn | (wwn_str) | Utility API that formats WWN to insert ':'. | Utility API that formats WWN to insert ':'. | [
"Utility",
"API",
"that",
"formats",
"WWN",
"to",
"insert",
":",
"."
] | def get_formatted_wwn(wwn_str):
"""Utility API that formats WWN to insert ':'."""
if (len(wwn_str) != 16):
return wwn_str.lower()
else:
return (':'.join([wwn_str[i:i + 2]
for i in range(0, len(wwn_str), 2)])).lower() | [
"def",
"get_formatted_wwn",
"(",
"wwn_str",
")",
":",
"if",
"(",
"len",
"(",
"wwn_str",
")",
"!=",
"16",
")",
":",
"return",
"wwn_str",
".",
"lower",
"(",
")",
"else",
":",
"return",
"(",
"':'",
".",
"join",
"(",
"[",
"wwn_str",
"[",
"i",
":",
"i",
"+",
"2",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"wwn_str",
")",
",",
"2",
")",
"]",
")",
")",
".",
"lower",
"(",
")"
] | https://github.com/openstack/cinder/blob/23494a6d6c51451688191e1847a458f1d3cdcaa5/cinder/zonemanager/utils.py#L67-L73 |
||
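Both branches of get_formatted_wwn can be read straight off the code:

get_formatted_wwn('21000024FF7D9B29')
# -> '21:00:00:24:ff:7d:9b:29'  (a bare 16-character WWN gains colon-separated byte pairs)

get_formatted_wwn('50:06:01:60:3b:60:24:5d')
# -> '50:06:01:60:3b:60:24:5d'  (anything not 16 characters long is only lowercased)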
ailabx/ailabx | 4a8c701a3604bbc34157167224588041944ac1a2 | codes/qlib-main/qlib/workflow/online/utils.py | python | OnlineToolR.get_online_tag | (self, recorder: Recorder) | return tags.get(self.ONLINE_KEY, self.OFFLINE_TAG) | Given a model recorder and return its online tag.
Args:
recorder (Recorder): an instance of recorder
Returns:
str: the online tag | Given a model recorder and return its online tag. | [
"Given",
"a",
"model",
"recorder",
"and",
"return",
"its",
"online",
"tag",
"."
] | def get_online_tag(self, recorder: Recorder) -> str:
"""
Given a model recorder and return its online tag.
Args:
recorder (Recorder): an instance of recorder
Returns:
str: the online tag
"""
tags = recorder.list_tags()
return tags.get(self.ONLINE_KEY, self.OFFLINE_TAG) | [
"def",
"get_online_tag",
"(",
"self",
",",
"recorder",
":",
"Recorder",
")",
"->",
"str",
":",
"tags",
"=",
"recorder",
".",
"list_tags",
"(",
")",
"return",
"tags",
".",
"get",
"(",
"self",
".",
"ONLINE_KEY",
",",
"self",
".",
"OFFLINE_TAG",
")"
] | https://github.com/ailabx/ailabx/blob/4a8c701a3604bbc34157167224588041944ac1a2/codes/qlib-main/qlib/workflow/online/utils.py#L118-L129 |
|
bruceyang2012/Face-detection-with-mobilenet-ssd | 58fafb6e93d28531797aac1e9a4436730c8cee7c | keras_ssd_loss.py | python | SSDLoss.compute_loss | (self, y_true, y_pred) | return total_loss | Compute the loss of the SSD model prediction against the ground truth.
Arguments:
y_true (array): A Numpy array of shape `(batch_size, #boxes, #classes + 8)`,
where `#boxes` is the total number of boxes that the model predicts
per image. Be careful to make sure that the index of each given
box in `y_true` is the same as the index for the corresponding
box in `y_pred`. The last axis must have length `#classes + 8` and contain
`[classes one-hot encoded, 4 ground truth box coordinates, 4 arbitrary entries]`
in this order, including the background class. The last four entries of the
last axis are not used by this function and therefore their contents are
irrelevant, they only exist so that `y_true` has the same shape as `y_pred`,
where the last four entries of the last axis contain the anchor box
coordinates, which are needed during inference. Important: Boxes that
you want the cost function to ignore need to have a one-hot
class vector of all zeros.
y_pred (Keras tensor): The model prediction. The shape is identical
to that of `y_true`.
Returns:
A scalar, the total multitask loss for classification and localization. | Compute the loss of the SSD model prediction against the ground truth. | [
"Compute",
"the",
"loss",
"of",
"the",
"SSD",
"model",
"prediction",
"against",
"the",
"ground",
"truth",
"."
] | def compute_loss(self, y_true, y_pred):
'''
Compute the loss of the SSD model prediction against the ground truth.
Arguments:
y_true (array): A Numpy array of shape `(batch_size, #boxes, #classes + 8)`,
where `#boxes` is the total number of boxes that the model predicts
per image. Be careful to make sure that the index of each given
box in `y_true` is the same as the index for the corresponding
box in `y_pred`. The last axis must have length `#classes + 8` and contain
`[classes one-hot encoded, 4 ground truth box coordinates, 4 arbitrary entries]`
in this order, including the background class. The last four entries of the
last axis are not used by this function and therefore their contents are
irrelevant, they only exist so that `y_true` has the same shape as `y_pred`,
where the last four entries of the last axis contain the anchor box
coordinates, which are needed during inference. Important: Boxes that
you want the cost function to ignore need to have a one-hot
class vector of all zeros.
y_pred (Keras tensor): The model prediction. The shape is identical
to that of `y_true`.
Returns:
A scalar, the total multitask loss for classification and localization.
'''
batch_size = tf.shape(y_pred)[0] # Output dtype: tf.int32
n_boxes = tf.shape(y_pred)[
1] # Output dtype: tf.int32, note that `n_boxes` in this context denotes the total number of boxes per image, not the number of boxes per cell
# 1: Compute the losses for class and box predictions for every box
classification_loss = tf.cast(
self.log_loss(y_true[:, :, :-12], y_pred[:, :, :-12]),
dtype=tf.float32) # Output shape: (batch_size, n_boxes)
localization_loss = tf.cast(
self.smooth_L1_loss(y_true[:, :, -12:-8], y_pred[:, :, -12:-8]),
dtype=tf.float32) # Output shape: (batch_size, n_boxes)
# 2: Compute the classification losses for the positive and negative targets
# Create masks for the positive and negative ground truth classes
negatives = y_true[:, :, 0] # Tensor of shape (batch_size, n_boxes)
positives = tf.cast(tf.reduce_max(y_true[:, :, 1:-12], axis=-1),
dtype=tf.float32) # Tensor of shape (batch_size, n_boxes)
# Count the number of positive boxes (classes 1 to n) in y_true across the whole batch
n_positive = tf.reduce_sum(positives)
# Now mask all negative boxes and sum up the losses for the positive boxes PER batch item
# (Keras loss functions must output one scalar loss value PER batch item, rather than just
# one scalar for the entire batch, that's why we're not summing across all axes)
pos_class_loss = tf.reduce_sum(classification_loss * positives, axis=-1) # Tensor of shape (batch_size,)
# Compute the classification loss for the negative default boxes (if there are any)
# First, compute the classification loss for all negative boxes
neg_class_loss_all = classification_loss * negatives # Tensor of shape (batch_size, n_boxes)
n_neg_losses = tf.math.count_nonzero(neg_class_loss_all,
dtype=tf.int32) # The number of non-zero loss entries in `neg_class_loss_all`
# What's the point of `n_neg_losses`? For the next step, which will be to compute which negative boxes enter the classification
# loss, we don't just want to know how many negative ground truth boxes there are, but for how many of those there actually is
# a positive (i.e. non-zero) loss. This is necessary because `tf.nn.top-k()` in the function below will pick the top k boxes with
# the highest losses no matter what, even if it receives a vector where all losses are zero. In the unlikely event that all negative
# classification losses ARE actually zero though, this behavior might lead to `tf.nn.top-k()` returning the indices of positive
# boxes, leading to an incorrect negative classification loss computation, and hence an incorrect overall loss computation.
# We therefore need to make sure that `n_negative_keep`, which assumes the role of the `k` argument in `tf.nn.top-k()`,
# is at most the number of negative boxes for which there is a positive classification loss.
# Compute the number of negative examples we want to account for in the loss
# We'll keep at most `self.neg_pos_ratio` times the number of positives in `y_true`, but at least `self.n_neg_min` (unless `n_neg_loses` is smaller)
n_negative_keep = tf.minimum(
tf.maximum(self.neg_pos_ratio * tf.cast(n_positive, dtype=tf.int32), self.n_neg_min),
n_neg_losses)
# In the unlikely case when either (1) there are no negative ground truth boxes at all
# or (2) the classification loss for all negative boxes is zero, return zero as the `neg_class_loss`
def f1():
return tf.zeros([batch_size])
# Otherwise compute the negative loss
def f2():
# Now we'll identify the top-k (where k == `n_negative_keep`) boxes with the highest confidence loss that
# belong to the background class in the ground truth data. Note that this doesn't necessarily mean that the model
# predicted the wrong class for those boxes, it just means that the loss for those boxes is the highest.
# To do this, we reshape `neg_class_loss_all` to 1D...
neg_class_loss_all_1D = tf.reshape(neg_class_loss_all, [-1]) # Tensor of shape (batch_size * n_boxes,)
# ...and then we get the indices for the `n_negative_keep` boxes with the highest loss out of those...
values, indices = tf.nn.top_k(neg_class_loss_all_1D, n_negative_keep, False) # We don't need sorting
# ...and with these indices we'll create a mask...
negatives_keep = tf.scatter_nd(tf.expand_dims(indices, axis=1),
updates=tf.ones_like(indices, dtype=tf.int32), shape=tf.shape(
neg_class_loss_all_1D)) # Tensor of shape (batch_size * n_boxes,)
negatives_keep = tf.cast(
tf.reshape(negatives_keep, [batch_size, n_boxes]),
dtype=tf.float32) # Tensor of shape (batch_size, n_boxes)
# ...and use it to keep only those boxes and mask all other classification losses
neg_class_loss = tf.reduce_sum(classification_loss * negatives_keep,
axis=-1) # Tensor of shape (batch_size,)
return neg_class_loss
neg_class_loss = tf.cond(tf.equal(n_neg_losses, tf.constant(0)), f1, f2)
class_loss = pos_class_loss + neg_class_loss # Tensor of shape (batch_size,)
# 3: Compute the localization loss for the positive targets
# We don't penalize localization loss for negative predicted boxes (obviously: there are no ground truth boxes they would correspond to)
loc_loss = tf.reduce_sum(localization_loss * positives, axis=-1) # Tensor of shape (batch_size,)
# 4: Compute the total loss
total_loss = (self.beta * class_loss + self.alpha * loc_loss) / tf.maximum(1.0,
n_positive) # In case `n_positive == 0`
return total_loss | [
"def",
"compute_loss",
"(",
"self",
",",
"y_true",
",",
"y_pred",
")",
":",
"batch_size",
"=",
"tf",
".",
"shape",
"(",
"y_pred",
")",
"[",
"0",
"]",
"# Output dtype: tf.int32",
"n_boxes",
"=",
"tf",
".",
"shape",
"(",
"y_pred",
")",
"[",
"1",
"]",
"# Output dtype: tf.int32, note that `n_boxes` in this context denotes the total number of boxes per image, not the number of boxes per cell",
"# 1: Compute the losses for class and box predictions for every box",
"classification_loss",
"=",
"tf",
".",
"cast",
"(",
"self",
".",
"log_loss",
"(",
"y_true",
"[",
":",
",",
":",
",",
":",
"-",
"12",
"]",
",",
"y_pred",
"[",
":",
",",
":",
",",
":",
"-",
"12",
"]",
")",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"# Output shape: (batch_size, n_boxes)",
"localization_loss",
"=",
"tf",
".",
"cast",
"(",
"self",
".",
"smooth_L1_loss",
"(",
"y_true",
"[",
":",
",",
":",
",",
"-",
"12",
":",
"-",
"8",
"]",
",",
"y_pred",
"[",
":",
",",
":",
",",
"-",
"12",
":",
"-",
"8",
"]",
")",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"# Output shape: (batch_size, n_boxes)",
"# 2: Compute the classification losses for the positive and negative targets",
"# Create masks for the positive and negative ground truth classes",
"negatives",
"=",
"y_true",
"[",
":",
",",
":",
",",
"0",
"]",
"# Tensor of shape (batch_size, n_boxes)",
"positives",
"=",
"tf",
".",
"cast",
"(",
"tf",
".",
"reduce_max",
"(",
"y_true",
"[",
":",
",",
":",
",",
"1",
":",
"-",
"12",
"]",
",",
"axis",
"=",
"-",
"1",
")",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"# Tensor of shape (batch_size, n_boxes)",
"# Count the number of positive boxes (classes 1 to n) in y_true across the whole batch",
"n_positive",
"=",
"tf",
".",
"reduce_sum",
"(",
"positives",
")",
"# Now mask all negative boxes and sum up the losses for the positive boxes PER batch item",
"# (Keras loss functions must output one scalar loss value PER batch item, rather than just",
"# one scalar for the entire batch, that's why we're not summing across all axes)",
"pos_class_loss",
"=",
"tf",
".",
"reduce_sum",
"(",
"classification_loss",
"*",
"positives",
",",
"axis",
"=",
"-",
"1",
")",
"# Tensor of shape (batch_size,)",
"# Compute the classification loss for the negative default boxes (if there are any)",
"# First, compute the classification loss for all negative boxes",
"neg_class_loss_all",
"=",
"classification_loss",
"*",
"negatives",
"# Tensor of shape (batch_size, n_boxes)",
"n_neg_losses",
"=",
"tf",
".",
"math",
".",
"count_nonzero",
"(",
"neg_class_loss_all",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"# The number of non-zero loss entries in `neg_class_loss_all`",
"# What's the point of `n_neg_losses`? For the next step, which will be to compute which negative boxes enter the classification",
"# loss, we don't just want to know how many negative ground truth boxes there are, but for how many of those there actually is",
"# a positive (i.e. non-zero) loss. This is necessary because `tf.nn.top-k()` in the function below will pick the top k boxes with",
"# the highest losses no matter what, even if it receives a vector where all losses are zero. In the unlikely event that all negative",
"# classification losses ARE actually zero though, this behavior might lead to `tf.nn.top-k()` returning the indices of positive",
"# boxes, leading to an incorrect negative classification loss computation, and hence an incorrect overall loss computation.",
"# We therefore need to make sure that `n_negative_keep`, which assumes the role of the `k` argument in `tf.nn.top-k()`,",
"# is at most the number of negative boxes for which there is a positive classification loss.",
"# Compute the number of negative examples we want to account for in the loss",
"# We'll keep at most `self.neg_pos_ratio` times the number of positives in `y_true`, but at least `self.n_neg_min` (unless `n_neg_loses` is smaller)",
"n_negative_keep",
"=",
"tf",
".",
"minimum",
"(",
"tf",
".",
"maximum",
"(",
"self",
".",
"neg_pos_ratio",
"*",
"tf",
".",
"cast",
"(",
"n_positive",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"self",
".",
"n_neg_min",
")",
",",
"n_neg_losses",
")",
"# In the unlikely case when either (1) there are no negative ground truth boxes at all",
"# or (2) the classification loss for all negative boxes is zero, return zero as the `neg_class_loss`",
"def",
"f1",
"(",
")",
":",
"return",
"tf",
".",
"zeros",
"(",
"[",
"batch_size",
"]",
")",
"# Otherwise compute the negative loss",
"def",
"f2",
"(",
")",
":",
"# Now we'll identify the top-k (where k == `n_negative_keep`) boxes with the highest confidence loss that",
"# belong to the background class in the ground truth data. Note that this doesn't necessarily mean that the model",
"# predicted the wrong class for those boxes, it just means that the loss for those boxes is the highest.",
"# To do this, we reshape `neg_class_loss_all` to 1D...",
"neg_class_loss_all_1D",
"=",
"tf",
".",
"reshape",
"(",
"neg_class_loss_all",
",",
"[",
"-",
"1",
"]",
")",
"# Tensor of shape (batch_size * n_boxes,)",
"# ...and then we get the indices for the `n_negative_keep` boxes with the highest loss out of those...",
"values",
",",
"indices",
"=",
"tf",
".",
"nn",
".",
"top_k",
"(",
"neg_class_loss_all_1D",
",",
"n_negative_keep",
",",
"False",
")",
"# We don't need sorting",
"# ...and with these indices we'll create a mask...",
"negatives_keep",
"=",
"tf",
".",
"scatter_nd",
"(",
"tf",
".",
"expand_dims",
"(",
"indices",
",",
"axis",
"=",
"1",
")",
",",
"updates",
"=",
"tf",
".",
"ones_like",
"(",
"indices",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"shape",
"=",
"tf",
".",
"shape",
"(",
"neg_class_loss_all_1D",
")",
")",
"# Tensor of shape (batch_size * n_boxes,)",
"negatives_keep",
"=",
"tf",
".",
"cast",
"(",
"tf",
".",
"reshape",
"(",
"negatives_keep",
",",
"[",
"batch_size",
",",
"n_boxes",
"]",
")",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"# Tensor of shape (batch_size, n_boxes)",
"# ...and use it to keep only those boxes and mask all other classification losses",
"neg_class_loss",
"=",
"tf",
".",
"reduce_sum",
"(",
"classification_loss",
"*",
"negatives_keep",
",",
"axis",
"=",
"-",
"1",
")",
"# Tensor of shape (batch_size,)",
"return",
"neg_class_loss",
"neg_class_loss",
"=",
"tf",
".",
"cond",
"(",
"tf",
".",
"equal",
"(",
"n_neg_losses",
",",
"tf",
".",
"constant",
"(",
"0",
")",
")",
",",
"f1",
",",
"f2",
")",
"class_loss",
"=",
"pos_class_loss",
"+",
"neg_class_loss",
"# Tensor of shape (batch_size,)",
"# 3: Compute the localization loss for the positive targets",
"# We don't penalize localization loss for negative predicted boxes (obviously: there are no ground truth boxes they would correspond to)",
"loc_loss",
"=",
"tf",
".",
"reduce_sum",
"(",
"localization_loss",
"*",
"positives",
",",
"axis",
"=",
"-",
"1",
")",
"# Tensor of shape (batch_size,)",
"# 4: Compute the total loss",
"total_loss",
"=",
"(",
"self",
".",
"beta",
"*",
"class_loss",
"+",
"self",
".",
"alpha",
"*",
"loc_loss",
")",
"/",
"tf",
".",
"maximum",
"(",
"1.0",
",",
"n_positive",
")",
"# In case `n_positive == 0`",
"return",
"total_loss"
] | https://github.com/bruceyang2012/Face-detection-with-mobilenet-ssd/blob/58fafb6e93d28531797aac1e9a4436730c8cee7c/keras_ssd_loss.py#L101-L215 |
|
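A hedged sketch of wiring compute_loss into a Keras model. The constructor arguments are assumed from the attributes the method reads (neg_pos_ratio, n_neg_min, alpha, beta), the import path is assumed from the file name, and model stands for an already-built SSD network whose output follows the y_true layout described above:

from keras_ssd_loss import SSDLoss  # assumed module path

ssd_loss = SSDLoss(neg_pos_ratio=3, n_neg_min=0, alpha=1.0)  # assumed constructor signature
model.compile(optimizer='adam', loss=ssd_loss.compute_loss)
model.fit(train_images, y_encoded, batch_size=32, epochs=10)  # y_encoded: ground truth in the expected box encoding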
getting-things-gnome/gtg | 4b02c43744b32a00facb98174f04ec5953bd055d | GTG/core/datastore.py | python | TaskSource.get_task_filter_for_backend | (self) | return lambda task: backend_filter(self.requester, task,
{"tags": set(attached_tags)}) | Filter that checks if the task should be stored in this backend.
@returns function: a function that accepts a task and returns
True/False whether the task should be stored or not | Filter that checks if the task should be stored in this backend. | [
"Filter",
"that",
"checks",
"if",
"the",
"task",
"should",
"be",
"stored",
"in",
"this",
"backend",
"."
] | def get_task_filter_for_backend(self):
"""
Filter that checks if the task should be stored in this backend.
@returns function: a function that accepts a task and returns
True/False whether the task should be stored or not
"""
def backend_filter(req, task, parameters):
"""
Filter that checks if two tags sets intersect. It is used to check
if a task should be stored inside a backend
@param task: a task object
@param tags_to_match_set: a *set* of tag names
"""
try:
tags_to_match_set = parameters['tags']
except KeyError:
return []
all_tasks_tag = req.get_alltag_tag().get_name()
if all_tasks_tag in tags_to_match_set:
return True
task_tags = set(task.get_tags_name())
return task_tags.intersection(tags_to_match_set)
attached_tags = self.backend.get_attached_tags()
return lambda task: backend_filter(self.requester, task,
{"tags": set(attached_tags)}) | [
"def",
"get_task_filter_for_backend",
"(",
"self",
")",
":",
"def",
"backend_filter",
"(",
"req",
",",
"task",
",",
"parameters",
")",
":",
"\"\"\"\n Filter that checks if two tags sets intersect. It is used to check\n if a task should be stored inside a backend\n @param task: a task object\n @param tags_to_match_set: a *set* of tag names\n \"\"\"",
"try",
":",
"tags_to_match_set",
"=",
"parameters",
"[",
"'tags'",
"]",
"except",
"KeyError",
":",
"return",
"[",
"]",
"all_tasks_tag",
"=",
"req",
".",
"get_alltag_tag",
"(",
")",
".",
"get_name",
"(",
")",
"if",
"all_tasks_tag",
"in",
"tags_to_match_set",
":",
"return",
"True",
"task_tags",
"=",
"set",
"(",
"task",
".",
"get_tags_name",
"(",
")",
")",
"return",
"task_tags",
".",
"intersection",
"(",
"tags_to_match_set",
")",
"attached_tags",
"=",
"self",
".",
"backend",
".",
"get_attached_tags",
"(",
")",
"return",
"lambda",
"task",
":",
"backend_filter",
"(",
"self",
".",
"requester",
",",
"task",
",",
"{",
"\"tags\"",
":",
"set",
"(",
"attached_tags",
")",
"}",
")"
] | https://github.com/getting-things-gnome/gtg/blob/4b02c43744b32a00facb98174f04ec5953bd055d/GTG/core/datastore.py#L699-L726 |
|
kobiso/CBAM-keras | 796ae9ea31253d87f46ac4908e94ad5d799fbdd5 | models/.ipynb_checkpoints/mobilenets-checkpoint.py | python | _depthwise_conv_block | (inputs, pointwise_conv_filters, alpha,
depth_multiplier=1, strides=(1, 1), block_id=1, attention_module=None) | return x | Adds a depthwise convolution block.
A depthwise convolution block consists of a depthwise conv,
batch normalization, relu6, pointwise convolution,
batch normalization and relu6 activation.
# Arguments
inputs: Input tensor of shape `(rows, cols, channels)`
(with `channels_last` data format) or
(channels, rows, cols) (with `channels_first` data format).
pointwise_conv_filters: Integer, the dimensionality of the output space
(i.e. the number output of filters in the pointwise convolution).
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
block_id: Integer, a unique identification designating the block number.
# Input shape
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(batch, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
# Returns
Output tensor of block. | Adds a depthwise convolution block.
A depthwise convolution block consists of a depthwise conv,
batch normalization, relu6, pointwise convolution,
batch normalization and relu6 activation.
# Arguments
inputs: Input tensor of shape `(rows, cols, channels)`
(with `channels_last` data format) or
(channels, rows, cols) (with `channels_first` data format).
pointwise_conv_filters: Integer, the dimensionality of the output space
(i.e. the number output of filters in the pointwise convolution).
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
block_id: Integer, a unique identification designating the block number.
# Input shape
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(batch, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
# Returns
Output tensor of block. | [
"Adds",
"a",
"depthwise",
"convolution",
"block",
".",
"A",
"depthwise",
"convolution",
"block",
"consists",
"of",
"a",
"depthwise",
"conv",
"batch",
"normalization",
"relu6",
"pointwise",
"convolution",
"batch",
"normalization",
"and",
"relu6",
"activation",
".",
"#",
"Arguments",
"inputs",
":",
"Input",
"tensor",
"of",
"shape",
"(",
"rows",
"cols",
"channels",
")",
"(",
"with",
"channels_last",
"data",
"format",
")",
"or",
"(",
"channels",
"rows",
"cols",
")",
"(",
"with",
"channels_first",
"data",
"format",
")",
".",
"pointwise_conv_filters",
":",
"Integer",
"the",
"dimensionality",
"of",
"the",
"output",
"space",
"(",
"i",
".",
"e",
".",
"the",
"number",
"output",
"of",
"filters",
"in",
"the",
"pointwise",
"convolution",
")",
".",
"alpha",
":",
"controls",
"the",
"width",
"of",
"the",
"network",
".",
"-",
"If",
"alpha",
"<",
"1",
".",
"0",
"proportionally",
"decreases",
"the",
"number",
"of",
"filters",
"in",
"each",
"layer",
".",
"-",
"If",
"alpha",
">",
"1",
".",
"0",
"proportionally",
"increases",
"the",
"number",
"of",
"filters",
"in",
"each",
"layer",
".",
"-",
"If",
"alpha",
"=",
"1",
"default",
"number",
"of",
"filters",
"from",
"the",
"paper",
"are",
"used",
"at",
"each",
"layer",
".",
"depth_multiplier",
":",
"The",
"number",
"of",
"depthwise",
"convolution",
"output",
"channels",
"for",
"each",
"input",
"channel",
".",
"The",
"total",
"number",
"of",
"depthwise",
"convolution",
"output",
"channels",
"will",
"be",
"equal",
"to",
"filters_in",
"*",
"depth_multiplier",
".",
"strides",
":",
"An",
"integer",
"or",
"tuple",
"/",
"list",
"of",
"2",
"integers",
"specifying",
"the",
"strides",
"of",
"the",
"convolution",
"along",
"the",
"width",
"and",
"height",
".",
"Can",
"be",
"a",
"single",
"integer",
"to",
"specify",
"the",
"same",
"value",
"for",
"all",
"spatial",
"dimensions",
".",
"Specifying",
"any",
"stride",
"value",
"!",
"=",
"1",
"is",
"incompatible",
"with",
"specifying",
"any",
"dilation_rate",
"value",
"!",
"=",
"1",
".",
"block_id",
":",
"Integer",
"a",
"unique",
"identification",
"designating",
"the",
"block",
"number",
".",
"#",
"Input",
"shape",
"4D",
"tensor",
"with",
"shape",
":",
"(",
"batch",
"channels",
"rows",
"cols",
")",
"if",
"data_format",
"=",
"channels_first",
"or",
"4D",
"tensor",
"with",
"shape",
":",
"(",
"batch",
"rows",
"cols",
"channels",
")",
"if",
"data_format",
"=",
"channels_last",
".",
"#",
"Output",
"shape",
"4D",
"tensor",
"with",
"shape",
":",
"(",
"batch",
"filters",
"new_rows",
"new_cols",
")",
"if",
"data_format",
"=",
"channels_first",
"or",
"4D",
"tensor",
"with",
"shape",
":",
"(",
"batch",
"new_rows",
"new_cols",
"filters",
")",
"if",
"data_format",
"=",
"channels_last",
".",
"rows",
"and",
"cols",
"values",
"might",
"have",
"changed",
"due",
"to",
"stride",
".",
"#",
"Returns",
"Output",
"tensor",
"of",
"block",
"."
] | def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha,
depth_multiplier=1, strides=(1, 1), block_id=1, attention_module=None):
"""Adds a depthwise convolution block.
A depthwise convolution block consists of a depthwise conv,
batch normalization, relu6, pointwise convolution,
batch normalization and relu6 activation.
# Arguments
inputs: Input tensor of shape `(rows, cols, channels)`
(with `channels_last` data format) or
(channels, rows, cols) (with `channels_first` data format).
pointwise_conv_filters: Integer, the dimensionality of the output space
(i.e. the number output of filters in the pointwise convolution).
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
block_id: Integer, a unique identification designating the block number.
# Input shape
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(batch, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
# Returns
Output tensor of block.
"""
channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
pointwise_conv_filters = int(pointwise_conv_filters * alpha)
x = DepthwiseConv2D((3, 3),
padding='same',
depth_multiplier=depth_multiplier,
strides=strides,
use_bias=False,
name='conv_dw_%d' % block_id)(inputs)
x = BatchNormalization(axis=channel_axis, name='conv_dw_%d_bn' % block_id)(x)
x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)
x = Conv2D(pointwise_conv_filters, (1, 1),
padding='same',
use_bias=False,
strides=(1, 1),
name='conv_pw_%d' % block_id)(x)
x = BatchNormalization(axis=channel_axis, name='conv_pw_%d_bn' % block_id)(x)
x = Activation(relu6, name='conv_pw_%d_relu' % block_id)(x)
if attention_module == 'se_block':
x = se_block(x)
if attention_module == 'cbam_block':
x = cbam_block(x)
return x | [
"def",
"_depthwise_conv_block",
"(",
"inputs",
",",
"pointwise_conv_filters",
",",
"alpha",
",",
"depth_multiplier",
"=",
"1",
",",
"strides",
"=",
"(",
"1",
",",
"1",
")",
",",
"block_id",
"=",
"1",
",",
"attention_module",
"=",
"None",
")",
":",
"channel_axis",
"=",
"1",
"if",
"K",
".",
"image_data_format",
"(",
")",
"==",
"'channels_first'",
"else",
"-",
"1",
"pointwise_conv_filters",
"=",
"int",
"(",
"pointwise_conv_filters",
"*",
"alpha",
")",
"x",
"=",
"DepthwiseConv2D",
"(",
"(",
"3",
",",
"3",
")",
",",
"padding",
"=",
"'same'",
",",
"depth_multiplier",
"=",
"depth_multiplier",
",",
"strides",
"=",
"strides",
",",
"use_bias",
"=",
"False",
",",
"name",
"=",
"'conv_dw_%d'",
"%",
"block_id",
")",
"(",
"inputs",
")",
"x",
"=",
"BatchNormalization",
"(",
"axis",
"=",
"channel_axis",
",",
"name",
"=",
"'conv_dw_%d_bn'",
"%",
"block_id",
")",
"(",
"x",
")",
"x",
"=",
"Activation",
"(",
"relu6",
",",
"name",
"=",
"'conv_dw_%d_relu'",
"%",
"block_id",
")",
"(",
"x",
")",
"x",
"=",
"Conv2D",
"(",
"pointwise_conv_filters",
",",
"(",
"1",
",",
"1",
")",
",",
"padding",
"=",
"'same'",
",",
"use_bias",
"=",
"False",
",",
"strides",
"=",
"(",
"1",
",",
"1",
")",
",",
"name",
"=",
"'conv_pw_%d'",
"%",
"block_id",
")",
"(",
"x",
")",
"x",
"=",
"BatchNormalization",
"(",
"axis",
"=",
"channel_axis",
",",
"name",
"=",
"'conv_pw_%d_bn'",
"%",
"block_id",
")",
"(",
"x",
")",
"x",
"=",
"Activation",
"(",
"relu6",
",",
"name",
"=",
"'conv_pw_%d_relu'",
"%",
"block_id",
")",
"(",
"x",
")",
"if",
"attention_module",
"==",
"'se_block'",
":",
"x",
"=",
"se_block",
"(",
"x",
")",
"if",
"attention_module",
"==",
"'cbam_block'",
":",
"x",
"=",
"cbam_block",
"(",
"x",
")",
"return",
"x"
] | https://github.com/kobiso/CBAM-keras/blob/796ae9ea31253d87f46ac4908e94ad5d799fbdd5/models/.ipynb_checkpoints/mobilenets-checkpoint.py#L477-L546 |
|
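A short sketch of calling _depthwise_conv_block inside a model definition; it leans on the Keras imports the original module already uses (Input and Model are assumed to be available) and exercises the 'cbam_block' option named in the code:

from keras.layers import Input
from keras.models import Model

inputs = Input(shape=(224, 224, 3))
x = _depthwise_conv_block(inputs, 64, alpha=1.0, depth_multiplier=1,
                          strides=(2, 2), block_id=1, attention_module='cbam_block')
model = Model(inputs, x)  # one depthwise-separable block followed by CBAM attention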
microsoft/unilm | 65f15af2a307ebb64cfb25adf54375b002e6fe8d | infoxlm/fairseq/fairseq/progress_bar.py | python | simple_progress_bar.print | (self, stats, tag='', step=None) | Print end-of-epoch stats. | Print end-of-epoch stats. | [
"Print",
"end",
"-",
"of",
"-",
"epoch",
"stats",
"."
] | def print(self, stats, tag='', step=None):
"""Print end-of-epoch stats."""
postfix = self._str_pipes(self._format_stats(stats))
print('{} | {}'.format(self.prefix, postfix), flush=True) | [
"def",
"print",
"(",
"self",
",",
"stats",
",",
"tag",
"=",
"''",
",",
"step",
"=",
"None",
")",
":",
"postfix",
"=",
"self",
".",
"_str_pipes",
"(",
"self",
".",
"_format_stats",
"(",
"stats",
")",
")",
"print",
"(",
"'{} | {}'",
".",
"format",
"(",
"self",
".",
"prefix",
",",
"postfix",
")",
",",
"flush",
"=",
"True",
")"
] | https://github.com/microsoft/unilm/blob/65f15af2a307ebb64cfb25adf54375b002e6fe8d/infoxlm/fairseq/fairseq/progress_bar.py#L194-L197 |
||
Yelp/mrjob | 091572e87bc24cc64be40278dd0f5c3617c98d4b | mrjob/emr.py | python | EMRJobRunner.get_cluster_id | (self) | return self._cluster_id | Get the ID of the cluster our job is running on, or ``None``. | Get the ID of the cluster our job is running on, or ``None``. | [
"Get",
"the",
"ID",
"of",
"the",
"cluster",
"our",
"job",
"is",
"running",
"on",
"or",
"None",
"."
] | def get_cluster_id(self):
"""Get the ID of the cluster our job is running on, or ``None``."""
return self._cluster_id | [
"def",
"get_cluster_id",
"(",
"self",
")",
":",
"return",
"self",
".",
"_cluster_id"
] | https://github.com/Yelp/mrjob/blob/091572e87bc24cc64be40278dd0f5c3617c98d4b/mrjob/emr.py#L2388-L2390 |
|
Komodo/KomodoEdit | 61edab75dce2bdb03943b387b0608ea36f548e8e | contrib/paramiko/paramiko/transport.py | python | Transport.open_channel | (self,
kind,
dest_addr=None,
src_addr=None,
window_size=None,
max_packet_size=None,
timeout=None) | Request a new channel to the server. `Channels <.Channel>` are
socket-like objects used for the actual transfer of data across the
session. You may only request a channel after negotiating encryption
(using `connect` or `start_client`) and authenticating.
.. note:: Modifying the window and packet sizes might have adverse
effects on the channel created. The default values are the same
as in the OpenSSH code base and have been battle tested.
:param str kind:
the kind of channel requested (usually ``"session"``,
``"forwarded-tcpip"``, ``"direct-tcpip"``, or ``"x11"``)
:param tuple dest_addr:
the destination address (address + port tuple) of this port
forwarding, if ``kind`` is ``"forwarded-tcpip"`` or
``"direct-tcpip"`` (ignored for other channel types)
:param src_addr: the source address of this port forwarding, if
``kind`` is ``"forwarded-tcpip"``, ``"direct-tcpip"``, or ``"x11"``
:param int window_size:
optional window size for this session.
:param int max_packet_size:
optional max packet size for this session.
:param float timeout:
optional timeout opening a channel, default 3600s (1h)
:return: a new `.Channel` on success
:raises SSHException: if the request is rejected, the session ends
prematurely or there is a timeout opening a channel
.. versionchanged:: 1.15
Added the ``window_size`` and ``max_packet_size`` arguments. | Request a new channel to the server. `Channels <.Channel>` are
socket-like objects used for the actual transfer of data across the
session. You may only request a channel after negotiating encryption
(using `connect` or `start_client`) and authenticating. | [
"Request",
"a",
"new",
"channel",
"to",
"the",
"server",
".",
"Channels",
"<",
".",
"Channel",
">",
"are",
"socket",
"-",
"like",
"objects",
"used",
"for",
"the",
"actual",
"transfer",
"of",
"data",
"across",
"the",
"session",
".",
"You",
"may",
"only",
"request",
"a",
"channel",
"after",
"negotiating",
"encryption",
"(",
"using",
"connect",
"or",
"start_client",
")",
"and",
"authenticating",
"."
] | def open_channel(self,
kind,
dest_addr=None,
src_addr=None,
window_size=None,
max_packet_size=None,
timeout=None):
"""
Request a new channel to the server. `Channels <.Channel>` are
socket-like objects used for the actual transfer of data across the
session. You may only request a channel after negotiating encryption
(using `connect` or `start_client`) and authenticating.
.. note:: Modifying the window and packet sizes might have adverse
effects on the channel created. The default values are the same
as in the OpenSSH code base and have been battle tested.
:param str kind:
the kind of channel requested (usually ``"session"``,
``"forwarded-tcpip"``, ``"direct-tcpip"``, or ``"x11"``)
:param tuple dest_addr:
the destination address (address + port tuple) of this port
forwarding, if ``kind`` is ``"forwarded-tcpip"`` or
``"direct-tcpip"`` (ignored for other channel types)
:param src_addr: the source address of this port forwarding, if
``kind`` is ``"forwarded-tcpip"``, ``"direct-tcpip"``, or ``"x11"``
:param int window_size:
optional window size for this session.
:param int max_packet_size:
optional max packet size for this session.
:param float timeout:
optional timeout opening a channel, default 3600s (1h)
:return: a new `.Channel` on success
:raises SSHException: if the request is rejected, the session ends
prematurely or there is a timeout opening a channel
.. versionchanged:: 1.15
Added the ``window_size`` and ``max_packet_size`` arguments.
"""
if not self.active:
raise SSHException('SSH session not active')
timeout = 3600 if timeout is None else timeout
self.lock.acquire()
try:
window_size = self._sanitize_window_size(window_size)
max_packet_size = self._sanitize_packet_size(max_packet_size)
chanid = self._next_channel()
m = Message()
m.add_byte(cMSG_CHANNEL_OPEN)
m.add_string(kind)
m.add_int(chanid)
m.add_int(window_size)
m.add_int(max_packet_size)
if (kind == 'forwarded-tcpip') or (kind == 'direct-tcpip'):
m.add_string(dest_addr[0])
m.add_int(dest_addr[1])
m.add_string(src_addr[0])
m.add_int(src_addr[1])
elif kind == 'x11':
m.add_string(src_addr[0])
m.add_int(src_addr[1])
chan = Channel(chanid)
self._channels.put(chanid, chan)
self.channel_events[chanid] = event = threading.Event()
self.channels_seen[chanid] = True
chan._set_transport(self)
chan._set_window(window_size, max_packet_size)
finally:
self.lock.release()
self._send_user_message(m)
start_ts = time.time()
while True:
event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is None:
e = SSHException('Unable to open channel.')
raise e
if event.is_set():
break
elif start_ts + timeout < time.time():
raise SSHException('Timeout openning channel.')
chan = self._channels.get(chanid)
if chan is not None:
return chan
e = self.get_exception()
if e is None:
e = SSHException('Unable to open channel.')
raise e | [
"def",
"open_channel",
"(",
"self",
",",
"kind",
",",
"dest_addr",
"=",
"None",
",",
"src_addr",
"=",
"None",
",",
"window_size",
"=",
"None",
",",
"max_packet_size",
"=",
"None",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"active",
":",
"raise",
"SSHException",
"(",
"'SSH session not active'",
")",
"timeout",
"=",
"3600",
"if",
"timeout",
"is",
"None",
"else",
"timeout",
"self",
".",
"lock",
".",
"acquire",
"(",
")",
"try",
":",
"window_size",
"=",
"self",
".",
"_sanitize_window_size",
"(",
"window_size",
")",
"max_packet_size",
"=",
"self",
".",
"_sanitize_packet_size",
"(",
"max_packet_size",
")",
"chanid",
"=",
"self",
".",
"_next_channel",
"(",
")",
"m",
"=",
"Message",
"(",
")",
"m",
".",
"add_byte",
"(",
"cMSG_CHANNEL_OPEN",
")",
"m",
".",
"add_string",
"(",
"kind",
")",
"m",
".",
"add_int",
"(",
"chanid",
")",
"m",
".",
"add_int",
"(",
"window_size",
")",
"m",
".",
"add_int",
"(",
"max_packet_size",
")",
"if",
"(",
"kind",
"==",
"'forwarded-tcpip'",
")",
"or",
"(",
"kind",
"==",
"'direct-tcpip'",
")",
":",
"m",
".",
"add_string",
"(",
"dest_addr",
"[",
"0",
"]",
")",
"m",
".",
"add_int",
"(",
"dest_addr",
"[",
"1",
"]",
")",
"m",
".",
"add_string",
"(",
"src_addr",
"[",
"0",
"]",
")",
"m",
".",
"add_int",
"(",
"src_addr",
"[",
"1",
"]",
")",
"elif",
"kind",
"==",
"'x11'",
":",
"m",
".",
"add_string",
"(",
"src_addr",
"[",
"0",
"]",
")",
"m",
".",
"add_int",
"(",
"src_addr",
"[",
"1",
"]",
")",
"chan",
"=",
"Channel",
"(",
"chanid",
")",
"self",
".",
"_channels",
".",
"put",
"(",
"chanid",
",",
"chan",
")",
"self",
".",
"channel_events",
"[",
"chanid",
"]",
"=",
"event",
"=",
"threading",
".",
"Event",
"(",
")",
"self",
".",
"channels_seen",
"[",
"chanid",
"]",
"=",
"True",
"chan",
".",
"_set_transport",
"(",
"self",
")",
"chan",
".",
"_set_window",
"(",
"window_size",
",",
"max_packet_size",
")",
"finally",
":",
"self",
".",
"lock",
".",
"release",
"(",
")",
"self",
".",
"_send_user_message",
"(",
"m",
")",
"start_ts",
"=",
"time",
".",
"time",
"(",
")",
"while",
"True",
":",
"event",
".",
"wait",
"(",
"0.1",
")",
"if",
"not",
"self",
".",
"active",
":",
"e",
"=",
"self",
".",
"get_exception",
"(",
")",
"if",
"e",
"is",
"None",
":",
"e",
"=",
"SSHException",
"(",
"'Unable to open channel.'",
")",
"raise",
"e",
"if",
"event",
".",
"is_set",
"(",
")",
":",
"break",
"elif",
"start_ts",
"+",
"timeout",
"<",
"time",
".",
"time",
"(",
")",
":",
"raise",
"SSHException",
"(",
"'Timeout openning channel.'",
")",
"chan",
"=",
"self",
".",
"_channels",
".",
"get",
"(",
"chanid",
")",
"if",
"chan",
"is",
"not",
"None",
":",
"return",
"chan",
"e",
"=",
"self",
".",
"get_exception",
"(",
")",
"if",
"e",
"is",
"None",
":",
"e",
"=",
"SSHException",
"(",
"'Unable to open channel.'",
")",
"raise",
"e"
] | https://github.com/Komodo/KomodoEdit/blob/61edab75dce2bdb03943b387b0608ea36f548e8e/contrib/paramiko/paramiko/transport.py#L746-L836 |
||
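A minimal usage sketch for Transport.open_channel as documented above; the host, credentials and forwarded addresses are placeholders, not values taken from the record:

    import paramiko

    transport = paramiko.Transport(("ssh.example.com", 22))
    transport.connect(username="user", password="secret")
    # Plain interactive session channel.
    chan = transport.open_channel("session")
    # Ask the server to open a TCP connection to dest_addr on our behalf.
    fwd = transport.open_channel(
        "direct-tcpip",
        dest_addr=("internal.example.com", 80),
        src_addr=("127.0.0.1", 0),
    )
    fwd.close()
    chan.close()
    transport.close()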
AppScale/gts | 46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9 | AppTaskQueue/appscale/taskqueue/queue.py | python | PostgresPullQueue.get_task | (self, task, omit_payload=False) | return self._task_from_row(columns, row, id=task.id) | Gets a task from the queue.
Args:
task: A Task object.
omit_payload: A boolean indicating that the payload should not be
fetched.
Returns:
A task object or None. | Gets a task from the queue. | [
"Gets",
"a",
"task",
"from",
"the",
"queue",
"."
] | def get_task(self, task, omit_payload=False):
""" Gets a task from the queue.
Args:
task: A Task object.
omit_payload: A boolean indicating that the payload should not be
fetched.
Returns:
A task object or None.
"""
if omit_payload:
columns = ['task_name', 'time_enqueued',
'lease_expires', 'lease_count', 'tag']
else:
columns = ['payload', 'task_name', 'time_enqueued',
'lease_expires', 'lease_count', 'tag']
pg_connection = pg_wrapper.get_connection()
with pg_connection:
with pg_connection.cursor() as pg_cursor:
pg_cursor.execute(
'SELECT {columns} FROM "{tasks_table}" '
'WHERE task_name = %(task_name)s AND time_deleted IS NULL'
.format(columns=', '.join(columns),
tasks_table=self.tasks_table_name),
vars={
'task_name': task.id,
}
)
row = pg_cursor.fetchone()
if not row:
return None
return self._task_from_row(columns, row, id=task.id) | [
"def",
"get_task",
"(",
"self",
",",
"task",
",",
"omit_payload",
"=",
"False",
")",
":",
"if",
"omit_payload",
":",
"columns",
"=",
"[",
"'task_name'",
",",
"'time_enqueued'",
",",
"'lease_expires'",
",",
"'lease_count'",
",",
"'tag'",
"]",
"else",
":",
"columns",
"=",
"[",
"'payload'",
",",
"'task_name'",
",",
"'time_enqueued'",
",",
"'lease_expires'",
",",
"'lease_count'",
",",
"'tag'",
"]",
"pg_connection",
"=",
"pg_wrapper",
".",
"get_connection",
"(",
")",
"with",
"pg_connection",
":",
"with",
"pg_connection",
".",
"cursor",
"(",
")",
"as",
"pg_cursor",
":",
"pg_cursor",
".",
"execute",
"(",
"'SELECT {columns} FROM \"{tasks_table}\" '",
"'WHERE task_name = %(task_name)s AND time_deleted IS NULL'",
".",
"format",
"(",
"columns",
"=",
"', '",
".",
"join",
"(",
"columns",
")",
",",
"tasks_table",
"=",
"self",
".",
"tasks_table_name",
")",
",",
"vars",
"=",
"{",
"'task_name'",
":",
"task",
".",
"id",
",",
"}",
")",
"row",
"=",
"pg_cursor",
".",
"fetchone",
"(",
")",
"if",
"not",
"row",
":",
"return",
"None",
"return",
"self",
".",
"_task_from_row",
"(",
"columns",
",",
"row",
",",
"id",
"=",
"task",
".",
"id",
")"
] | https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppTaskQueue/appscale/taskqueue/queue.py#L313-L345 |
|
andresriancho/w3af | cd22e5252243a87aaa6d0ddea47cf58dacfe00a9 | w3af/core/ui/gui/tools/proxywin.py | python | ProxiedRequests.reload_options | (self) | Reload options.
1. Stop proxy
2. Try to start proxy with new params
3. If can't => alert
4. If everything is ok then start proxy
5. Set Trap options
6. Save options | Reload options.
1. Stop proxy
2. Try to start proxy with new params
3. If can't => alert
4. If everything is ok then start proxy
5. Set Trap options
6. Save options | [
"Reload",
"options",
".",
"1",
".",
"Stop",
"proxy",
"2",
".",
"Try",
"to",
"start",
"proxy",
"with",
"new",
"params",
"3",
".",
"If",
"can",
"t",
"=",
">",
"alert",
"4",
".",
"If",
"everything",
"is",
"ok",
"then",
"start",
"proxy",
"5",
".",
"Set",
"Trap",
"options",
"6",
".",
"Save",
"options"
] | def reload_options(self):
"""Reload options.
1. Stop proxy
2. Try to start proxy with new params
3. If can't => alert
4. If everything is ok then start proxy
5. Set Trap options
6. Save options
"""
new_port = self.pref.get_value('proxy', 'ipport')
if new_port != self._prev_ip_port:
self.w3af.mainwin.sb(_("Stopping local proxy"))
if self.proxy:
self.proxy.stop()
try:
self._start_proxy()
except ProxyException:
# Ups, port looks already used..:(
# Let's show alert and focus Options tab
self.w3af.mainwin.sb(_("Failed to start local proxy"))
self.fuzzable = None
self.waiting_requests = False
self.keep_checking = False
# Focus Options tab
self.nb.set_current_page(2)
return
else:
self.fuzzable = None
self.waiting_requests = True
self.keep_checking = True
# Config test
try:
self.proxy.set_what_to_trap(self.pref.get_value('proxy', 'trap'))
self.proxy.set_what_not_to_trap(self.pref.get_value('proxy', 'notrap'))
self.proxy.set_methods_to_trap(self.pref.get_value('proxy', 'methodtrap'))
except BaseFrameworkException, w3:
self.show_alert(_("Invalid configuration!\n" + str(w3)))
self._prev_ip_port = new_port
httpeditor = self.reqresp.request.get_view_by_id('HttpRawView')
httpeditor.set_show_line_numbers(self.pref.get_value('editor',
'display_line_num'))
httpeditor.set_highlight_current_line(self.pref.get_value('editor',
'highlight_current_line'))
httpeditor.set_highlight_syntax(self.pref.get_value('editor',
'highlight_syntax'))
httpeditor.set_wrap(self.pref.get_value('editor', 'wrap'))
self.pref.save()
if self._layout != self.pref.get_value('proxy', 'trap_view'):
self.show_alert(_('Some of options will take effect after you'
' restart proxy tool')) | [
"def",
"reload_options",
"(",
"self",
")",
":",
"new_port",
"=",
"self",
".",
"pref",
".",
"get_value",
"(",
"'proxy'",
",",
"'ipport'",
")",
"if",
"new_port",
"!=",
"self",
".",
"_prev_ip_port",
":",
"self",
".",
"w3af",
".",
"mainwin",
".",
"sb",
"(",
"_",
"(",
"\"Stopping local proxy\"",
")",
")",
"if",
"self",
".",
"proxy",
":",
"self",
".",
"proxy",
".",
"stop",
"(",
")",
"try",
":",
"self",
".",
"_start_proxy",
"(",
")",
"except",
"ProxyException",
":",
"# Ups, port looks already used..:(",
"# Let's show alert and focus Options tab",
"self",
".",
"w3af",
".",
"mainwin",
".",
"sb",
"(",
"_",
"(",
"\"Failed to start local proxy\"",
")",
")",
"self",
".",
"fuzzable",
"=",
"None",
"self",
".",
"waiting_requests",
"=",
"False",
"self",
".",
"keep_checking",
"=",
"False",
"# Focus Options tab",
"self",
".",
"nb",
".",
"set_current_page",
"(",
"2",
")",
"return",
"else",
":",
"self",
".",
"fuzzable",
"=",
"None",
"self",
".",
"waiting_requests",
"=",
"True",
"self",
".",
"keep_checking",
"=",
"True",
"# Config test",
"try",
":",
"self",
".",
"proxy",
".",
"set_what_to_trap",
"(",
"self",
".",
"pref",
".",
"get_value",
"(",
"'proxy'",
",",
"'trap'",
")",
")",
"self",
".",
"proxy",
".",
"set_what_not_to_trap",
"(",
"self",
".",
"pref",
".",
"get_value",
"(",
"'proxy'",
",",
"'notrap'",
")",
")",
"self",
".",
"proxy",
".",
"set_methods_to_trap",
"(",
"self",
".",
"pref",
".",
"get_value",
"(",
"'proxy'",
",",
"'methodtrap'",
")",
")",
"except",
"BaseFrameworkException",
",",
"w3",
":",
"self",
".",
"show_alert",
"(",
"_",
"(",
"\"Invalid configuration!\\n\"",
"+",
"str",
"(",
"w3",
")",
")",
")",
"self",
".",
"_prev_ip_port",
"=",
"new_port",
"httpeditor",
"=",
"self",
".",
"reqresp",
".",
"request",
".",
"get_view_by_id",
"(",
"'HttpRawView'",
")",
"httpeditor",
".",
"set_show_line_numbers",
"(",
"self",
".",
"pref",
".",
"get_value",
"(",
"'editor'",
",",
"'display_line_num'",
")",
")",
"httpeditor",
".",
"set_highlight_current_line",
"(",
"self",
".",
"pref",
".",
"get_value",
"(",
"'editor'",
",",
"'highlight_current_line'",
")",
")",
"httpeditor",
".",
"set_highlight_syntax",
"(",
"self",
".",
"pref",
".",
"get_value",
"(",
"'editor'",
",",
"'highlight_syntax'",
")",
")",
"httpeditor",
".",
"set_wrap",
"(",
"self",
".",
"pref",
".",
"get_value",
"(",
"'editor'",
",",
"'wrap'",
")",
")",
"self",
".",
"pref",
".",
"save",
"(",
")",
"if",
"self",
".",
"_layout",
"!=",
"self",
".",
"pref",
".",
"get_value",
"(",
"'proxy'",
",",
"'trap_view'",
")",
":",
"self",
".",
"show_alert",
"(",
"_",
"(",
"'Some of options will take effect after you'",
"' restart proxy tool'",
")",
")"
] | https://github.com/andresriancho/w3af/blob/cd22e5252243a87aaa6d0ddea47cf58dacfe00a9/w3af/core/ui/gui/tools/proxywin.py#L238-L291 |
||
mathics/Mathics | 318e06dea8f1c70758a50cb2f95c9900150e3a68 | mathics/builtin/structure.py | python | Apply.apply_invalidlevel | (self, f, expr, ls, evaluation, options={}) | Apply[f_, expr_, ls_, OptionsPattern[Apply]] | Apply[f_, expr_, ls_, OptionsPattern[Apply]] | [
"Apply",
"[",
"f_",
"expr_",
"ls_",
"OptionsPattern",
"[",
"Apply",
"]]"
] | def apply_invalidlevel(self, f, expr, ls, evaluation, options={}):
"Apply[f_, expr_, ls_, OptionsPattern[Apply]]"
evaluation.message("Apply", "level", ls) | [
"def",
"apply_invalidlevel",
"(",
"self",
",",
"f",
",",
"expr",
",",
"ls",
",",
"evaluation",
",",
"options",
"=",
"{",
"}",
")",
":",
"evaluation",
".",
"message",
"(",
"\"Apply\"",
",",
"\"level\"",
",",
"ls",
")"
] | https://github.com/mathics/Mathics/blob/318e06dea8f1c70758a50cb2f95c9900150e3a68/mathics/builtin/structure.py#L434-L437 |
||
Trusted-AI/adversarial-robustness-toolbox | 9fabffdbb92947efa1ecc5d825d634d30dfbaf29 | art/attacks/evasion/pe_malware_attack.py | python | MalwareGDTensorFlow.check_valid_size | (
self,
y: np.ndarray,
sample_sizes: np.ndarray,
append_perturbation_size: np.ndarray,
) | return adv_label_vector | Checks that we can append the l0 perturbation to the malware sample and not exceed the
maximum file size. A new label vector with just the valid files indicated is created.
:param y: Labels.
:param sample_sizes: The size of the original file, before it was padded to the input size required by MalConv.
:param append_perturbation_size: Size of the perturbations in L0 terms to put at end of file.
:return adv_label_vector: Labels which indicate which malware samples have enough free features to
accommodate all the adversarial perturbation. | Checks that we can append the l0 perturbation to the malware sample and not exceed the
maximum file size. A new label vector with just the valid files indicated is created. | [
"Checks",
"that",
"we",
"can",
"append",
"the",
"l0",
"perturbation",
"to",
"the",
"malware",
"sample",
"and",
"not",
"exceed",
"the",
"maximum",
"file",
"size",
".",
"A",
"new",
"label",
"vector",
"with",
"just",
"the",
"valid",
"files",
"indicated",
"is",
"created",
"."
] | def check_valid_size(
self,
y: np.ndarray,
sample_sizes: np.ndarray,
append_perturbation_size: np.ndarray,
) -> np.ndarray:
"""
Checks that we can append the l0 perturbation to the malware sample and not exceed the
maximum file size. A new label vector with just the valid files indicated is created.
:param y: Labels.
:param sample_sizes: The size of the original file, before it was padded to the input size required by MalConv.
:param append_perturbation_size: Size of the perturbations in L0 terms to put at end of file.
:return adv_label_vector: Labels which indicate which malware samples have enough free features to
accommodate all the adversarial perturbation.
"""
adv_label_vector = np.zeros_like(y)
for i, label in enumerate(y):
if label == 1:
if sample_sizes[i] + append_perturbation_size[i] <= self.param_dic["maxlen"]:
adv_label_vector[i] = 1
logger.info("size to append on sample %d is %d", i, append_perturbation_size[i])
return adv_label_vector | [
"def",
"check_valid_size",
"(",
"self",
",",
"y",
":",
"np",
".",
"ndarray",
",",
"sample_sizes",
":",
"np",
".",
"ndarray",
",",
"append_perturbation_size",
":",
"np",
".",
"ndarray",
",",
")",
"->",
"np",
".",
"ndarray",
":",
"adv_label_vector",
"=",
"np",
".",
"zeros_like",
"(",
"y",
")",
"for",
"i",
",",
"label",
"in",
"enumerate",
"(",
"y",
")",
":",
"if",
"label",
"==",
"1",
":",
"if",
"sample_sizes",
"[",
"i",
"]",
"+",
"append_perturbation_size",
"[",
"i",
"]",
"<=",
"self",
".",
"param_dic",
"[",
"\"maxlen\"",
"]",
":",
"adv_label_vector",
"[",
"i",
"]",
"=",
"1",
"logger",
".",
"info",
"(",
"\"size to append on sample %d is %d\"",
",",
"i",
",",
"append_perturbation_size",
"[",
"i",
"]",
")",
"return",
"adv_label_vector"
] | https://github.com/Trusted-AI/adversarial-robustness-toolbox/blob/9fabffdbb92947efa1ecc5d825d634d30dfbaf29/art/attacks/evasion/pe_malware_attack.py#L170-L194 |
|
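A standalone sketch of the same size check, with the maximum file length passed explicitly instead of read from self.param_dic["maxlen"]; the helper name and the 2 MiB limit are illustrative assumptions:

    import numpy as np

    def can_append(y, sample_sizes, perturbation_sizes, maxlen):
        # Mirrors check_valid_size: only malware samples (label 1) that still
        # fit inside maxlen after appending the perturbation stay marked as 1.
        ok = np.zeros_like(y)
        for i, label in enumerate(y):
            if label == 1 and sample_sizes[i] + perturbation_sizes[i] <= maxlen:
                ok[i] = 1
        return ok

    y = np.array([1, 1, 0])
    sizes = np.array([900_000, 2_000_000, 500_000])
    perturb = np.array([50_000, 200_000, 0])
    print(can_append(y, sizes, perturb, maxlen=2_097_152))  # [1 0 0]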
python/cpython | e13cdca0f5224ec4e23bdd04bb3120506964bc8b | Lib/importlib/metadata/__init__.py | python | distributions | (**kwargs) | return Distribution.discover(**kwargs) | Get all ``Distribution`` instances in the current environment.
:return: An iterable of ``Distribution`` instances. | Get all ``Distribution`` instances in the current environment. | [
"Get",
"all",
"Distribution",
"instances",
"in",
"the",
"current",
"environment",
"."
] | def distributions(**kwargs):
"""Get all ``Distribution`` instances in the current environment.
:return: An iterable of ``Distribution`` instances.
"""
return Distribution.discover(**kwargs) | [
"def",
"distributions",
"(",
"*",
"*",
"kwargs",
")",
":",
"return",
"Distribution",
".",
"discover",
"(",
"*",
"*",
"kwargs",
")"
] | https://github.com/python/cpython/blob/e13cdca0f5224ec4e23bdd04bb3120506964bc8b/Lib/importlib/metadata/__init__.py#L956-L961 |
|
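A short usage sketch for distributions(), available in the standard library from Python 3.8:

    from importlib.metadata import distributions

    # Iterate over every installed distribution in the current environment.
    for dist in distributions():
        print(dist.metadata["Name"], dist.version)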
nitishsrivastava/deepnet | f4e4ff207923e01552c96038a1e2c29eb5d16160 | eigenmat/eigenmat.py | python | EigenMatrix.overwrite | (self, array) | Overwrites self with array.
'array' should have a size smaller than that of the array used to
initialize the EigenMatrix. The method will not throw an Exception just
yet if this is not true. It will throw exceptions or behave in strange
ways later on. | Overwrites self with array.
'array' should have a size smaller than that of the array used to
initialize the EigenMatrix. The method will not throw an Exception just
yet if this is not true. It will throw exceptions or behave in strange
ways later on. | [
"Overwrites",
"self",
"with",
"array",
".",
"array",
"should",
"have",
"a",
"size",
"smaller",
"than",
"that",
"of",
"the",
"array",
"used",
"to",
"initialize",
"the",
"EigenMatrix",
".",
"The",
"method",
"will",
"not",
"throw",
"an",
"Exception",
"just",
"yet",
"if",
"this",
"is",
"not",
"true",
".",
"It",
"will",
"throw",
"exceptions",
"or",
"behave",
"in",
"strange",
"ways",
"later",
"on",
"."
] | def overwrite(self, array):
"""Overwrites self with array.
'array' should have a size smaller than that of the array used to
initialize the EigenMatrix. The method will not throw an Exception just
yet if this is not true. It will throw exceptions or behave in strange
ways later on.
"""
assert type(array) == np.ndarray, 'array must be a np.ndarray.'
array = reformat(array)
self.numpy_array = array
_eigenmat.init_from_array(self.p_mat, array.ctypes.data_as(ct.POINTER(ct.c_float)), ct.c_int(array.shape[0]), ct.c_int(array.shape[1])) | [
"def",
"overwrite",
"(",
"self",
",",
"array",
")",
":",
"assert",
"type",
"(",
"array",
")",
"==",
"np",
".",
"ndarray",
",",
"'array must be a np.ndarray.'",
"array",
"=",
"reformat",
"(",
"array",
")",
"self",
".",
"numpy_array",
"=",
"array",
"_eigenmat",
".",
"init_from_array",
"(",
"self",
".",
"p_mat",
",",
"array",
".",
"ctypes",
".",
"data_as",
"(",
"ct",
".",
"POINTER",
"(",
"ct",
".",
"c_float",
")",
")",
",",
"ct",
".",
"c_int",
"(",
"array",
".",
"shape",
"[",
"0",
"]",
")",
",",
"ct",
".",
"c_int",
"(",
"array",
".",
"shape",
"[",
"1",
"]",
")",
")"
] | https://github.com/nitishsrivastava/deepnet/blob/f4e4ff207923e01552c96038a1e2c29eb5d16160/eigenmat/eigenmat.py#L88-L99 |
||
XUSean0118/DVSNet | 2b67d991ca13de0a1210fbfbab4ad68f8c2f193a | inference.py | python | get_arguments | () | return parser.parse_args() | Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments. | Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments. | [
"Parse",
"all",
"the",
"arguments",
"provided",
"from",
"the",
"CLI",
".",
"Returns",
":",
"A",
"list",
"of",
"parsed",
"arguments",
"."
] | def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="Dynamic Video Segmentation Network")
parser.add_argument("--data_dir", type=str, default=DATA_DIRECTORY,
help="Path to the directory containing the dataset.")
parser.add_argument("--data_list", type=str, default=DATA_LIST_PATH,
help="Path to the file listing the images in the dataset.")
parser.add_argument("--restore_from", type=str, default=RESTORE_FROM,
help="Where restore model parameters from.")
parser.add_argument("--decision_from", type=str, default=RESTORE_FROM,
help="Where restore decision model parameters from.")
parser.add_argument("--save_dir", type=str, default=SAVE_DIR,
help="Where to save segmented output.")
parser.add_argument("--num_steps", type=int, default=NUM_STEPS,
help="Number of images in the video.")
parser.add_argument("--overlap", type=int, default=OVERLAP,
help="Overlapping size.")
parser.add_argument("--target", type=float, default=TARGET,
help="Confidence score threshold.")
parser.add_argument("--dynamic", action="store_true",
help="Whether to dynamically adjust target")
return parser.parse_args() | [
"def",
"get_arguments",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"\"Dynamic Video Segmentation Network\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--data_dir\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"DATA_DIRECTORY",
",",
"help",
"=",
"\"Path to the directory containing the dataset.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--data_list\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"DATA_LIST_PATH",
",",
"help",
"=",
"\"Path to the file listing the images in the dataset.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--restore_from\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"RESTORE_FROM",
",",
"help",
"=",
"\"Where restore model parameters from.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--decision_from\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"RESTORE_FROM",
",",
"help",
"=",
"\"Where restore decision model parameters from.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--save_dir\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"SAVE_DIR",
",",
"help",
"=",
"\"Where to save segmented output.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--num_steps\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"NUM_STEPS",
",",
"help",
"=",
"\"Number of images in the video.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--overlap\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"OVERLAP",
",",
"help",
"=",
"\"Overlapping size.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--target\"",
",",
"type",
"=",
"float",
",",
"default",
"=",
"TARGET",
",",
"help",
"=",
"\"Confidence score threshold.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--dynamic\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Whether to dynamically adjust target\"",
")",
"return",
"parser",
".",
"parse_args",
"(",
")"
] | https://github.com/XUSean0118/DVSNet/blob/2b67d991ca13de0a1210fbfbab4ad68f8c2f193a/inference.py#L29-L54 |
|
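An illustrative call to get_arguments() with a simulated command line; the paths and values are placeholders, and the function above is assumed to be in scope:

    import sys

    sys.argv = ["inference.py", "--data_dir", "./cityscapes",
                "--target", "80.0", "--dynamic"]
    args = get_arguments()
    print(args.data_dir, args.target, args.dynamic)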
pallets/werkzeug | 9efe8c00dcb2b6fc086961ba304729db01912652 | src/werkzeug/datastructures.py | python | MultiDict.getlist | (self, key, type=None) | return result | Return the list of items for a given key. If that key is not in the
`MultiDict`, the return value will be an empty list. Just like `get`,
`getlist` accepts a `type` parameter. All items will be converted
with the callable defined there.
:param key: The key to be looked up.
:param type: A callable that is used to cast the value in the
:class:`MultiDict`. If a :exc:`ValueError` is raised
by this callable the value will be removed from the list.
:return: a :class:`list` of all the values for the key. | Return the list of items for a given key. If that key is not in the
`MultiDict`, the return value will be an empty list. Just like `get`,
`getlist` accepts a `type` parameter. All items will be converted
with the callable defined there. | [
"Return",
"the",
"list",
"of",
"items",
"for",
"a",
"given",
"key",
".",
"If",
"that",
"key",
"is",
"not",
"in",
"the",
"MultiDict",
"the",
"return",
"value",
"will",
"be",
"an",
"empty",
"list",
".",
"Just",
"like",
"get",
"getlist",
"accepts",
"a",
"type",
"parameter",
".",
"All",
"items",
"will",
"be",
"converted",
"with",
"the",
"callable",
"defined",
"there",
"."
] | def getlist(self, key, type=None):
"""Return the list of items for a given key. If that key is not in the
`MultiDict`, the return value will be an empty list. Just like `get`,
`getlist` accepts a `type` parameter. All items will be converted
with the callable defined there.
:param key: The key to be looked up.
:param type: A callable that is used to cast the value in the
:class:`MultiDict`. If a :exc:`ValueError` is raised
by this callable the value will be removed from the list.
:return: a :class:`list` of all the values for the key.
"""
try:
rv = dict.__getitem__(self, key)
except KeyError:
return []
if type is None:
return list(rv)
result = []
for item in rv:
try:
result.append(type(item))
except ValueError:
pass
return result | [
"def",
"getlist",
"(",
"self",
",",
"key",
",",
"type",
"=",
"None",
")",
":",
"try",
":",
"rv",
"=",
"dict",
".",
"__getitem__",
"(",
"self",
",",
"key",
")",
"except",
"KeyError",
":",
"return",
"[",
"]",
"if",
"type",
"is",
"None",
":",
"return",
"list",
"(",
"rv",
")",
"result",
"=",
"[",
"]",
"for",
"item",
"in",
"rv",
":",
"try",
":",
"result",
".",
"append",
"(",
"type",
"(",
"item",
")",
")",
"except",
"ValueError",
":",
"pass",
"return",
"result"
] | https://github.com/pallets/werkzeug/blob/9efe8c00dcb2b6fc086961ba304729db01912652/src/werkzeug/datastructures.py#L395-L419 |
|
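A small usage sketch for MultiDict.getlist, including the type-cast behaviour described above:

    from werkzeug.datastructures import MultiDict

    d = MultiDict([("id", "1"), ("id", "2"), ("id", "x")])
    print(d.getlist("id"))            # ['1', '2', 'x']
    print(d.getlist("id", type=int))  # [1, 2] -- 'x' fails int() and is dropped
    print(d.getlist("missing"))       # []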
TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 5bb97d7e3ffd913abddb4cfa7d78a1b4c868890e | tensorflow_dl_models/research/lfads/synth_data/synthetic_data_utils.py | python | spikify_data | (data_e, rng, dt=1.0, max_firing_rate=100) | return spikes_e | Apply spikes to a continuous dataset whose values are between 0.0 and 1.0
Args:
data_e: nexamples length list of NxT trials
dt: how often the data are sampled
max_firing_rate: the firing rate that is associated with a value of 1.0
Returns:
spikified_e: a list of length b of the data represented as spikes,
sampled from the underlying poisson process. | Apply spikes to a continuous dataset whose values are between 0.0 and 1.0
Args:
data_e: nexamples length list of NxT trials
dt: how often the data are sampled
max_firing_rate: the firing rate that is associated with a value of 1.0
Returns:
spikified_e: a list of length b of the data represented as spikes,
sampled from the underlying poisson process. | [
"Apply",
"spikes",
"to",
"a",
"continuous",
"dataset",
"whose",
"values",
"are",
"between",
"0",
".",
"0",
"and",
"1",
".",
"0",
"Args",
":",
"data_e",
":",
"nexamples",
"length",
"list",
"of",
"NxT",
"trials",
"dt",
":",
"how",
"often",
"the",
"data",
"are",
"sampled",
"max_firing_rate",
":",
"the",
"firing",
"rate",
"that",
"is",
"associated",
"with",
"a",
"value",
"of",
"1",
".",
"0",
"Returns",
":",
"spikified_e",
":",
"a",
"list",
"of",
"length",
"b",
"of",
"the",
"data",
"represented",
"as",
"spikes",
"sampled",
"from",
"the",
"underlying",
"poisson",
"process",
"."
] | def spikify_data(data_e, rng, dt=1.0, max_firing_rate=100):
""" Apply spikes to a continuous dataset whose values are between 0.0 and 1.0
Args:
data_e: nexamples length list of NxT trials
dt: how often the data are sampled
max_firing_rate: the firing rate that is associated with a value of 1.0
Returns:
spikified_e: a list of length b of the data represented as spikes,
sampled from the underlying poisson process.
"""
E = len(data_e)
spikes_e = []
for e in range(E):
data = data_e[e]
N,T = data.shape
data_s = np.zeros([N,T]).astype(np.int)
for n in range(N):
f = data[n,:]
s = rng.poisson(f*max_firing_rate*dt, size=T)
data_s[n,:] = s
spikes_e.append(data_s)
return spikes_e | [
"def",
"spikify_data",
"(",
"data_e",
",",
"rng",
",",
"dt",
"=",
"1.0",
",",
"max_firing_rate",
"=",
"100",
")",
":",
"E",
"=",
"len",
"(",
"data_e",
")",
"spikes_e",
"=",
"[",
"]",
"for",
"e",
"in",
"range",
"(",
"E",
")",
":",
"data",
"=",
"data_e",
"[",
"e",
"]",
"N",
",",
"T",
"=",
"data",
".",
"shape",
"data_s",
"=",
"np",
".",
"zeros",
"(",
"[",
"N",
",",
"T",
"]",
")",
".",
"astype",
"(",
"np",
".",
"int",
")",
"for",
"n",
"in",
"range",
"(",
"N",
")",
":",
"f",
"=",
"data",
"[",
"n",
",",
":",
"]",
"s",
"=",
"rng",
".",
"poisson",
"(",
"f",
"*",
"max_firing_rate",
"*",
"dt",
",",
"size",
"=",
"T",
")",
"data_s",
"[",
"n",
",",
":",
"]",
"=",
"s",
"spikes_e",
".",
"append",
"(",
"data_s",
")",
"return",
"spikes_e"
] | https://github.com/TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials/blob/5bb97d7e3ffd913abddb4cfa7d78a1b4c868890e/tensorflow_dl_models/research/lfads/synth_data/synthetic_data_utils.py#L128-L151 |
|
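A toy call to spikify_data; it assumes the function above is in scope, and the rates, bin width and peak rate are made-up values:

    import numpy as np

    rng = np.random.RandomState(0)
    # Two trials, each with 3 "neurons" over 100 time bins, rates in [0, 1].
    rate = np.abs(np.sin(np.linspace(0, np.pi, 100)))
    data_e = [np.tile(rate, (3, 1)) for _ in range(2)]
    spikes_e = spikify_data(data_e, rng, dt=0.01, max_firing_rate=50)
    print(spikes_e[0].shape)  # (3, 100), integer spike counts per bin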
nosmokingbandit/watcher | dadacd21a5790ee609058a98a17fcc8954d24439 | lib/sqlalchemy/orm/strategy_options.py | python | subqueryload | (loadopt, attr) | return loadopt.set_relationship_strategy(attr, {"lazy": "subquery"}) | Indicate that the given attribute should be loaded using
subquery eager loading.
This function is part of the :class:`.Load` interface and supports
both method-chained and standalone operation.
examples::
# subquery-load the "orders" collection on "User"
query(User).options(subqueryload(User.orders))
# subquery-load Order.items and then Item.keywords
query(Order).options(subqueryload(Order.items).subqueryload(Item.keywords))
# lazily load Order.items, but when Items are loaded,
# subquery-load the keywords collection
query(Order).options(lazyload(Order.items).subqueryload(Item.keywords))
.. seealso::
:ref:`loading_toplevel`
:func:`.orm.joinedload`
:func:`.orm.lazyload`
:paramref:`.relationship.lazy` | Indicate that the given attribute should be loaded using
subquery eager loading. | [
"Indicate",
"that",
"the",
"given",
"attribute",
"should",
"be",
"loaded",
"using",
"subquery",
"eager",
"loading",
"."
] | def subqueryload(loadopt, attr):
"""Indicate that the given attribute should be loaded using
subquery eager loading.
This function is part of the :class:`.Load` interface and supports
both method-chained and standalone operation.
examples::
# subquery-load the "orders" collection on "User"
query(User).options(subqueryload(User.orders))
# subquery-load Order.items and then Item.keywords
query(Order).options(subqueryload(Order.items).subqueryload(Item.keywords))
# lazily load Order.items, but when Items are loaded,
# subquery-load the keywords collection
query(Order).options(lazyload(Order.items).subqueryload(Item.keywords))
.. seealso::
:ref:`loading_toplevel`
:func:`.orm.joinedload`
:func:`.orm.lazyload`
:paramref:`.relationship.lazy`
"""
return loadopt.set_relationship_strategy(attr, {"lazy": "subquery"}) | [
"def",
"subqueryload",
"(",
"loadopt",
",",
"attr",
")",
":",
"return",
"loadopt",
".",
"set_relationship_strategy",
"(",
"attr",
",",
"{",
"\"lazy\"",
":",
"\"subquery\"",
"}",
")"
] | https://github.com/nosmokingbandit/watcher/blob/dadacd21a5790ee609058a98a17fcc8954d24439/lib/sqlalchemy/orm/strategy_options.py#L770-L801 |
|
gem/oq-engine | 1bdb88f3914e390abcbd285600bfd39477aae47c | openquake/calculators/base.py | python | create_gmf_data | (dstore, prim_imts, sec_imts=(), data=None) | Create and possibly populate the datasets in the gmf_data group | Create and possibly populate the datasets in the gmf_data group | [
"Create",
"and",
"possibly",
"populate",
"the",
"datasets",
"in",
"the",
"gmf_data",
"group"
] | def create_gmf_data(dstore, prim_imts, sec_imts=(), data=None):
"""
Create and possibly populate the datasets in the gmf_data group
"""
oq = dstore['oqparam']
R = dstore['full_lt'].get_num_rlzs()
M = len(prim_imts)
n = 0 if data is None else len(data['sid'])
items = [('sid', U32 if n == 0 else data['sid']),
('eid', U32 if n == 0 else data['eid'])]
for m in range(M):
col = f'gmv_{m}'
items.append((col, F32 if data is None else data[col]))
for imt in sec_imts:
items.append((str(imt), F32 if n == 0 else data[imt]))
if oq.investigation_time:
eff_time = oq.investigation_time * oq.ses_per_logic_tree_path * R
else:
eff_time = 0
dstore.create_df('gmf_data', items, 'gzip')
dstore.set_attrs('gmf_data', num_events=len(dstore['events']),
imts=' '.join(map(str, prim_imts)),
effective_time=eff_time)
if data is not None:
df = pandas.DataFrame(dict(items))
avg_gmf = numpy.zeros((2, n, M + len(sec_imts)), F32)
for sid, df in df.groupby(df.sid):
df.pop('eid')
df.pop('sid')
avg_gmf[:, sid] = stats.avg_std(df.to_numpy())
dstore['avg_gmf'] = avg_gmf | [
"def",
"create_gmf_data",
"(",
"dstore",
",",
"prim_imts",
",",
"sec_imts",
"=",
"(",
")",
",",
"data",
"=",
"None",
")",
":",
"oq",
"=",
"dstore",
"[",
"'oqparam'",
"]",
"R",
"=",
"dstore",
"[",
"'full_lt'",
"]",
".",
"get_num_rlzs",
"(",
")",
"M",
"=",
"len",
"(",
"prim_imts",
")",
"n",
"=",
"0",
"if",
"data",
"is",
"None",
"else",
"len",
"(",
"data",
"[",
"'sid'",
"]",
")",
"items",
"=",
"[",
"(",
"'sid'",
",",
"U32",
"if",
"n",
"==",
"0",
"else",
"data",
"[",
"'sid'",
"]",
")",
",",
"(",
"'eid'",
",",
"U32",
"if",
"n",
"==",
"0",
"else",
"data",
"[",
"'eid'",
"]",
")",
"]",
"for",
"m",
"in",
"range",
"(",
"M",
")",
":",
"col",
"=",
"f'gmv_{m}'",
"items",
".",
"append",
"(",
"(",
"col",
",",
"F32",
"if",
"data",
"is",
"None",
"else",
"data",
"[",
"col",
"]",
")",
")",
"for",
"imt",
"in",
"sec_imts",
":",
"items",
".",
"append",
"(",
"(",
"str",
"(",
"imt",
")",
",",
"F32",
"if",
"n",
"==",
"0",
"else",
"data",
"[",
"imt",
"]",
")",
")",
"if",
"oq",
".",
"investigation_time",
":",
"eff_time",
"=",
"oq",
".",
"investigation_time",
"*",
"oq",
".",
"ses_per_logic_tree_path",
"*",
"R",
"else",
":",
"eff_time",
"=",
"0",
"dstore",
".",
"create_df",
"(",
"'gmf_data'",
",",
"items",
",",
"'gzip'",
")",
"dstore",
".",
"set_attrs",
"(",
"'gmf_data'",
",",
"num_events",
"=",
"len",
"(",
"dstore",
"[",
"'events'",
"]",
")",
",",
"imts",
"=",
"' '",
".",
"join",
"(",
"map",
"(",
"str",
",",
"prim_imts",
")",
")",
",",
"effective_time",
"=",
"eff_time",
")",
"if",
"data",
"is",
"not",
"None",
":",
"df",
"=",
"pandas",
".",
"DataFrame",
"(",
"dict",
"(",
"items",
")",
")",
"avg_gmf",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"2",
",",
"n",
",",
"M",
"+",
"len",
"(",
"sec_imts",
")",
")",
",",
"F32",
")",
"for",
"sid",
",",
"df",
"in",
"df",
".",
"groupby",
"(",
"df",
".",
"sid",
")",
":",
"df",
".",
"pop",
"(",
"'eid'",
")",
"df",
".",
"pop",
"(",
"'sid'",
")",
"avg_gmf",
"[",
":",
",",
"sid",
"]",
"=",
"stats",
".",
"avg_std",
"(",
"df",
".",
"to_numpy",
"(",
")",
")",
"dstore",
"[",
"'avg_gmf'",
"]",
"=",
"avg_gmf"
] | https://github.com/gem/oq-engine/blob/1bdb88f3914e390abcbd285600bfd39477aae47c/openquake/calculators/base.py#L1141-L1171 |
||
linxid/Machine_Learning_Study_Path | 558e82d13237114bbb8152483977806fc0c222af | Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/site-packages/pip/_vendor/ipaddress.py | python | _BaseNetwork.hostmask | (self) | return x | [] | def hostmask(self):
x = self._cache.get('hostmask')
if x is None:
x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
self._cache['hostmask'] = x
return x | [
"def",
"hostmask",
"(",
"self",
")",
":",
"x",
"=",
"self",
".",
"_cache",
".",
"get",
"(",
"'hostmask'",
")",
"if",
"x",
"is",
"None",
":",
"x",
"=",
"self",
".",
"_address_class",
"(",
"int",
"(",
"self",
".",
"netmask",
")",
"^",
"self",
".",
"_ALL_ONES",
")",
"self",
".",
"_cache",
"[",
"'hostmask'",
"]",
"=",
"x",
"return",
"x"
] | https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/site-packages/pip/_vendor/ipaddress.py#L826-L831 |
|||
dropbox/dropbox-sdk-python | 015437429be224732990041164a21a0501235db1 | dropbox/base.py | python | DropboxBase.sharing_create_shared_link | (self,
path,
short_url=False,
pending_upload=None) | return r | Create a shared link. If a shared link already exists for the given
path, that link is returned. Previously, it was technically possible to
break a shared link by moving or renaming the corresponding file or
folder. In the future, this will no longer be the case, so your app
shouldn't rely on this behavior. Instead, if your app needs to revoke a
shared link, use :meth:`sharing_revoke_shared_link`.
:param str path: The path to share.
:type short_url: bool
:param Nullable[:class:`dropbox.sharing.PendingUploadMode`]
pending_upload: If it's okay to share a path that does not yet
exist, set this to either ``PendingUploadMode.file`` or
``PendingUploadMode.folder`` to indicate whether to assume it's a
file or folder.
:rtype: :class:`dropbox.sharing.PathLinkMetadata`
:raises: :class:`.exceptions.ApiError`
If this raises, ApiError will contain:
:class:`dropbox.sharing.CreateSharedLinkError` | Create a shared link. If a shared link already exists for the given
path, that link is returned. Previously, it was technically possible to
break a shared link by moving or renaming the corresponding file or
folder. In the future, this will no longer be the case, so your app
shouldn't rely on this behavior. Instead, if your app needs to revoke a
shared link, use :meth:`sharing_revoke_shared_link`. | [
"Create",
"a",
"shared",
"link",
".",
"If",
"a",
"shared",
"link",
"already",
"exists",
"for",
"the",
"given",
"path",
"that",
"link",
"is",
"returned",
".",
"Previously",
"it",
"was",
"technically",
"possible",
"to",
"break",
"a",
"shared",
"link",
"by",
"moving",
"or",
"renaming",
"the",
"corresponding",
"file",
"or",
"folder",
".",
"In",
"the",
"future",
"this",
"will",
"no",
"longer",
"be",
"the",
"case",
"so",
"your",
"app",
"shouldn",
"t",
"rely",
"on",
"this",
"behavior",
".",
"Instead",
"if",
"your",
"app",
"needs",
"to",
"revoke",
"a",
"shared",
"link",
"use",
":",
"meth",
":",
"sharing_revoke_shared_link",
"."
] | def sharing_create_shared_link(self,
path,
short_url=False,
pending_upload=None):
"""
Create a shared link. If a shared link already exists for the given
path, that link is returned. Previously, it was technically possible to
break a shared link by moving or renaming the corresponding file or
folder. In the future, this will no longer be the case, so your app
shouldn't rely on this behavior. Instead, if your app needs to revoke a
shared link, use :meth:`sharing_revoke_shared_link`.
:param str path: The path to share.
:type short_url: bool
:param Nullable[:class:`dropbox.sharing.PendingUploadMode`]
pending_upload: If it's okay to share a path that does not yet
exist, set this to either ``PendingUploadMode.file`` or
``PendingUploadMode.folder`` to indicate whether to assume it's a
file or folder.
:rtype: :class:`dropbox.sharing.PathLinkMetadata`
:raises: :class:`.exceptions.ApiError`
If this raises, ApiError will contain:
:class:`dropbox.sharing.CreateSharedLinkError`
"""
warnings.warn(
'create_shared_link is deprecated. Use create_shared_link_with_settings.',
DeprecationWarning,
)
arg = sharing.CreateSharedLinkArg(path,
short_url,
pending_upload)
r = self.request(
sharing.create_shared_link,
'sharing',
arg,
None,
)
return r | [
"def",
"sharing_create_shared_link",
"(",
"self",
",",
"path",
",",
"short_url",
"=",
"False",
",",
"pending_upload",
"=",
"None",
")",
":",
"warnings",
".",
"warn",
"(",
"'create_shared_link is deprecated. Use create_shared_link_with_settings.'",
",",
"DeprecationWarning",
",",
")",
"arg",
"=",
"sharing",
".",
"CreateSharedLinkArg",
"(",
"path",
",",
"short_url",
",",
"pending_upload",
")",
"r",
"=",
"self",
".",
"request",
"(",
"sharing",
".",
"create_shared_link",
",",
"'sharing'",
",",
"arg",
",",
"None",
",",
")",
"return",
"r"
] | https://github.com/dropbox/dropbox-sdk-python/blob/015437429be224732990041164a21a0501235db1/dropbox/base.py#L4071-L4109 |
|
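A usage sketch for the deprecated sharing_create_shared_link call; the access token and path are placeholders, and new code should prefer sharing_create_shared_link_with_settings, as the DeprecationWarning above indicates:

    import dropbox

    dbx = dropbox.Dropbox("ACCESS_TOKEN")
    link = dbx.sharing_create_shared_link("/reports/2021/summary.pdf")
    print(link.url)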
cisco/mindmeld | 809c36112e9ea8019fe29d54d136ca14eb4fd8db | mindmeld/system_entity_recognizer.py | python | SystemEntityRecognizer.load_from_app_path | (app_path) | If the application configuration is empty, we do not use Duckling.
Otherwise, we return the Duckling recognizer with the URL defined in the application's
config, defaulting to the DEFAULT_DUCKLING_URL.
Args:
app_path (str): Application path
Returns:
(SystemEntityRecognizer) | If the application configuration is empty, we do not use Duckling. | [
"If",
"the",
"application",
"configuration",
"is",
"empty",
"we",
"do",
"not",
"use",
"Duckling",
"."
] | def load_from_app_path(app_path):
"""If the application configuration is empty, we do not use Duckling.
Otherwise, we return the Duckling recognizer with the URL defined in the application's
config, defaulting to the DEFAULT_DUCKLING_URL.
Args:
app_path (str): Application path
Returns:
(SystemEntityRecognizer)
"""
if not app_path:
raise SystemEntityError(
"App path must be valid to load entity recognizer config."
)
if is_duckling_configured(app_path):
url = get_system_entity_url_config(app_path=app_path)
return DucklingRecognizer.get_instance(url)
else:
return NoOpSystemEntityRecognizer.get_instance() | [
"def",
"load_from_app_path",
"(",
"app_path",
")",
":",
"if",
"not",
"app_path",
":",
"raise",
"SystemEntityError",
"(",
"\"App path must be valid to load entity recognizer config.\"",
")",
"if",
"is_duckling_configured",
"(",
"app_path",
")",
":",
"url",
"=",
"get_system_entity_url_config",
"(",
"app_path",
"=",
"app_path",
")",
"return",
"DucklingRecognizer",
".",
"get_instance",
"(",
"url",
")",
"else",
":",
"return",
"NoOpSystemEntityRecognizer",
".",
"get_instance",
"(",
")"
] | https://github.com/cisco/mindmeld/blob/809c36112e9ea8019fe29d54d136ca14eb4fd8db/mindmeld/system_entity_recognizer.py#L118-L139 |
||
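An illustrative call to load_from_app_path; the application directory is a placeholder:

    from mindmeld.system_entity_recognizer import SystemEntityRecognizer

    recognizer = SystemEntityRecognizer.load_from_app_path("./home_assistant")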
Delta-ML/delta | 31dfebc8f20b7cb282b62f291ff25a87e403cc86 | delta/utils/solver/utils/callbacks.py | python | ParallelModelCheckpoint.__init__ | (self,
model,
filepath,
monitor='val_loss',
verbose=0,
save_best_only=False,
save_weights_only=False,
mode='auto',
save_freq='epoch',
load_weights_on_restart=False,
period=1) | [] | def __init__(self,
model,
filepath,
monitor='val_loss',
verbose=0,
save_best_only=False,
save_weights_only=False,
mode='auto',
save_freq='epoch',
load_weights_on_restart=False,
period=1):
self.model_to_save = model
super().__init__(
filepath=filepath,
monitor=monitor,
verbose=verbose,
save_best_only=save_best_only,
save_weights_only=save_weights_only,
mode=mode,
save_freq=save_freq,
load_weights_on_restart=load_weights_on_restart,
period=period) | [
"def",
"__init__",
"(",
"self",
",",
"model",
",",
"filepath",
",",
"monitor",
"=",
"'val_loss'",
",",
"verbose",
"=",
"0",
",",
"save_best_only",
"=",
"False",
",",
"save_weights_only",
"=",
"False",
",",
"mode",
"=",
"'auto'",
",",
"save_freq",
"=",
"'epoch'",
",",
"load_weights_on_restart",
"=",
"False",
",",
"period",
"=",
"1",
")",
":",
"self",
".",
"model_to_save",
"=",
"model",
"super",
"(",
")",
".",
"__init__",
"(",
"filepath",
"=",
"filepath",
",",
"monitor",
"=",
"monitor",
",",
"verbose",
"=",
"verbose",
",",
"save_best_only",
"=",
"save_best_only",
",",
"save_weights_only",
"=",
"save_weights_only",
",",
"mode",
"=",
"mode",
",",
"save_freq",
"=",
"save_freq",
",",
"load_weights_on_restart",
"=",
"load_weights_on_restart",
",",
"period",
"=",
"period",
")"
] | https://github.com/Delta-ML/delta/blob/31dfebc8f20b7cb282b62f291ff25a87e403cc86/delta/utils/solver/utils/callbacks.py#L160-L181 |
||||
vmware/vsphere-automation-sdk-python | ba7d4e0742f58a641dfed9538ecbbb1db4f3891e | samples/vmc/draas/site_recovery_activation_ops.py | python | SiteRecoveryActivationOperations.deactivate_srm | (self) | [] | def deactivate_srm(self):
if self.cleanup:
try:
srm_deactivation_task = self.vmc_client.draas.SiteRecovery.delete(self.org_id,
self.sddc_id,
force=True)
except InvalidRequest as e:
# Convert InvalidRequest to ErrorResponse to get error message
error_response = e.data.convert_to(ErrorResponse)
raise Exception(error_response.error_messages)
wait_for_task(task_client=self.vmc_client.draas.Task,
org_id=self.org_id,
task_id=srm_deactivation_task.id,
interval_sec=self.interval_sec) | [
"def",
"deactivate_srm",
"(",
"self",
")",
":",
"if",
"self",
".",
"cleanup",
":",
"try",
":",
"srm_deactivation_task",
"=",
"self",
".",
"vmc_client",
".",
"draas",
".",
"SiteRecovery",
".",
"delete",
"(",
"self",
".",
"org_id",
",",
"self",
".",
"sddc_id",
",",
"force",
"=",
"True",
")",
"except",
"InvalidRequest",
"as",
"e",
":",
"# Convert InvalidRequest to ErrorResponse to get error message",
"error_response",
"=",
"e",
".",
"data",
".",
"convert_to",
"(",
"ErrorResponse",
")",
"raise",
"Exception",
"(",
"error_response",
".",
"error_messages",
")",
"wait_for_task",
"(",
"task_client",
"=",
"self",
".",
"vmc_client",
".",
"draas",
".",
"Task",
",",
"org_id",
"=",
"self",
".",
"org_id",
",",
"task_id",
"=",
"srm_deactivation_task",
".",
"id",
",",
"interval_sec",
"=",
"self",
".",
"interval_sec",
")"
] | https://github.com/vmware/vsphere-automation-sdk-python/blob/ba7d4e0742f58a641dfed9538ecbbb1db4f3891e/samples/vmc/draas/site_recovery_activation_ops.py#L81-L95 |
||||
chainer/chainer-chemistry | efe323aa21f63a815130d673781e7cca1ccb72d2 | chainer_chemistry/dataset/networkx_preprocessors/reddit_coo.py | python | get_reddit_coo_data | (dirpath) | return PaddingGraphData(
x=reddit_data['feature'].astype(numpy.float32),
adj=adj,
y=reddit_data['label'].astype(numpy.int32),
label_num=41
) | Temporary function to obtain reddit coo data for GIN
(because it takes too much time to convert it to networkx)
Returns:
PaddingGraphData: `PaddingGraphData` of reddit | Temporary function to obtain reddit coo data for GIN | [
"Temporary",
"function",
"to",
"obtain",
"reddit",
"coo",
"data",
"for",
"GIN"
] | def get_reddit_coo_data(dirpath):
"""Temporary function to obtain reddit coo data for GIN
(because it takes too much time to convert it to networkx)
Returns:
PaddingGraphData: `PaddingGraphData` of reddit
"""
print("Loading node feature and label")
reddit_data = numpy.load(os.path.join(dirpath, "reddit_data.npz"))
print("Loading edge data")
coo_adj = scipy.sparse.load_npz(os.path.join(dirpath, "reddit_graph.npz"))
row = coo_adj.row.astype(numpy.int32)
col = coo_adj.col.astype(numpy.int32)
data = coo_adj.data.astype(numpy.float32)
# ensure row is sorted
if not numpy.all(row[:-1] <= row[1:]):
order = numpy.argsort(row)
row = row[order]
col = col[order]
assert numpy.all(row[:-1] <= row[1:])
adj = chainer.utils.CooMatrix(
data=data, row=row, col=col,
shape=coo_adj.shape,
order='C')
return PaddingGraphData(
x=reddit_data['feature'].astype(numpy.float32),
adj=adj,
y=reddit_data['label'].astype(numpy.int32),
label_num=41
) | [
"def",
"get_reddit_coo_data",
"(",
"dirpath",
")",
":",
"print",
"(",
"\"Loading node feature and label\"",
")",
"reddit_data",
"=",
"numpy",
".",
"load",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dirpath",
",",
"\"reddit_data.npz\"",
")",
")",
"print",
"(",
"\"Loading edge data\"",
")",
"coo_adj",
"=",
"scipy",
".",
"sparse",
".",
"load_npz",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dirpath",
",",
"\"reddit_graph.npz\"",
")",
")",
"row",
"=",
"coo_adj",
".",
"row",
".",
"astype",
"(",
"numpy",
".",
"int32",
")",
"col",
"=",
"coo_adj",
".",
"col",
".",
"astype",
"(",
"numpy",
".",
"int32",
")",
"data",
"=",
"coo_adj",
".",
"data",
".",
"astype",
"(",
"numpy",
".",
"float32",
")",
"# ensure row is sorted",
"if",
"not",
"numpy",
".",
"all",
"(",
"row",
"[",
":",
"-",
"1",
"]",
"<=",
"row",
"[",
"1",
":",
"]",
")",
":",
"order",
"=",
"numpy",
".",
"argsort",
"(",
"row",
")",
"row",
"=",
"row",
"[",
"order",
"]",
"col",
"=",
"col",
"[",
"order",
"]",
"assert",
"numpy",
".",
"all",
"(",
"row",
"[",
":",
"-",
"1",
"]",
"<=",
"row",
"[",
"1",
":",
"]",
")",
"adj",
"=",
"chainer",
".",
"utils",
".",
"CooMatrix",
"(",
"data",
"=",
"data",
",",
"row",
"=",
"row",
",",
"col",
"=",
"col",
",",
"shape",
"=",
"coo_adj",
".",
"shape",
",",
"order",
"=",
"'C'",
")",
"return",
"PaddingGraphData",
"(",
"x",
"=",
"reddit_data",
"[",
"'feature'",
"]",
".",
"astype",
"(",
"numpy",
".",
"float32",
")",
",",
"adj",
"=",
"adj",
",",
"y",
"=",
"reddit_data",
"[",
"'label'",
"]",
".",
"astype",
"(",
"numpy",
".",
"int32",
")",
",",
"label_num",
"=",
"41",
")"
] | https://github.com/chainer/chainer-chemistry/blob/efe323aa21f63a815130d673781e7cca1ccb72d2/chainer_chemistry/dataset/networkx_preprocessors/reddit_coo.py#L11-L46 |
|
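A sketch of calling get_reddit_coo_data; it assumes the function above is in scope and that reddit_data.npz and reddit_graph.npz have already been downloaded into the given directory, which is a placeholder:

    data = get_reddit_coo_data("./reddit")
    print(data.x.shape, data.y.shape, data.label_num)  # node features, labels, 41 classes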
Symbo1/wsltools | 0b6e536fc85c707a1c81f0296c4e91ca835396a1 | wsltools/utils/faker/providers/address/fr_FR/__init__.py | python | Provider.street_prefix | (self) | return self.random_element(self.street_prefixes) | :example 'rue' | :example 'rue' | [
":",
"example",
"rue"
] | def street_prefix(self):
"""
:example 'rue'
"""
return self.random_element(self.street_prefixes) | [
"def",
"street_prefix",
"(",
"self",
")",
":",
"return",
"self",
".",
"random_element",
"(",
"self",
".",
"street_prefixes",
")"
] | https://github.com/Symbo1/wsltools/blob/0b6e536fc85c707a1c81f0296c4e91ca835396a1/wsltools/utils/faker/providers/address/fr_FR/__init__.py#L141-L145 |
|
chribsen/simple-machine-learning-examples | dc94e52a4cebdc8bb959ff88b81ff8cfeca25022 | venv/lib/python2.7/site-packages/numpy/ma/core.py | python | asanyarray | (a, dtype=None) | return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True) | Convert the input to a masked array, conserving subclasses.
If `a` is a subclass of `MaskedArray`, its class is conserved.
No copy is performed if the input is already an `ndarray`.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array.
dtype : dtype, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Default is 'C'.
Returns
-------
out : MaskedArray
MaskedArray interpretation of `a`.
See Also
--------
asarray : Similar to `asanyarray`, but does not conserve subclass.
Examples
--------
>>> x = np.arange(10.).reshape(2, 5)
>>> x
array([[ 0., 1., 2., 3., 4.],
[ 5., 6., 7., 8., 9.]])
>>> np.ma.asanyarray(x)
masked_array(data =
[[ 0. 1. 2. 3. 4.]
[ 5. 6. 7. 8. 9.]],
mask =
False,
fill_value = 1e+20)
>>> type(np.ma.asanyarray(x))
<class 'numpy.ma.core.MaskedArray'> | Convert the input to a masked array, conserving subclasses. | [
"Convert",
"the",
"input",
"to",
"a",
"masked",
"array",
"conserving",
"subclasses",
"."
] | def asanyarray(a, dtype=None):
"""
Convert the input to a masked array, conserving subclasses.
If `a` is a subclass of `MaskedArray`, its class is conserved.
No copy is performed if the input is already an `ndarray`.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array.
dtype : dtype, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Default is 'C'.
Returns
-------
out : MaskedArray
MaskedArray interpretation of `a`.
See Also
--------
asarray : Similar to `asanyarray`, but does not conserve subclass.
Examples
--------
>>> x = np.arange(10.).reshape(2, 5)
>>> x
array([[ 0., 1., 2., 3., 4.],
[ 5., 6., 7., 8., 9.]])
>>> np.ma.asanyarray(x)
masked_array(data =
[[ 0. 1. 2. 3. 4.]
[ 5. 6. 7. 8. 9.]],
mask =
False,
fill_value = 1e+20)
>>> type(np.ma.asanyarray(x))
<class 'numpy.ma.core.MaskedArray'>
"""
return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True) | [
"def",
"asanyarray",
"(",
"a",
",",
"dtype",
"=",
"None",
")",
":",
"return",
"masked_array",
"(",
"a",
",",
"dtype",
"=",
"dtype",
",",
"copy",
"=",
"False",
",",
"keep_mask",
"=",
"True",
",",
"subok",
"=",
"True",
")"
] | https://github.com/chribsen/simple-machine-learning-examples/blob/dc94e52a4cebdc8bb959ff88b81ff8cfeca25022/venv/lib/python2.7/site-packages/numpy/ma/core.py#L7566-L7609 |
|
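A short usage sketch for np.ma.asanyarray:

    import numpy as np

    m = np.ma.masked_array([1.0, 2.0, 3.0], mask=[False, True, False])
    same = np.ma.asanyarray(m)           # no copy; MaskedArray subclass preserved
    plain = np.ma.asanyarray([1, 2, 3])  # a plain sequence becomes a MaskedArray
    print(type(same), same.mask, type(plain))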
mesalock-linux/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | lib-python/2.7/mailbox.py | python | MH.get_message | (self, key) | return msg | Return a Message representation or raise a KeyError. | Return a Message representation or raise a KeyError. | [
"Return",
"a",
"Message",
"representation",
"or",
"raise",
"a",
"KeyError",
"."
] | def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
try:
if self._locked:
f = open(os.path.join(self._path, str(key)), 'r+')
else:
f = open(os.path.join(self._path, str(key)), 'r')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
try:
if self._locked:
_lock_file(f)
try:
msg = MHMessage(f)
finally:
if self._locked:
_unlock_file(f)
finally:
f.close()
for name, key_list in self.get_sequences().iteritems():
if key in key_list:
msg.add_sequence(name)
return msg | [
"def",
"get_message",
"(",
"self",
",",
"key",
")",
":",
"try",
":",
"if",
"self",
".",
"_locked",
":",
"f",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_path",
",",
"str",
"(",
"key",
")",
")",
",",
"'r+'",
")",
"else",
":",
"f",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_path",
",",
"str",
"(",
"key",
")",
")",
",",
"'r'",
")",
"except",
"IOError",
",",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"ENOENT",
":",
"raise",
"KeyError",
"(",
"'No message with key: %s'",
"%",
"key",
")",
"else",
":",
"raise",
"try",
":",
"if",
"self",
".",
"_locked",
":",
"_lock_file",
"(",
"f",
")",
"try",
":",
"msg",
"=",
"MHMessage",
"(",
"f",
")",
"finally",
":",
"if",
"self",
".",
"_locked",
":",
"_unlock_file",
"(",
"f",
")",
"finally",
":",
"f",
".",
"close",
"(",
")",
"for",
"name",
",",
"key_list",
"in",
"self",
".",
"get_sequences",
"(",
")",
".",
"iteritems",
"(",
")",
":",
"if",
"key",
"in",
"key_list",
":",
"msg",
".",
"add_sequence",
"(",
"name",
")",
"return",
"msg"
] | https://github.com/mesalock-linux/mesapy/blob/ed546d59a21b36feb93e2309d5c6b75aa0ad95c9/lib-python/2.7/mailbox.py#L1004-L1029 |
|
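A usage sketch for MH.get_message from the standard-library mailbox module; the mail directory is a placeholder:

    import mailbox

    mh = mailbox.MH("~/Mail/inbox", create=False)
    for key in mh.keys():
        msg = mh.get_message(key)  # MHMessage with its sequences attached
        print(key, msg.get_sequences(), msg["Subject"])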
seppius-xbmc-repo/ru | d0879d56ec8243b2c7af44fda5cf3d1ff77fd2e2 | plugin.video.torrent.gnu/resources/lib/torr2xbmc.py | python | stream | (params) | [] | def stream (params):
torr_link='f4a94963c11a47f213b145697f494b5fc5485b02'
TSplayer=tsengine()
out=TSplayer.load_torrent(torr_link,'INFOHASH',port=aceport)
if out=='Ok':
TSplayer.play_url_ind(0,'stream',None)
TSplayer.end() | [
"def",
"stream",
"(",
"params",
")",
":",
"torr_link",
"=",
"'f4a94963c11a47f213b145697f494b5fc5485b02'",
"TSplayer",
"=",
"tsengine",
"(",
")",
"out",
"=",
"TSplayer",
".",
"load_torrent",
"(",
"torr_link",
",",
"'INFOHASH'",
",",
"port",
"=",
"aceport",
")",
"if",
"out",
"==",
"'Ok'",
":",
"TSplayer",
".",
"play_url_ind",
"(",
"0",
",",
"'stream'",
",",
"None",
")",
"TSplayer",
".",
"end",
"(",
")"
] | https://github.com/seppius-xbmc-repo/ru/blob/d0879d56ec8243b2c7af44fda5cf3d1ff77fd2e2/plugin.video.torrent.gnu/resources/lib/torr2xbmc.py#L112-L118 |
||||
plaid/plaid-python | 8c60fca608e426f3ff30da8857775946d29e122c | plaid/model/payment_initiation_optional_restriction_bacs.py | python | PaymentInitiationOptionalRestrictionBacs.openapi_types | () | return {
'account': (str,), # noqa: E501
'sort_code': (str,), # noqa: E501
} | This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type. | This must be a method because a model may have properties that are
of type self, this must run after the class is loaded | [
"This",
"must",
"be",
"a",
"method",
"because",
"a",
"model",
"may",
"have",
"properties",
"that",
"are",
"of",
"type",
"self",
"this",
"must",
"run",
"after",
"the",
"class",
"is",
"loaded"
] | def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'account': (str,), # noqa: E501
'sort_code': (str,), # noqa: E501
} | [
"def",
"openapi_types",
"(",
")",
":",
"lazy_import",
"(",
")",
"return",
"{",
"'account'",
":",
"(",
"str",
",",
")",
",",
"# noqa: E501",
"'sort_code'",
":",
"(",
"str",
",",
")",
",",
"# noqa: E501",
"}"
] | https://github.com/plaid/plaid-python/blob/8c60fca608e426f3ff30da8857775946d29e122c/plaid/model/payment_initiation_optional_restriction_bacs.py#L82-L95 |
|
zhl2008/awd-platform | 0416b31abea29743387b10b3914581fbe8e7da5e | web_hxb2/lib/python3.5/site-packages/django/utils/feedgenerator.py | python | SyndicationFeed.root_attributes | (self) | return {} | Return extra attributes to place on the root (i.e. feed/channel) element.
Called from write(). | Return extra attributes to place on the root (i.e. feed/channel) element.
Called from write(). | [
"Return",
"extra",
"attributes",
"to",
"place",
"on",
"the",
"root",
"(",
"i",
".",
"e",
".",
"feed",
"/",
"channel",
")",
"element",
".",
"Called",
"from",
"write",
"()",
"."
] | def root_attributes(self):
"""
Return extra attributes to place on the root (i.e. feed/channel) element.
Called from write().
"""
return {} | [
"def",
"root_attributes",
"(",
"self",
")",
":",
"return",
"{",
"}"
] | https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/django/utils/feedgenerator.py#L170-L175 |
|
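A sketch of overriding root_attributes in a feed subclass to add an extra namespace; the Dublin Core namespace here is just an example:

    from django.utils import feedgenerator

    class DCAtomFeed(feedgenerator.Atom1Feed):
        def root_attributes(self):
            attrs = super(DCAtomFeed, self).root_attributes()
            attrs["xmlns:dc"] = "http://purl.org/dc/elements/1.1/"
            return attrs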
CGATOxford/cgat | 326aad4694bdfae8ddc194171bb5d73911243947 | CGAT/Genomics.py | python | CalculatePairIndices | (seq1, seq2, gap_char="-", with_codons=False) | return result | returns number of identical and transitions/transversions substitutions
in the alignment.
If with-codons = True, synonymous and nonsynonymous changes will
be recorded as well. The routine assumes no frame-shifts and will
count more than one change as non-synonymous. | returns number of identical and transitions/transversions substitutions
in the alignment. | [
"returns",
"number",
"of",
"idential",
"and",
"transitions",
"/",
"transversions",
"substitutions",
"in",
"the",
"alignment",
"."
] | def CalculatePairIndices(seq1, seq2, gap_char="-", with_codons=False):
"""returns number of idential and transitions/transversions substitutions
in the alignment.
If with-codons = True, synonymous and nonsynonymous changes will
be recorded as well. The routine assumes no frame-shifts and will
count more than one change as non-synonymous.
"""
alphabet = "ACGT" + gap_char
map_char2pos = {}
for x in alphabet:
map_char2pos[x] = len(map_char2pos)
# build coordinates for various substitution subsets
transitions, transversions = [], []
for x in ("AG", "GA", "CT", "TC"):
transitions.append((map_char2pos[x[0]], map_char2pos[x[1]]))
for x in ("AT", "TA", "GT", "TG", "GC", "CG", "AC", "CA"):
transversions.append((map_char2pos[x[0]], map_char2pos[x[1]]))
matrix = AlignedPair2SubstitutionMatrix(seq1, seq2, alphabet)
matrix_acgt = matrix[0:4, 0:4]
if with_codons:
result = SequencePairInfoCodons()
else:
result = SequencePairInfo()
result.mMatrix = matrix
result.mMapChar2Pos = map_char2pos
result.mNAligned = numpy.sum(numpy.ravel(matrix_acgt))
result.mNIdentical = numpy.sum(numpy.trace(matrix_acgt))
result.mNTransitions = numpy.sum([matrix[x] for x in transitions])
result.mNTransversions = numpy.sum([matrix[x] for x in transversions])
result.mNDifferent = result.mNAligned - result.mNIdentical
result.mNUnaligned1 = numpy.sum(numpy.ravel(matrix[0:4, 4]))
result.mNUnaligned2 = numpy.sum(numpy.ravel(matrix[4, 0:4]))
if with_codons:
nsyn, nnon = 0, 0
pairs = list(zip(seq1, seq2))
for x in range(len(pairs)):
a, b = pairs[x]
if a != b:
l = (x // 3) * 3
c1 = MapCodon2AA(seq1[l:l + 3])
c2 = MapCodon2AA(seq2[l:l + 3])
if c1 == GAP_CHAR or c2 == GAP_CHAR:
continue
# print x, a, b, l, c1, c2, seq1[l:l+3], seq2[l:l+3], c1 == c2
if c1 == c2:
nsyn += 1
else:
nnon += 1
result.mNSynonymous = nsyn
result.mNNonSynonymous = nnon
return result | [
"def",
"CalculatePairIndices",
"(",
"seq1",
",",
"seq2",
",",
"gap_char",
"=",
"\"-\"",
",",
"with_codons",
"=",
"False",
")",
":",
"alphabet",
"=",
"\"ACGT\"",
"+",
"gap_char",
"map_char2pos",
"=",
"{",
"}",
"for",
"x",
"in",
"alphabet",
":",
"map_char2pos",
"[",
"x",
"]",
"=",
"len",
"(",
"map_char2pos",
")",
"# build coordinates for various substitution subsets",
"transitions",
",",
"transversions",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"x",
"in",
"(",
"\"AG\"",
",",
"\"GA\"",
",",
"\"CT\"",
",",
"\"TC\"",
")",
":",
"transitions",
".",
"append",
"(",
"(",
"map_char2pos",
"[",
"x",
"[",
"0",
"]",
"]",
",",
"map_char2pos",
"[",
"x",
"[",
"1",
"]",
"]",
")",
")",
"for",
"x",
"in",
"(",
"\"AT\"",
",",
"\"TA\"",
",",
"\"GT\"",
",",
"\"TG\"",
",",
"\"GC\"",
",",
"\"CG\"",
",",
"\"AC\"",
",",
"\"CA\"",
")",
":",
"transversions",
".",
"append",
"(",
"(",
"map_char2pos",
"[",
"x",
"[",
"0",
"]",
"]",
",",
"map_char2pos",
"[",
"x",
"[",
"1",
"]",
"]",
")",
")",
"matrix",
"=",
"AlignedPair2SubstitutionMatrix",
"(",
"seq1",
",",
"seq2",
",",
"alphabet",
")",
"matrix_acgt",
"=",
"matrix",
"[",
"0",
":",
"4",
",",
"0",
":",
"4",
"]",
"if",
"with_codons",
":",
"result",
"=",
"SequencePairInfoCodons",
"(",
")",
"else",
":",
"result",
"=",
"SequencePairInfo",
"(",
")",
"result",
".",
"mMatrix",
"=",
"matrix",
"result",
".",
"mMapChar2Pos",
"=",
"map_char2pos",
"result",
".",
"mNAligned",
"=",
"numpy",
".",
"sum",
"(",
"numpy",
".",
"ravel",
"(",
"matrix_acgt",
")",
")",
"result",
".",
"mNIdentical",
"=",
"numpy",
".",
"sum",
"(",
"numpy",
".",
"trace",
"(",
"matrix_acgt",
")",
")",
"result",
".",
"mNTransitions",
"=",
"numpy",
".",
"sum",
"(",
"[",
"matrix",
"[",
"x",
"]",
"for",
"x",
"in",
"transitions",
"]",
")",
"result",
".",
"mNTransversions",
"=",
"numpy",
".",
"sum",
"(",
"[",
"matrix",
"[",
"x",
"]",
"for",
"x",
"in",
"transversions",
"]",
")",
"result",
".",
"mNDifferent",
"=",
"result",
".",
"mNAligned",
"-",
"result",
".",
"mNIdentical",
"result",
".",
"mNUnaligned1",
"=",
"numpy",
".",
"sum",
"(",
"numpy",
".",
"ravel",
"(",
"matrix",
"[",
"0",
":",
"4",
",",
"4",
"]",
")",
")",
"result",
".",
"mNUnaligned2",
"=",
"numpy",
".",
"sum",
"(",
"numpy",
".",
"ravel",
"(",
"matrix",
"[",
"4",
",",
"0",
":",
"4",
"]",
")",
")",
"if",
"with_codons",
":",
"nsyn",
",",
"nnon",
"=",
"0",
",",
"0",
"pairs",
"=",
"list",
"(",
"zip",
"(",
"seq1",
",",
"seq2",
")",
")",
"for",
"x",
"in",
"range",
"(",
"len",
"(",
"pairs",
")",
")",
":",
"a",
",",
"b",
"=",
"pairs",
"[",
"x",
"]",
"if",
"a",
"!=",
"b",
":",
"l",
"=",
"(",
"x",
"//",
"3",
")",
"*",
"3",
"c1",
"=",
"MapCodon2AA",
"(",
"seq1",
"[",
"l",
":",
"l",
"+",
"3",
"]",
")",
"c2",
"=",
"MapCodon2AA",
"(",
"seq2",
"[",
"l",
":",
"l",
"+",
"3",
"]",
")",
"if",
"c1",
"==",
"GAP_CHAR",
"or",
"c2",
"==",
"GAP_CHAR",
":",
"continue",
"# print x, a, b, l, c1, c2, seq1[l:l+3], seq2[l:l+3], c1 == c2",
"if",
"c1",
"==",
"c2",
":",
"nsyn",
"+=",
"1",
"else",
":",
"nnon",
"+=",
"1",
"result",
".",
"mNSynonymous",
"=",
"nsyn",
"result",
".",
"mNNonSynonymous",
"=",
"nnon",
"return",
"result"
] | https://github.com/CGATOxford/cgat/blob/326aad4694bdfae8ddc194171bb5d73911243947/CGAT/Genomics.py#L1478-L1542 |
|
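A self-contained sketch of the counting `CalculatePairIndices` performs over an aligned pair (identities, transitions, transversions, with gap positions skipped). This is not the CGAT implementation and omits the codon bookkeeping; it only illustrates the substitution classification:

TRANSITIONS = {("A", "G"), ("G", "A"), ("C", "T"), ("T", "C")}

def pair_counts(seq1, seq2, gap_char="-"):
    identical = transitions = transversions = 0
    for a, b in zip(seq1.upper(), seq2.upper()):
        if a == gap_char or b == gap_char:
            continue  # unaligned positions are not compared
        if a == b:
            identical += 1
        elif (a, b) in TRANSITIONS:
            transitions += 1
        else:
            transversions += 1
    return identical, transitions, transversions

print(pair_counts("ACGTACGT", "ACATAC-T"))  # (6, 1, 0)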
hongyuanmei/neurawkes | 808d3ec28c3aebd907c63616e133acd52380246f | run_models.py | python | test_neural_hawkes_ctsm_and_save_time | (input_test) | this function is called to test neural hawkes ctsm with time encoder | this function is called to test neural hawkes ctsm with time encoder | [
"this",
"function",
"is",
"called",
"to",
"test",
"neural",
"hawkes",
"ctsm",
"with",
"time",
"encoder"
] | def test_neural_hawkes_ctsm_and_save_time(input_test):
'''
this function is called to test neural hawkes ctsm with time encoder
'''
#TODO: pre-settings like random states
numpy.random.seed(
input_test['seed_random']
)
#
log_dict = {
'log_file': None,
'save_file_path': None,
'mode': 'create', 'compile_time': None,
'max_dev_log_likelihood': -1e6,
#
'args': input_test['args'],
#
'iteration': 0,
'track_period': None,
'max_epoch': input_test['max_epoch'],
'size_batch': input_test['size_batch'],
'tracked': {
'track_cnt': None,
'train_log_likelihood': None,
'dev_log_likelihood': None,
'train_log_likelihood_time': None,
'dev_log_likelihood_time': None,
'train_log_likelihood_type': None,
'dev_log_likelihood_type': None,
#
'train_time': None, 'dev_time': None
}
}
#TODO: get the data and process the data
print "reading and processing data ... "
data_process = data_processers.DataProcesser(
{
'path_rawdata': input_test['path_rawdata'],
'size_batch': input_test['size_batch'],
'ratio_train': numpy.float32(0.0),
'to_read': [input_test['tag_split']],
'partial_predict': input_test['partial_predict']
}
)
#
prune_stream = numpy.int32(
input_test['prune_stream']
)
assert(prune_stream >= 0)
if prune_stream > 0:
data_process.prune_stream(prune_stream)
#
#TODO: build the model
print "building model ... "
compile_start = time.time()
model_settings = {
'model': input_test['model'],
'dim_process': data_process.dim_process,
#
#'dim_time': data_process.dim_time,
#'dim_model': input_test['dim_model'],
#
'coef_l2': input_test['coef_l2'],
'size_batch': input_test['size_batch'],
'optimizer': input_test['optimizer'],
'path_pre_train': input_test['path_pre_train'],
'predict_lambda': input_test['predict_lambda']
}
control = controllers.ControlNeuralHawkesCTSM_time(
model_settings
)
compile_end = time.time()
compile_time = compile_end - compile_start
#'''
print "model finished, compilation time is ", round(compile_time, 0)
#TODO: start training, define the training functions
tag_split = input_test['tag_split']
for epi in range(log_dict['max_epoch']):
#
list_log_likelihood_seq = []
list_log_likelihood_type = []
list_log_likelihood_time = []
list_num_of_events = []
#
dev_start = time.time()
#
print "validating for ", tag_split
#
total_log_likelihood_dev = 0.0
total_log_likelihood_time_dev = 0.0
total_log_likelihood_type_dev = 0.0
total_num_of_events_dev = 0.0
#
for step_dev in range(data_process.max_nums[tag_split] ):
#
data_process.process_data(
tag_batch = tag_split,
idx_batch_current = step_dev,
tag_model = 'neural',
multiple = numpy.int32(
input_test['multiple_sample_for_dev']
),
predict_first = input_test['predict_first']
)
#
#print "training ... "
log_likelihood_numpy, log_likelihood_time_numpy, log_likelihood_type_numpy, num_of_events_numpy = control.model_dev(
#data_process.seq_time_to_end_numpy,
data_process.seq_time_to_current_numpy,
data_process.seq_type_event_numpy,
#data_process.seq_time_rep_numpy,
data_process.seq_time_values_numpy,
data_process.time_since_start_to_end_numpy,
data_process.num_sims_start_to_end_numpy,
data_process.seq_mask_numpy,
data_process.seq_sims_time_to_current_numpy,
data_process.seq_sims_index_in_hidden_numpy,
data_process.seq_sims_mask_numpy
)
#
list_log_likelihood_seq.append(
log_likelihood_numpy
)
list_log_likelihood_type.append(
log_likelihood_type_numpy
)
list_log_likelihood_time.append(
log_likelihood_time_numpy
)
list_num_of_events.append(
num_of_events_numpy
)
#
total_log_likelihood_dev += log_likelihood_numpy
total_log_likelihood_time_dev += log_likelihood_time_numpy
total_log_likelihood_type_dev += log_likelihood_type_numpy
total_num_of_events_dev += num_of_events_numpy
#
if step_dev % 100 == 99:
print "in validation, the step is out of ", step_dev, data_process.max_nums[tag_split]
#
log_likelihood_final = round(
total_log_likelihood_dev / total_num_of_events_dev, 4
)
log_likelihood_time_final = round(
total_log_likelihood_time_dev / total_num_of_events_dev, 4
)
log_likelihood_type_final = round(
total_log_likelihood_type_dev / total_num_of_events_dev, 4
)
#
dev_end = time.time()
#
# start saving stuff !!!
dict_results = {
'log_likelihood': {
'seq': log_likelihood_final,
'type': log_likelihood_type_final,
'time': log_likelihood_time_final
},
'model': input_test['path_pre_train'],
'data': input_test['path_rawdata'],
'tag_split': tag_split,
'lists': {
'log_likelihood_seq': list_log_likelihood_seq,
'log_likelihood_type': list_log_likelihood_type,
'log_likelihood_time': list_log_likelihood_time,
'num_of_events': list_num_of_events
}
}
#
#TODO: add more info about this model
log_org = organizers.LogOrg(
{
'path_tracks': os.path.abspath(
input_test['path_logs']
)
}
)
log_org.read_logs()
dict_log_org = log_org.get_one_log()
dict_results['log_info'] = dict_log_org
#
#
with open(input_test['file_to_save_results'], 'wb') as f:
pickle.dump(dict_results, f)
#
#
print "the model is : ", input_test['path_pre_train']
print "the dataset is : ", input_test['path_rawdata']
print "the tag split is : ", tag_split
print "the log-likelihood of seq is : ", log_likelihood_final
print "the log-likelihood of type is : ", log_likelihood_type_final
print "the log-likelihood of time is : ", log_likelihood_time_final
#
print "the three values in the order of seq / type / time : is " + str(log_likelihood_final) + ' / ' + str(log_likelihood_type_final) + ' / ' + str(log_likelihood_time_final)
#
print "finish testing and saving " | [
"def",
"test_neural_hawkes_ctsm_and_save_time",
"(",
"input_test",
")",
":",
"#TODO: pre-settings like random states",
"numpy",
".",
"random",
".",
"seed",
"(",
"input_test",
"[",
"'seed_random'",
"]",
")",
"#",
"log_dict",
"=",
"{",
"'log_file'",
":",
"None",
",",
"'save_file_path'",
":",
"None",
",",
"'mode'",
":",
"'create'",
",",
"'compile_time'",
":",
"None",
",",
"'max_dev_log_likelihood'",
":",
"-",
"1e6",
",",
"#",
"'args'",
":",
"input_test",
"[",
"'args'",
"]",
",",
"#",
"'iteration'",
":",
"0",
",",
"'track_period'",
":",
"None",
",",
"'max_epoch'",
":",
"input_test",
"[",
"'max_epoch'",
"]",
",",
"'size_batch'",
":",
"input_test",
"[",
"'size_batch'",
"]",
",",
"'tracked'",
":",
"{",
"'track_cnt'",
":",
"None",
",",
"'train_log_likelihood'",
":",
"None",
",",
"'dev_log_likelihood'",
":",
"None",
",",
"'train_log_likelihood_time'",
":",
"None",
",",
"'dev_log_likelihood_time'",
":",
"None",
",",
"'train_log_likelihood_type'",
":",
"None",
",",
"'dev_log_likelihood_type'",
":",
"None",
",",
"#",
"'train_time'",
":",
"None",
",",
"'dev_time'",
":",
"None",
"}",
"}",
"#TODO: get the data and process the data",
"print",
"\"reading and processing data ... \"",
"data_process",
"=",
"data_processers",
".",
"DataProcesser",
"(",
"{",
"'path_rawdata'",
":",
"input_test",
"[",
"'path_rawdata'",
"]",
",",
"'size_batch'",
":",
"input_test",
"[",
"'size_batch'",
"]",
",",
"'ratio_train'",
":",
"numpy",
".",
"float32",
"(",
"0.0",
")",
",",
"'to_read'",
":",
"[",
"input_test",
"[",
"'tag_split'",
"]",
"]",
",",
"'partial_predict'",
":",
"input_test",
"[",
"'partial_predict'",
"]",
"}",
")",
"#",
"prune_stream",
"=",
"numpy",
".",
"int32",
"(",
"input_test",
"[",
"'prune_stream'",
"]",
")",
"assert",
"(",
"prune_stream",
">=",
"0",
")",
"if",
"prune_stream",
">",
"0",
":",
"data_process",
".",
"prune_stream",
"(",
"prune_stream",
")",
"#",
"#TODO: build the model",
"print",
"\"building model ... \"",
"compile_start",
"=",
"time",
".",
"time",
"(",
")",
"model_settings",
"=",
"{",
"'model'",
":",
"input_test",
"[",
"'model'",
"]",
",",
"'dim_process'",
":",
"data_process",
".",
"dim_process",
",",
"#",
"#'dim_time': data_process.dim_time,",
"#'dim_model': input_test['dim_model'],",
"#",
"'coef_l2'",
":",
"input_test",
"[",
"'coef_l2'",
"]",
",",
"'size_batch'",
":",
"input_test",
"[",
"'size_batch'",
"]",
",",
"'optimizer'",
":",
"input_test",
"[",
"'optimizer'",
"]",
",",
"'path_pre_train'",
":",
"input_test",
"[",
"'path_pre_train'",
"]",
",",
"'predict_lambda'",
":",
"input_test",
"[",
"'predict_lambda'",
"]",
"}",
"control",
"=",
"controllers",
".",
"ControlNeuralHawkesCTSM_time",
"(",
"model_settings",
")",
"compile_end",
"=",
"time",
".",
"time",
"(",
")",
"compile_time",
"=",
"compile_end",
"-",
"compile_start",
"#'''",
"print",
"\"model finished, comilation time is \"",
",",
"round",
"(",
"compile_time",
",",
"0",
")",
"#TODO: start training, define the training functions",
"tag_split",
"=",
"input_test",
"[",
"'tag_split'",
"]",
"for",
"epi",
"in",
"range",
"(",
"log_dict",
"[",
"'max_epoch'",
"]",
")",
":",
"#",
"list_log_likelihood_seq",
"=",
"[",
"]",
"list_log_likelihood_type",
"=",
"[",
"]",
"list_log_likelihood_time",
"=",
"[",
"]",
"list_num_of_events",
"=",
"[",
"]",
"#",
"dev_start",
"=",
"time",
".",
"time",
"(",
")",
"#",
"print",
"\"validating for \"",
",",
"tag_split",
"#",
"total_log_likelihood_dev",
"=",
"0.0",
"total_log_likelihood_time_dev",
"=",
"0.0",
"total_log_likelihood_type_dev",
"=",
"0.0",
"total_num_of_events_dev",
"=",
"0.0",
"#",
"for",
"step_dev",
"in",
"range",
"(",
"data_process",
".",
"max_nums",
"[",
"tag_split",
"]",
")",
":",
"#",
"data_process",
".",
"process_data",
"(",
"tag_batch",
"=",
"tag_split",
",",
"idx_batch_current",
"=",
"step_dev",
",",
"tag_model",
"=",
"'neural'",
",",
"multiple",
"=",
"numpy",
".",
"int32",
"(",
"input_test",
"[",
"'multiple_sample_for_dev'",
"]",
")",
",",
"predict_first",
"=",
"input_test",
"[",
"'predict_first'",
"]",
")",
"#",
"#print \"training ... \"",
"log_likelihood_numpy",
",",
"log_likelihood_time_numpy",
",",
"log_likelihood_type_numpy",
",",
"num_of_events_numpy",
"=",
"control",
".",
"model_dev",
"(",
"#data_process.seq_time_to_end_numpy,",
"data_process",
".",
"seq_time_to_current_numpy",
",",
"data_process",
".",
"seq_type_event_numpy",
",",
"#data_process.seq_time_rep_numpy,",
"data_process",
".",
"seq_time_values_numpy",
",",
"data_process",
".",
"time_since_start_to_end_numpy",
",",
"data_process",
".",
"num_sims_start_to_end_numpy",
",",
"data_process",
".",
"seq_mask_numpy",
",",
"data_process",
".",
"seq_sims_time_to_current_numpy",
",",
"data_process",
".",
"seq_sims_index_in_hidden_numpy",
",",
"data_process",
".",
"seq_sims_mask_numpy",
")",
"#",
"list_log_likelihood_seq",
".",
"append",
"(",
"log_likelihood_numpy",
")",
"list_log_likelihood_type",
".",
"append",
"(",
"log_likelihood_type_numpy",
")",
"list_log_likelihood_time",
".",
"append",
"(",
"log_likelihood_time_numpy",
")",
"list_num_of_events",
".",
"append",
"(",
"num_of_events_numpy",
")",
"#",
"total_log_likelihood_dev",
"+=",
"log_likelihood_numpy",
"total_log_likelihood_time_dev",
"+=",
"log_likelihood_time_numpy",
"total_log_likelihood_type_dev",
"+=",
"log_likelihood_type_numpy",
"total_num_of_events_dev",
"+=",
"num_of_events_numpy",
"#",
"if",
"step_dev",
"%",
"100",
"==",
"99",
":",
"print",
"\"in validation, the step is out of \"",
",",
"step_dev",
",",
"data_process",
".",
"max_nums",
"[",
"tag_split",
"]",
"#",
"log_likelihood_final",
"=",
"round",
"(",
"total_log_likelihood_dev",
"/",
"total_num_of_events_dev",
",",
"4",
")",
"log_likelihood_time_final",
"=",
"round",
"(",
"total_log_likelihood_time_dev",
"/",
"total_num_of_events_dev",
",",
"4",
")",
"log_likelihood_type_final",
"=",
"round",
"(",
"total_log_likelihood_type_dev",
"/",
"total_num_of_events_dev",
",",
"4",
")",
"#",
"dev_end",
"=",
"time",
".",
"time",
"(",
")",
"#",
"# start saving stuff !!!",
"dict_results",
"=",
"{",
"'log_likelihood'",
":",
"{",
"'seq'",
":",
"log_likelihood_final",
",",
"'type'",
":",
"log_likelihood_type_final",
",",
"'time'",
":",
"log_likelihood_time_final",
"}",
",",
"'model'",
":",
"input_test",
"[",
"'path_pre_train'",
"]",
",",
"'data'",
":",
"input_test",
"[",
"'path_rawdata'",
"]",
",",
"'tag_split'",
":",
"tag_split",
",",
"'lists'",
":",
"{",
"'log_likelihood_seq'",
":",
"list_log_likelihood_seq",
",",
"'log_likelihood_type'",
":",
"list_log_likelihood_type",
",",
"'log_likelihood_time'",
":",
"list_log_likelihood_time",
",",
"'num_of_events'",
":",
"list_num_of_events",
"}",
"}",
"#",
"#TODO: add more info about this model",
"log_org",
"=",
"organizers",
".",
"LogOrg",
"(",
"{",
"'path_tracks'",
":",
"os",
".",
"path",
".",
"abspath",
"(",
"input_test",
"[",
"'path_logs'",
"]",
")",
"}",
")",
"log_org",
".",
"read_logs",
"(",
")",
"dict_log_org",
"=",
"log_org",
".",
"get_one_log",
"(",
")",
"dict_results",
"[",
"'log_info'",
"]",
"=",
"dict_log_org",
"#",
"#",
"with",
"open",
"(",
"input_test",
"[",
"'file_to_save_results'",
"]",
",",
"'wb'",
")",
"as",
"f",
":",
"pickle",
".",
"dump",
"(",
"dict_results",
",",
"f",
")",
"#",
"#",
"print",
"\"the model is : \"",
",",
"input_test",
"[",
"'path_pre_train'",
"]",
"print",
"\"the dataset is : \"",
",",
"input_test",
"[",
"'path_rawdata'",
"]",
"print",
"\"the tag split is : \"",
",",
"tag_split",
"print",
"\"the log-likelihood of seq is : \"",
",",
"log_likelihood_final",
"print",
"\"the log-likelihood of type is : \"",
",",
"log_likelihood_type_final",
"print",
"\"the log-likelihood of time is : \"",
",",
"log_likelihood_time_final",
"#",
"print",
"\"the three values in the order of seq / type / time : is \"",
"+",
"str",
"(",
"log_likelihood_final",
")",
"+",
"' / '",
"+",
"str",
"(",
"log_likelihood_type_final",
")",
"+",
"' / '",
"+",
"str",
"(",
"log_likelihood_time_final",
")",
"#",
"print",
"\"finish testing and saving \""
] | https://github.com/hongyuanmei/neurawkes/blob/808d3ec28c3aebd907c63616e133acd52380246f/run_models.py#L3473-L3680 |
||
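Most of the routine above is bookkeeping: accumulate per-batch log-likelihoods and event counts, then report per-event averages. A tiny sketch of that aggregation with made-up batch values (the numbers are illustrative only):

totals = {"seq": 0.0, "time": 0.0, "type": 0.0}
num_events = 0.0
# Each fake batch: (ll_seq, ll_time, ll_type, number_of_events)
for ll_seq, ll_time, ll_type, n in [(-12.3, -7.1, -5.2, 10), (-20.0, -11.0, -9.0, 16)]:
    totals["seq"] += ll_seq
    totals["time"] += ll_time
    totals["type"] += ll_type
    num_events += n
print({k: round(v / num_events, 4) for k, v in totals.items()})
# {'seq': -1.2423, 'time': -0.6962, 'type': -0.5462}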
SCons/scons | 309f0234d1d9cc76955818be47c5c722f577dac6 | SCons/Tool/ninja/Methods.py | python | get_command | (env, node, action) | return ninja_build | Get the command to execute for node. | Get the command to execute for node. | [
"Get",
"the",
"command",
"to",
"execute",
"for",
"node",
"."
] | def get_command(env, node, action): # pylint: disable=too-many-branches
"""Get the command to execute for node."""
if node.env:
sub_env = node.env
else:
sub_env = env
executor = node.get_executor()
tlist, slist = get_targets_sources(node)
# Generate a real CommandAction
if isinstance(action, SCons.Action.CommandGeneratorAction):
# pylint: disable=protected-access
action = action._generate(tlist, slist, sub_env, SUBST_CMD, executor=executor)
variables = {}
comstr = get_comstr(sub_env, action, tlist, slist)
if not comstr:
return None
provider = __NINJA_RULE_MAPPING.get(comstr, get_generic_shell_command)
rule, variables, provider_deps = provider(sub_env, node, action, tlist, slist, executor=executor)
if node.get_env().get('NINJA_FORCE_SCONS_BUILD'):
rule = 'TEMPLATE'
# Get the dependencies for all targets
implicit = list({dep for tgt in tlist for dep in get_dependencies(tgt)})
# Now add in the other dependencies related to the command,
# e.g. the compiler binary. The ninja rule can be user provided so
# we must do some validation to resolve the dependency path for ninja.
for provider_dep in provider_deps:
provider_dep = sub_env.subst(provider_dep)
if not provider_dep:
continue
# If the tool is a node, then SCons will resolve the path later, if it's not
# a node then we assume it generated from build and make sure it is existing.
if isinstance(provider_dep, SCons.Node.Node) or os.path.exists(provider_dep):
implicit.append(provider_dep)
continue
# in some case the tool could be in the local directory and be supplied without the ext
# such as in windows, so append the executable suffix and check.
prog_suffix = sub_env.get('PROGSUFFIX', '')
provider_dep_ext = provider_dep if provider_dep.endswith(prog_suffix) else provider_dep + prog_suffix
if os.path.exists(provider_dep_ext):
implicit.append(provider_dep_ext)
continue
# Many commands will assume the binary is in the path, so
# we accept this as a possible input from a given command.
provider_dep_abspath = sub_env.WhereIs(provider_dep) or sub_env.WhereIs(provider_dep, path=os.environ["PATH"])
if provider_dep_abspath:
implicit.append(provider_dep_abspath)
continue
# Possibly these could be ignored and the build would still work, however it may not always
# rebuild correctly, so we hard stop, and force the user to fix the issue with the provided
# ninja rule.
raise Exception("Could not resolve path for %s dependency on node '%s'" % (provider_dep, node))
ninja_build = {
"order_only": get_order_only(node),
"outputs": get_outputs(node),
"inputs": get_inputs(node),
"implicit": implicit,
"rule": get_rule(node, rule),
"variables": variables,
}
# Don't use sub_env here because we require that NINJA_POOL be set
# on a per-builder call basis to prevent accidental strange
# behavior like env['NINJA_POOL'] = 'console' and sub_env can be
# the global Environment object if node.env is None.
# Example:
#
# Allowed:
#
# env.Command("ls", NINJA_POOL="ls_pool")
#
# Not allowed and ignored:
#
# env["NINJA_POOL"] = "ls_pool"
# env.Command("ls")
#
# TODO: Why not allow env['NINJA_POOL'] ? (bdbaddog)
if node.env and node.env.get("NINJA_POOL", None) is not None:
ninja_build["pool"] = node.env["NINJA_POOL"]
return ninja_build | [
"def",
"get_command",
"(",
"env",
",",
"node",
",",
"action",
")",
":",
"# pylint: disable=too-many-branches",
"if",
"node",
".",
"env",
":",
"sub_env",
"=",
"node",
".",
"env",
"else",
":",
"sub_env",
"=",
"env",
"executor",
"=",
"node",
".",
"get_executor",
"(",
")",
"tlist",
",",
"slist",
"=",
"get_targets_sources",
"(",
"node",
")",
"# Generate a real CommandAction",
"if",
"isinstance",
"(",
"action",
",",
"SCons",
".",
"Action",
".",
"CommandGeneratorAction",
")",
":",
"# pylint: disable=protected-access",
"action",
"=",
"action",
".",
"_generate",
"(",
"tlist",
",",
"slist",
",",
"sub_env",
",",
"SUBST_CMD",
",",
"executor",
"=",
"executor",
")",
"variables",
"=",
"{",
"}",
"comstr",
"=",
"get_comstr",
"(",
"sub_env",
",",
"action",
",",
"tlist",
",",
"slist",
")",
"if",
"not",
"comstr",
":",
"return",
"None",
"provider",
"=",
"__NINJA_RULE_MAPPING",
".",
"get",
"(",
"comstr",
",",
"get_generic_shell_command",
")",
"rule",
",",
"variables",
",",
"provider_deps",
"=",
"provider",
"(",
"sub_env",
",",
"node",
",",
"action",
",",
"tlist",
",",
"slist",
",",
"executor",
"=",
"executor",
")",
"if",
"node",
".",
"get_env",
"(",
")",
".",
"get",
"(",
"'NINJA_FORCE_SCONS_BUILD'",
")",
":",
"rule",
"=",
"'TEMPLATE'",
"# Get the dependencies for all targets",
"implicit",
"=",
"list",
"(",
"{",
"dep",
"for",
"tgt",
"in",
"tlist",
"for",
"dep",
"in",
"get_dependencies",
"(",
"tgt",
")",
"}",
")",
"# Now add in the other dependencies related to the command,",
"# e.g. the compiler binary. The ninja rule can be user provided so",
"# we must do some validation to resolve the dependency path for ninja.",
"for",
"provider_dep",
"in",
"provider_deps",
":",
"provider_dep",
"=",
"sub_env",
".",
"subst",
"(",
"provider_dep",
")",
"if",
"not",
"provider_dep",
":",
"continue",
"# If the tool is a node, then SCons will resolve the path later, if its not",
"# a node then we assume it generated from build and make sure it is existing.",
"if",
"isinstance",
"(",
"provider_dep",
",",
"SCons",
".",
"Node",
".",
"Node",
")",
"or",
"os",
".",
"path",
".",
"exists",
"(",
"provider_dep",
")",
":",
"implicit",
".",
"append",
"(",
"provider_dep",
")",
"continue",
"# in some case the tool could be in the local directory and be supplied without the ext",
"# such as in windows, so append the executable suffix and check.",
"prog_suffix",
"=",
"sub_env",
".",
"get",
"(",
"'PROGSUFFIX'",
",",
"''",
")",
"provider_dep_ext",
"=",
"provider_dep",
"if",
"provider_dep",
".",
"endswith",
"(",
"prog_suffix",
")",
"else",
"provider_dep",
"+",
"prog_suffix",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"provider_dep_ext",
")",
":",
"implicit",
".",
"append",
"(",
"provider_dep_ext",
")",
"continue",
"# Many commands will assume the binary is in the path, so",
"# we accept this as a possible input from a given command.",
"provider_dep_abspath",
"=",
"sub_env",
".",
"WhereIs",
"(",
"provider_dep",
")",
"or",
"sub_env",
".",
"WhereIs",
"(",
"provider_dep",
",",
"path",
"=",
"os",
".",
"environ",
"[",
"\"PATH\"",
"]",
")",
"if",
"provider_dep_abspath",
":",
"implicit",
".",
"append",
"(",
"provider_dep_abspath",
")",
"continue",
"# Possibly these could be ignore and the build would still work, however it may not always",
"# rebuild correctly, so we hard stop, and force the user to fix the issue with the provided",
"# ninja rule.",
"raise",
"Exception",
"(",
"\"Could not resolve path for %s dependency on node '%s'\"",
"%",
"(",
"provider_dep",
",",
"node",
")",
")",
"ninja_build",
"=",
"{",
"\"order_only\"",
":",
"get_order_only",
"(",
"node",
")",
",",
"\"outputs\"",
":",
"get_outputs",
"(",
"node",
")",
",",
"\"inputs\"",
":",
"get_inputs",
"(",
"node",
")",
",",
"\"implicit\"",
":",
"implicit",
",",
"\"rule\"",
":",
"get_rule",
"(",
"node",
",",
"rule",
")",
",",
"\"variables\"",
":",
"variables",
",",
"}",
"# Don't use sub_env here because we require that NINJA_POOL be set",
"# on a per-builder call basis to prevent accidental strange",
"# behavior like env['NINJA_POOL'] = 'console' and sub_env can be",
"# the global Environment object if node.env is None.",
"# Example:",
"#",
"# Allowed:",
"#",
"# env.Command(\"ls\", NINJA_POOL=\"ls_pool\")",
"#",
"# Not allowed and ignored:",
"#",
"# env[\"NINJA_POOL\"] = \"ls_pool\"",
"# env.Command(\"ls\")",
"#",
"# TODO: Why not alloe env['NINJA_POOL'] ? (bdbaddog)",
"if",
"node",
".",
"env",
"and",
"node",
".",
"env",
".",
"get",
"(",
"\"NINJA_POOL\"",
",",
"None",
")",
"is",
"not",
"None",
":",
"ninja_build",
"[",
"\"pool\"",
"]",
"=",
"node",
".",
"env",
"[",
"\"NINJA_POOL\"",
"]",
"return",
"ninja_build"
] | https://github.com/SCons/scons/blob/309f0234d1d9cc76955818be47c5c722f577dac6/SCons/Tool/ninja/Methods.py#L121-L213 |
|
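The heart of `get_command` is a provider lookup: a mapping from an action's command string to a callable that returns (rule, variables, extra dependencies), with a generic shell-command fallback when nothing is registered. A framework-free sketch of that dispatch; the rule names and command strings are illustrative, not the SCons ninja API:

def generic_shell_provider(env, node):
    # Fallback used when no specialised ninja rule is registered for the command.
    return "CMD", {"cmd": "build %s" % node}, []

RULE_MAPPING = {
    "$CCCOM": lambda env, node: ("CC", {"src": node}, ["gcc"]),
}

def lookup(comstr, env, node):
    provider = RULE_MAPPING.get(comstr, generic_shell_provider)
    return provider(env, node)

print(lookup("$CCCOM", {}, "hello.c"))      # specialised rule
print(lookup("$UNKNOWNCOM", {}, "hello.x")) # falls back to the generic CMD rule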
scipy/scipy | e0a749f01e79046642ccfdc419edbf9e7ca141ad | scipy/stats/_stats_py.py | python | tmin | (a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate') | return res | Compute the trimmed minimum.
This function finds the minimum value of an array `a` along the
specified axis, but only considering values greater than a specified
lower limit.
Parameters
----------
a : array_like
Array of values.
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
tmin : float, int or ndarray
Trimmed minimum.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmin(x)
0
>>> stats.tmin(x, 13)
13
>>> stats.tmin(x, 13, inclusive=False)
14 | Compute the trimmed minimum. | [
"Compute",
"the",
"trimmed",
"minimum",
"."
] | def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""Compute the trimmed minimum.
This function finds the minimum value of an array `a` along the
specified axis, but only considering values greater than a specified
lower limit.
Parameters
----------
a : array_like
Array of values.
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
tmin : float, int or ndarray
Trimmed minimum.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmin(x)
0
>>> stats.tmin(x, 13)
13
>>> stats.tmin(x, 13, inclusive=False)
14
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.minimum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res | [
"def",
"tmin",
"(",
"a",
",",
"lowerlimit",
"=",
"None",
",",
"axis",
"=",
"0",
",",
"inclusive",
"=",
"True",
",",
"nan_policy",
"=",
"'propagate'",
")",
":",
"a",
",",
"axis",
"=",
"_chk_asarray",
"(",
"a",
",",
"axis",
")",
"am",
"=",
"_mask_to_limits",
"(",
"a",
",",
"(",
"lowerlimit",
",",
"None",
")",
",",
"(",
"inclusive",
",",
"False",
")",
")",
"contains_nan",
",",
"nan_policy",
"=",
"_contains_nan",
"(",
"am",
",",
"nan_policy",
")",
"if",
"contains_nan",
"and",
"nan_policy",
"==",
"'omit'",
":",
"am",
"=",
"ma",
".",
"masked_invalid",
"(",
"am",
")",
"res",
"=",
"ma",
".",
"minimum",
".",
"reduce",
"(",
"am",
",",
"axis",
")",
".",
"data",
"if",
"res",
".",
"ndim",
"==",
"0",
":",
"return",
"res",
"[",
"(",
")",
"]",
"return",
"res"
] | https://github.com/scipy/scipy/blob/e0a749f01e79046642ccfdc419edbf9e7ca141ad/scipy/stats/_stats_py.py#L621-L680 |
|
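A hedged sketch of the masking idea behind `tmin` (not the SciPy implementation itself): mask values below the lower limit, then reduce with the minimum along the requested axis.

import numpy as np
import numpy.ma as ma

def trimmed_min_sketch(a, lowerlimit=None, axis=0, inclusive=True):
    a = np.asarray(a)
    if lowerlimit is None:
        return a.min(axis=axis)
    # inclusive=True keeps values equal to the limit; False masks them as well.
    invalid = (a < lowerlimit) if inclusive else (a <= lowerlimit)
    return ma.minimum.reduce(ma.masked_array(a, mask=invalid), axis=axis)

x = np.arange(20)
print(trimmed_min_sketch(x))                      # 0
print(trimmed_min_sketch(x, 13))                  # 13
print(trimmed_min_sketch(x, 13, inclusive=False)) # 14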
4shadoww/hakkuframework | 409a11fc3819d251f86faa3473439f8c19066a21 | lib/scapy/arch/windows/__init__.py | python | NetworkInterface_Win.setmonitor | (self, enable=True) | return tmp if enable else (not tmp) | Alias for setmode('monitor') or setmode('managed')
Only available with Npcap | Alias for setmode('monitor') or setmode('managed')
Only available with Npcap | [
"Alias",
"for",
"setmode",
"(",
"monitor",
")",
"or",
"setmode",
"(",
"managed",
")",
"Only",
"available",
"with",
"Npcap"
] | def setmonitor(self, enable=True):
"""Alias for setmode('monitor') or setmode('managed')
Only available with Npcap"""
# We must reset the monitor cache
if enable:
res = self.setmode('monitor')
else:
res = self.setmode('managed')
if not res:
log_runtime.error("Npcap WlanHelper returned with an error code !")
self.cache_mode = None
tmp = self.cache_mode = self.ismonitor()
return tmp if enable else (not tmp) | [
"def",
"setmonitor",
"(",
"self",
",",
"enable",
"=",
"True",
")",
":",
"# We must reset the monitor cache",
"if",
"enable",
":",
"res",
"=",
"self",
".",
"setmode",
"(",
"'monitor'",
")",
"else",
":",
"res",
"=",
"self",
".",
"setmode",
"(",
"'managed'",
")",
"if",
"not",
"res",
":",
"log_runtime",
".",
"error",
"(",
"\"Npcap WlanHelper returned with an error code !\"",
")",
"self",
".",
"cache_mode",
"=",
"None",
"tmp",
"=",
"self",
".",
"cache_mode",
"=",
"self",
".",
"ismonitor",
"(",
")",
"return",
"tmp",
"if",
"enable",
"else",
"(",
"not",
"tmp",
")"
] | https://github.com/4shadoww/hakkuframework/blob/409a11fc3819d251f86faa3473439f8c19066a21/lib/scapy/arch/windows/__init__.py#L383-L395 |
|
ales-tsurko/cells | 4cf7e395cd433762bea70cdc863a346f3a6fe1d0 | packaging/macos/python/lib/python3.7/site-packages/pip/_vendor/distro.py | python | LinuxDistribution.distro_release_attr | (self, attribute) | return self._distro_release_info.get(attribute, '') | Return a single named information item from the distro release file
data source of the OS distribution.
For details, see :func:`distro.distro_release_attr`. | Return a single named information item from the distro release file
data source of the OS distribution. | [
"Return",
"a",
"single",
"named",
"information",
"item",
"from",
"the",
"distro",
"release",
"file",
"data",
"source",
"of",
"the",
"OS",
"distribution",
"."
] | def distro_release_attr(self, attribute):
"""
Return a single named information item from the distro release file
data source of the OS distribution.
For details, see :func:`distro.distro_release_attr`.
"""
return self._distro_release_info.get(attribute, '') | [
"def",
"distro_release_attr",
"(",
"self",
",",
"attribute",
")",
":",
"return",
"self",
".",
"_distro_release_info",
".",
"get",
"(",
"attribute",
",",
"''",
")"
] | https://github.com/ales-tsurko/cells/blob/4cf7e395cd433762bea70cdc863a346f3a6fe1d0/packaging/macos/python/lib/python3.7/site-packages/pip/_vendor/distro.py#L901-L908 |
|
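A hedged usage sketch with the standalone `distro` package, which exposes the same lookup as a module-level function; output depends on the host's *-release files and the 'name' key is just an illustrative attribute:

import distro

print(distro.id(), distro.version())
# Single raw item straight from the distro release file; '' if the key is absent.
print(distro.distro_release_attr('name'))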
openhatch/oh-mainline | ce29352a034e1223141dcc2f317030bbc3359a51 | vendor/packages/Django/django/contrib/gis/geos/geometry.py | python | GEOSGeometry.transform | (self, ct, clone=False) | Requires GDAL. Transforms the geometry according to the given
transformation object, which may be an integer SRID, and WKT or
PROJ.4 string. By default, the geometry is transformed in-place and
nothing is returned. However if the `clone` keyword is set, then this
geometry will not be modified and a transformed clone will be returned
instead. | Requires GDAL. Transforms the geometry according to the given
transformation object, which may be an integer SRID, and WKT or
PROJ.4 string. By default, the geometry is transformed in-place and
nothing is returned. However if the `clone` keyword is set, then this
geometry will not be modified and a transformed clone will be returned
instead. | [
"Requires",
"GDAL",
".",
"Transforms",
"the",
"geometry",
"according",
"to",
"the",
"given",
"transformation",
"object",
"which",
"may",
"be",
"an",
"integer",
"SRID",
"and",
"WKT",
"or",
"PROJ",
".",
"4",
"string",
".",
"By",
"default",
"the",
"geometry",
"is",
"transformed",
"in",
"-",
"place",
"and",
"nothing",
"is",
"returned",
".",
"However",
"if",
"the",
"clone",
"keyword",
"is",
"set",
"then",
"this",
"geometry",
"will",
"not",
"be",
"modified",
"and",
"a",
"transformed",
"clone",
"will",
"be",
"returned",
"instead",
"."
] | def transform(self, ct, clone=False):
"""
Requires GDAL. Transforms the geometry according to the given
transformation object, which may be an integer SRID, and WKT or
PROJ.4 string. By default, the geometry is transformed in-place and
nothing is returned. However if the `clone` keyword is set, then this
geometry will not be modified and a transformed clone will be returned
instead.
"""
srid = self.srid
if ct == srid:
# short-circuit where source & dest SRIDs match
if clone:
return self.clone()
else:
return
if (srid is None) or (srid < 0):
raise GEOSException("Calling transform() with no SRID set is not supported")
if not gdal.HAS_GDAL:
raise GEOSException("GDAL library is not available to transform() geometry.")
# Creating an OGR Geometry, which is then transformed.
g = self.ogr
g.transform(ct)
# Getting a new GEOS pointer
ptr = wkb_r().read(g.wkb)
if clone:
# User wants a cloned transformed geometry returned.
return GEOSGeometry(ptr, srid=g.srid)
if ptr:
# Reassigning pointer, and performing post-initialization setup
# again due to the reassignment.
capi.destroy_geom(self.ptr)
self.ptr = ptr
self._post_init(g.srid)
else:
raise GEOSException('Transformed WKB was invalid.') | [
"def",
"transform",
"(",
"self",
",",
"ct",
",",
"clone",
"=",
"False",
")",
":",
"srid",
"=",
"self",
".",
"srid",
"if",
"ct",
"==",
"srid",
":",
"# short-circuit where source & dest SRIDs match",
"if",
"clone",
":",
"return",
"self",
".",
"clone",
"(",
")",
"else",
":",
"return",
"if",
"(",
"srid",
"is",
"None",
")",
"or",
"(",
"srid",
"<",
"0",
")",
":",
"raise",
"GEOSException",
"(",
"\"Calling transform() with no SRID set is not supported\"",
")",
"if",
"not",
"gdal",
".",
"HAS_GDAL",
":",
"raise",
"GEOSException",
"(",
"\"GDAL library is not available to transform() geometry.\"",
")",
"# Creating an OGR Geometry, which is then transformed.",
"g",
"=",
"self",
".",
"ogr",
"g",
".",
"transform",
"(",
"ct",
")",
"# Getting a new GEOS pointer",
"ptr",
"=",
"wkb_r",
"(",
")",
".",
"read",
"(",
"g",
".",
"wkb",
")",
"if",
"clone",
":",
"# User wants a cloned transformed geometry returned.",
"return",
"GEOSGeometry",
"(",
"ptr",
",",
"srid",
"=",
"g",
".",
"srid",
")",
"if",
"ptr",
":",
"# Reassigning pointer, and performing post-initialization setup",
"# again due to the reassignment.",
"capi",
".",
"destroy_geom",
"(",
"self",
".",
"ptr",
")",
"self",
".",
"ptr",
"=",
"ptr",
"self",
".",
"_post_init",
"(",
"g",
".",
"srid",
")",
"else",
":",
"raise",
"GEOSException",
"(",
"'Transformed WKB was invalid.'",
")"
] | https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/Django/django/contrib/gis/geos/geometry.py#L487-L526 |
||
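A hedged usage sketch for `transform` (requires Django built with GDAL support; the point coordinates and SRIDs 4326/3857 are illustrative):

from django.contrib.gis.geos import GEOSGeometry

pnt = GEOSGeometry('POINT(-104.609 38.255)', srid=4326)
pnt.transform(3857)  # in-place reprojection, nothing returned
src = GEOSGeometry('POINT(-104.609 38.255)', srid=4326)
mercator = src.transform(3857, clone=True)  # original left untouched
print(pnt.srid, src.srid, mercator.srid)    # 3857 4326 3857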
flow-project/flow | a511c41c48e6b928bb2060de8ad1ef3c3e3d9554 | flow/envs/multiagent/traffic_light_grid.py | python | MultiTrafficLightGridPOEnv.get_state | (self) | return obs | Observations for each traffic light agent.
:return: dictionary which contains agent-wise observations as follows:
- For the self.num_observed number of vehicles closest and incoming
towards traffic light agent, gives the vehicle velocity, distance to
intersection, edge number.
- For edges in the network, gives the density and average velocity.
- For the self.num_local_lights number of nearest lights (itself
included), gives the traffic light information, including the last
change time, light direction (i.e. phase), and a currently_yellow flag. | Observations for each traffic light agent. | [
"Observations",
"for",
"each",
"traffic",
"light",
"agent",
"."
] | def get_state(self):
"""Observations for each traffic light agent.
:return: dictionary which contains agent-wise observations as follows:
- For the self.num_observed number of vehicles closest and incoming
towards traffic light agent, gives the vehicle velocity, distance to
intersection, edge number.
- For edges in the network, gives the density and average velocity.
- For the self.num_local_lights number of nearest lights (itself
included), gives the traffic light information, including the last
change time, light direction (i.e. phase), and a currently_yellow flag.
"""
# Normalization factors
max_speed = max(
self.k.network.speed_limit(edge)
for edge in self.k.network.get_edge_list())
grid_array = self.net_params.additional_params["grid_array"]
max_dist = max(grid_array["short_length"], grid_array["long_length"],
grid_array["inner_length"])
# TODO(cathywu) refactor TrafficLightGridPOEnv with convenience
# methods for observations, but remember to flatten for single-agent
# Observed vehicle information
speeds = []
dist_to_intersec = []
edge_number = []
all_observed_ids = []
for _, edges in self.network.node_mapping:
local_speeds = []
local_dists_to_intersec = []
local_edge_numbers = []
for edge in edges:
observed_ids = \
self.get_closest_to_intersection(edge, self.num_observed)
all_observed_ids.append(observed_ids)
# check which edges we have so we can always pad in the right
# positions
local_speeds.extend(
[self.k.vehicle.get_speed(veh_id) / max_speed for veh_id in
observed_ids])
local_dists_to_intersec.extend([(self.k.network.edge_length(
self.k.vehicle.get_edge(
veh_id)) - self.k.vehicle.get_position(
veh_id)) / max_dist for veh_id in observed_ids])
local_edge_numbers.extend([self._convert_edge(
self.k.vehicle.get_edge(veh_id)) / (
self.k.network.network.num_edges - 1) for veh_id in
observed_ids])
if len(observed_ids) < self.num_observed:
diff = self.num_observed - len(observed_ids)
local_speeds.extend([1] * diff)
local_dists_to_intersec.extend([1] * diff)
local_edge_numbers.extend([0] * diff)
speeds.append(local_speeds)
dist_to_intersec.append(local_dists_to_intersec)
edge_number.append(local_edge_numbers)
# Edge information
density = []
velocity_avg = []
for edge in self.k.network.get_edge_list():
ids = self.k.vehicle.get_ids_by_edge(edge)
if len(ids) > 0:
# TODO(cathywu) Why is there a 5 here?
density += [5 * len(ids) / self.k.network.edge_length(edge)]
velocity_avg += [np.mean(
[self.k.vehicle.get_speed(veh_id) for veh_id in
ids]) / max_speed]
else:
density += [0]
velocity_avg += [0]
density = np.array(density)
velocity_avg = np.array(velocity_avg)
self.observed_ids = all_observed_ids
# Traffic light information
direction = self.direction.flatten()
currently_yellow = self.currently_yellow.flatten()
# This is a catch-all for when the relative_node method returns a -1
# (when there is no node in the direction sought). We add a last
# item to the lists here, which will serve as a default value.
# TODO(cathywu) are these values reasonable?
direction = np.append(direction, [0])
currently_yellow = np.append(currently_yellow, [1])
obs = {}
# TODO(cathywu) allow differentiation between rl and non-rl lights
node_to_edges = self.network.node_mapping
for rl_id in self.k.traffic_light.get_ids():
rl_id_num = int(rl_id.split("center")[ID_IDX])
local_edges = node_to_edges[rl_id_num][1]
local_edge_numbers = [self.k.network.get_edge_list().index(e)
for e in local_edges]
local_id_nums = [rl_id_num, self._get_relative_node(rl_id, "top"),
self._get_relative_node(rl_id, "bottom"),
self._get_relative_node(rl_id, "left"),
self._get_relative_node(rl_id, "right")]
observation = np.array(np.concatenate(
[speeds[rl_id_num], dist_to_intersec[rl_id_num],
edge_number[rl_id_num], density[local_edge_numbers],
velocity_avg[local_edge_numbers],
direction[local_id_nums], currently_yellow[local_id_nums]
]))
obs.update({rl_id: observation})
return obs | [
"def",
"get_state",
"(",
"self",
")",
":",
"# Normalization factors",
"max_speed",
"=",
"max",
"(",
"self",
".",
"k",
".",
"network",
".",
"speed_limit",
"(",
"edge",
")",
"for",
"edge",
"in",
"self",
".",
"k",
".",
"network",
".",
"get_edge_list",
"(",
")",
")",
"grid_array",
"=",
"self",
".",
"net_params",
".",
"additional_params",
"[",
"\"grid_array\"",
"]",
"max_dist",
"=",
"max",
"(",
"grid_array",
"[",
"\"short_length\"",
"]",
",",
"grid_array",
"[",
"\"long_length\"",
"]",
",",
"grid_array",
"[",
"\"inner_length\"",
"]",
")",
"# TODO(cathywu) refactor TrafficLightGridPOEnv with convenience",
"# methods for observations, but remember to flatten for single-agent",
"# Observed vehicle information",
"speeds",
"=",
"[",
"]",
"dist_to_intersec",
"=",
"[",
"]",
"edge_number",
"=",
"[",
"]",
"all_observed_ids",
"=",
"[",
"]",
"for",
"_",
",",
"edges",
"in",
"self",
".",
"network",
".",
"node_mapping",
":",
"local_speeds",
"=",
"[",
"]",
"local_dists_to_intersec",
"=",
"[",
"]",
"local_edge_numbers",
"=",
"[",
"]",
"for",
"edge",
"in",
"edges",
":",
"observed_ids",
"=",
"self",
".",
"get_closest_to_intersection",
"(",
"edge",
",",
"self",
".",
"num_observed",
")",
"all_observed_ids",
".",
"append",
"(",
"observed_ids",
")",
"# check which edges we have so we can always pad in the right",
"# positions",
"local_speeds",
".",
"extend",
"(",
"[",
"self",
".",
"k",
".",
"vehicle",
".",
"get_speed",
"(",
"veh_id",
")",
"/",
"max_speed",
"for",
"veh_id",
"in",
"observed_ids",
"]",
")",
"local_dists_to_intersec",
".",
"extend",
"(",
"[",
"(",
"self",
".",
"k",
".",
"network",
".",
"edge_length",
"(",
"self",
".",
"k",
".",
"vehicle",
".",
"get_edge",
"(",
"veh_id",
")",
")",
"-",
"self",
".",
"k",
".",
"vehicle",
".",
"get_position",
"(",
"veh_id",
")",
")",
"/",
"max_dist",
"for",
"veh_id",
"in",
"observed_ids",
"]",
")",
"local_edge_numbers",
".",
"extend",
"(",
"[",
"self",
".",
"_convert_edge",
"(",
"self",
".",
"k",
".",
"vehicle",
".",
"get_edge",
"(",
"veh_id",
")",
")",
"/",
"(",
"self",
".",
"k",
".",
"network",
".",
"network",
".",
"num_edges",
"-",
"1",
")",
"for",
"veh_id",
"in",
"observed_ids",
"]",
")",
"if",
"len",
"(",
"observed_ids",
")",
"<",
"self",
".",
"num_observed",
":",
"diff",
"=",
"self",
".",
"num_observed",
"-",
"len",
"(",
"observed_ids",
")",
"local_speeds",
".",
"extend",
"(",
"[",
"1",
"]",
"*",
"diff",
")",
"local_dists_to_intersec",
".",
"extend",
"(",
"[",
"1",
"]",
"*",
"diff",
")",
"local_edge_numbers",
".",
"extend",
"(",
"[",
"0",
"]",
"*",
"diff",
")",
"speeds",
".",
"append",
"(",
"local_speeds",
")",
"dist_to_intersec",
".",
"append",
"(",
"local_dists_to_intersec",
")",
"edge_number",
".",
"append",
"(",
"local_edge_numbers",
")",
"# Edge information",
"density",
"=",
"[",
"]",
"velocity_avg",
"=",
"[",
"]",
"for",
"edge",
"in",
"self",
".",
"k",
".",
"network",
".",
"get_edge_list",
"(",
")",
":",
"ids",
"=",
"self",
".",
"k",
".",
"vehicle",
".",
"get_ids_by_edge",
"(",
"edge",
")",
"if",
"len",
"(",
"ids",
")",
">",
"0",
":",
"# TODO(cathywu) Why is there a 5 here?",
"density",
"+=",
"[",
"5",
"*",
"len",
"(",
"ids",
")",
"/",
"self",
".",
"k",
".",
"network",
".",
"edge_length",
"(",
"edge",
")",
"]",
"velocity_avg",
"+=",
"[",
"np",
".",
"mean",
"(",
"[",
"self",
".",
"k",
".",
"vehicle",
".",
"get_speed",
"(",
"veh_id",
")",
"for",
"veh_id",
"in",
"ids",
"]",
")",
"/",
"max_speed",
"]",
"else",
":",
"density",
"+=",
"[",
"0",
"]",
"velocity_avg",
"+=",
"[",
"0",
"]",
"density",
"=",
"np",
".",
"array",
"(",
"density",
")",
"velocity_avg",
"=",
"np",
".",
"array",
"(",
"velocity_avg",
")",
"self",
".",
"observed_ids",
"=",
"all_observed_ids",
"# Traffic light information",
"direction",
"=",
"self",
".",
"direction",
".",
"flatten",
"(",
")",
"currently_yellow",
"=",
"self",
".",
"currently_yellow",
".",
"flatten",
"(",
")",
"# This is a catch-all for when the relative_node method returns a -1",
"# (when there is no node in the direction sought). We add a last",
"# item to the lists here, which will serve as a default value.",
"# TODO(cathywu) are these values reasonable?",
"direction",
"=",
"np",
".",
"append",
"(",
"direction",
",",
"[",
"0",
"]",
")",
"currently_yellow",
"=",
"np",
".",
"append",
"(",
"currently_yellow",
",",
"[",
"1",
"]",
")",
"obs",
"=",
"{",
"}",
"# TODO(cathywu) allow differentiation between rl and non-rl lights",
"node_to_edges",
"=",
"self",
".",
"network",
".",
"node_mapping",
"for",
"rl_id",
"in",
"self",
".",
"k",
".",
"traffic_light",
".",
"get_ids",
"(",
")",
":",
"rl_id_num",
"=",
"int",
"(",
"rl_id",
".",
"split",
"(",
"\"center\"",
")",
"[",
"ID_IDX",
"]",
")",
"local_edges",
"=",
"node_to_edges",
"[",
"rl_id_num",
"]",
"[",
"1",
"]",
"local_edge_numbers",
"=",
"[",
"self",
".",
"k",
".",
"network",
".",
"get_edge_list",
"(",
")",
".",
"index",
"(",
"e",
")",
"for",
"e",
"in",
"local_edges",
"]",
"local_id_nums",
"=",
"[",
"rl_id_num",
",",
"self",
".",
"_get_relative_node",
"(",
"rl_id",
",",
"\"top\"",
")",
",",
"self",
".",
"_get_relative_node",
"(",
"rl_id",
",",
"\"bottom\"",
")",
",",
"self",
".",
"_get_relative_node",
"(",
"rl_id",
",",
"\"left\"",
")",
",",
"self",
".",
"_get_relative_node",
"(",
"rl_id",
",",
"\"right\"",
")",
"]",
"observation",
"=",
"np",
".",
"array",
"(",
"np",
".",
"concatenate",
"(",
"[",
"speeds",
"[",
"rl_id_num",
"]",
",",
"dist_to_intersec",
"[",
"rl_id_num",
"]",
",",
"edge_number",
"[",
"rl_id_num",
"]",
",",
"density",
"[",
"local_edge_numbers",
"]",
",",
"velocity_avg",
"[",
"local_edge_numbers",
"]",
",",
"direction",
"[",
"local_id_nums",
"]",
",",
"currently_yellow",
"[",
"local_id_nums",
"]",
"]",
")",
")",
"obs",
".",
"update",
"(",
"{",
"rl_id",
":",
"observation",
"}",
")",
"return",
"obs"
] | https://github.com/flow-project/flow/blob/a511c41c48e6b928bb2060de8ad1ef3c3e3d9554/flow/envs/multiagent/traffic_light_grid.py#L90-L200 |
|
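A small, self-contained sketch of the normalise-and-pad pattern `get_state` applies to each intersection's observed vehicles (the speeds and limit below are made up; padding with 1.0 mirrors the environment's default for missing vehicles):

import numpy as np

def local_speed_obs(speeds, max_speed, num_observed):
    # Normalise what was observed, then pad to a fixed length when fewer
    # than num_observed vehicles are approaching the intersection.
    normed = [s / max_speed for s in speeds[:num_observed]]
    normed.extend([1.0] * (num_observed - len(normed)))
    return np.array(normed)

print(local_speed_obs([3.0, 7.5], max_speed=15.0, num_observed=4))  # [0.2 0.5 1. 1.]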
chribsen/simple-machine-learning-examples | dc94e52a4cebdc8bb959ff88b81ff8cfeca25022 | venv/lib/python2.7/site-packages/numpy/lib/recfunctions.py | python | izip_records | (seqarrays, fill_value=None, flatten=True) | Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
flatten : {True, False},
Whether to flatten the items (True) or keep the nested structure (False). | Returns an iterator of concatenated items from a sequence of arrays. | [
"Returns",
"an",
"iterator",
"of",
"concatenated",
"items",
"from",
"a",
"sequence",
"of",
"arrays",
"."
] | def izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
flatten : {True, False},
Whether to flatten the items (True) or keep the nested structure (False).
"""
# OK, that's a complete ripoff from Python2.6 itertools.izip_longest
def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop):
"Yields the fill_value or raises IndexError"
yield counter()
#
fillers = itertools.repeat(fill_value)
iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays]
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
#
try:
for tup in zip(*iters):
yield tuple(zipfunc(tup))
except IndexError:
pass | [
"def",
"izip_records",
"(",
"seqarrays",
",",
"fill_value",
"=",
"None",
",",
"flatten",
"=",
"True",
")",
":",
"# OK, that's a complete ripoff from Python2.6 itertools.izip_longest",
"def",
"sentinel",
"(",
"counter",
"=",
"(",
"[",
"fill_value",
"]",
"*",
"(",
"len",
"(",
"seqarrays",
")",
"-",
"1",
")",
")",
".",
"pop",
")",
":",
"\"Yields the fill_value or raises IndexError\"",
"yield",
"counter",
"(",
")",
"#",
"fillers",
"=",
"itertools",
".",
"repeat",
"(",
"fill_value",
")",
"iters",
"=",
"[",
"itertools",
".",
"chain",
"(",
"it",
",",
"sentinel",
"(",
")",
",",
"fillers",
")",
"for",
"it",
"in",
"seqarrays",
"]",
"# Should we flatten the items, or just use a nested approach",
"if",
"flatten",
":",
"zipfunc",
"=",
"_izip_fields_flat",
"else",
":",
"zipfunc",
"=",
"_izip_fields",
"#",
"try",
":",
"for",
"tup",
"in",
"zip",
"(",
"*",
"iters",
")",
":",
"yield",
"tuple",
"(",
"zipfunc",
"(",
"tup",
")",
")",
"except",
"IndexError",
":",
"pass"
] | https://github.com/chribsen/simple-machine-learning-examples/blob/dc94e52a4cebdc8bb959ff88b81ff8cfeca25022/venv/lib/python2.7/site-packages/numpy/lib/recfunctions.py#L265-L295 |
||
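A hedged sketch of the pad-and-flatten behaviour documented above, mimicking the semantics with itertools.zip_longest rather than the NumPy internals:

import itertools

def _flatten(item):
    if isinstance(item, tuple):
        for sub in item:
            for leaf in _flatten(sub):
                yield leaf
    else:
        yield item

def izip_records_sketch(seqarrays, fill_value=None, flatten=True):
    for tup in itertools.zip_longest(*seqarrays, fillvalue=fill_value):
        yield tuple(_flatten(tup)) if flatten else tup

a = [(1, (10, 'x')), (2, (20, 'y'))]
b = [(3, (30, 'z'))]
print(list(izip_records_sketch([a, b])))
# [(1, 10, 'x', 3, 30, 'z'), (2, 20, 'y', None)]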
cloudera/hue | 23f02102d4547c17c32bd5ea0eb24e9eadd657a4 | desktop/core/ext-py/pysaml2-4.9.0/src/saml2/client.py | python | Saml2Client.handle_logout_request | (self, request, name_id, binding, sign=False,
sign_alg=None, relay_state="") | return self.apply_binding(rinfo["binding"], response,
rinfo["destination"], relay_state,
response=True, sign=sign) | Deal with a LogoutRequest
:param request: The request as text string
:param name_id: The id of the current user
:param binding: Which binding the message came in over
:param sign: Whether the response will be signed or not
:return: Keyword arguments which can be used to send the response
what's returned follow different patterns for different bindings.
If the binding is BINDING_SOAP, what is returned looks like this::
{
"data": <the SOAP enveloped response>
"url": "",
'headers': [('content-type', 'application/soap+xml')]
'method': "POST"
} | Deal with a LogoutRequest | [
"Deal",
"with",
"a",
"LogoutRequest"
] | def handle_logout_request(self, request, name_id, binding, sign=False,
sign_alg=None, relay_state=""):
"""
Deal with a LogoutRequest
:param request: The request as text string
:param name_id: The id of the current user
:param binding: Which binding the message came in over
:param sign: Whether the response will be signed or not
:return: Keyword arguments which can be used to send the response
what's returned follow different patterns for different bindings.
If the binding is BINDING_SOAP, what is returned looks like this::
{
"data": <the SOAP enveloped response>
"url": "",
'headers': [('content-type', 'application/soap+xml')]
'method': "POST"
}
"""
logger.info("logout request: %s", request)
_req = self._parse_request(request, LogoutRequest,
"single_logout_service", binding)
if _req.message.name_id == name_id:
try:
if self.local_logout(name_id):
status = success_status_factory()
else:
status = status_message_factory("Server error",
STATUS_REQUEST_DENIED)
except KeyError:
status = status_message_factory("Server error",
STATUS_REQUEST_DENIED)
else:
status = status_message_factory("Wrong user",
STATUS_UNKNOWN_PRINCIPAL)
if binding == BINDING_SOAP:
response_bindings = [BINDING_SOAP]
elif binding == BINDING_HTTP_POST or BINDING_HTTP_REDIRECT:
response_bindings = [BINDING_HTTP_POST, BINDING_HTTP_REDIRECT]
else:
response_bindings = self.config.preferred_binding[
"single_logout_service"]
response = self.create_logout_response(_req.message, response_bindings,
status, sign, sign_alg=sign_alg)
rinfo = self.response_args(_req.message, response_bindings)
return self.apply_binding(rinfo["binding"], response,
rinfo["destination"], relay_state,
response=True, sign=sign) | [
"def",
"handle_logout_request",
"(",
"self",
",",
"request",
",",
"name_id",
",",
"binding",
",",
"sign",
"=",
"False",
",",
"sign_alg",
"=",
"None",
",",
"relay_state",
"=",
"\"\"",
")",
":",
"logger",
".",
"info",
"(",
"\"logout request: %s\"",
",",
"request",
")",
"_req",
"=",
"self",
".",
"_parse_request",
"(",
"request",
",",
"LogoutRequest",
",",
"\"single_logout_service\"",
",",
"binding",
")",
"if",
"_req",
".",
"message",
".",
"name_id",
"==",
"name_id",
":",
"try",
":",
"if",
"self",
".",
"local_logout",
"(",
"name_id",
")",
":",
"status",
"=",
"success_status_factory",
"(",
")",
"else",
":",
"status",
"=",
"status_message_factory",
"(",
"\"Server error\"",
",",
"STATUS_REQUEST_DENIED",
")",
"except",
"KeyError",
":",
"status",
"=",
"status_message_factory",
"(",
"\"Server error\"",
",",
"STATUS_REQUEST_DENIED",
")",
"else",
":",
"status",
"=",
"status_message_factory",
"(",
"\"Wrong user\"",
",",
"STATUS_UNKNOWN_PRINCIPAL",
")",
"if",
"binding",
"==",
"BINDING_SOAP",
":",
"response_bindings",
"=",
"[",
"BINDING_SOAP",
"]",
"elif",
"binding",
"==",
"BINDING_HTTP_POST",
"or",
"BINDING_HTTP_REDIRECT",
":",
"response_bindings",
"=",
"[",
"BINDING_HTTP_POST",
",",
"BINDING_HTTP_REDIRECT",
"]",
"else",
":",
"response_bindings",
"=",
"self",
".",
"config",
".",
"preferred_binding",
"[",
"\"single_logout_service\"",
"]",
"response",
"=",
"self",
".",
"create_logout_response",
"(",
"_req",
".",
"message",
",",
"response_bindings",
",",
"status",
",",
"sign",
",",
"sign_alg",
"=",
"sign_alg",
")",
"rinfo",
"=",
"self",
".",
"response_args",
"(",
"_req",
".",
"message",
",",
"response_bindings",
")",
"return",
"self",
".",
"apply_binding",
"(",
"rinfo",
"[",
"\"binding\"",
"]",
",",
"response",
",",
"rinfo",
"[",
"\"destination\"",
"]",
",",
"relay_state",
",",
"response",
"=",
"True",
",",
"sign",
"=",
"sign",
")"
] | https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/pysaml2-4.9.0/src/saml2/client.py#L485-L538 |
|
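A standalone sketch of the response-binding selection inside `handle_logout_request`. Note that the upstream `elif binding == BINDING_HTTP_POST or BINDING_HTTP_REDIRECT:` is always truthy because of operator precedence; the sketch shows the branching the docstring appears to intend, using the standard SAML binding URNs:

BINDING_SOAP = "urn:oasis:names:tc:SAML:2.0:bindings:SOAP"
BINDING_HTTP_POST = "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST"
BINDING_HTTP_REDIRECT = "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect"

def pick_response_bindings(binding, preferred):
    if binding == BINDING_SOAP:
        return [BINDING_SOAP]
    if binding in (BINDING_HTTP_POST, BINDING_HTTP_REDIRECT):
        return [BINDING_HTTP_POST, BINDING_HTTP_REDIRECT]
    return preferred  # fall back to the configured preference

print(pick_response_bindings(BINDING_SOAP, preferred=[BINDING_HTTP_POST]))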
SeldonIO/alibi | ce961caf995d22648a8338857822c90428af4765 | alibi/explainers/shap_wrappers.py | python | KernelShap._ | (self, background_data, *args, **kwargs) | Initialises background data if the user passes an `np.ndarray` object as input.
If the user specifies feature grouping then a `shap_utils.DenseData` object
is returned. Weights are handled separately to avoid triggering assertion
errors inside `shap` library. Otherwise, the original data is returned and
is handled by the `shap` library internally. | Initialises background data if the user passes an `np.ndarray` object as input.
If the user specifies feature grouping then a `shap_utils.DenseData` object
is returned. Weights are handled separately to avoid triggering assertion
errors inside `shap` library. Otherwise, the original data is returned and
is handled by the `shap` library internally. | [
"Initialises",
"background",
"data",
"if",
"the",
"user",
"passes",
"an",
"np",
".",
"ndarray",
"object",
"as",
"input",
".",
"If",
"the",
"user",
"specifies",
"feature",
"grouping",
"then",
"a",
"shap_utils",
".",
"DenseData",
"object",
"is",
"returned",
".",
"Weights",
"are",
"handled",
"separately",
"to",
"avoid",
"triggering",
"assertion",
"correct",
"inside",
"shap",
"library",
".",
"Otherwise",
"the",
"original",
"data",
"is",
"returned",
"and",
"is",
"handled",
"by",
"the",
"shap",
"library",
"internally",
"."
] | def _(self, background_data, *args, **kwargs) -> Union[np.ndarray, shap_utils.Data]:
"""
Initialises background data if the user passes an `np.ndarray` object as input.
If the user specifies feature grouping then a `shap_utils.DenseData` object
is returned. Weights are handled separately to avoid triggering assertion
correct inside `shap` library. Otherwise, the original data is returned and
is handled by the `shap` library internally.
"""
group_names, groups, weights = args
new_args = (group_names, groups, weights) if weights is not None else (group_names, groups)
if self.use_groups:
return shap_utils.DenseData(background_data, *new_args)
else:
return background_data | [
"def",
"_",
"(",
"self",
",",
"background_data",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"->",
"Union",
"[",
"np",
".",
"ndarray",
",",
"shap_utils",
".",
"Data",
"]",
":",
"group_names",
",",
"groups",
",",
"weights",
"=",
"args",
"new_args",
"=",
"(",
"group_names",
",",
"groups",
",",
"weights",
")",
"if",
"weights",
"is",
"not",
"None",
"else",
"(",
"group_names",
",",
"groups",
")",
"if",
"self",
".",
"use_groups",
":",
"return",
"shap_utils",
".",
"DenseData",
"(",
"background_data",
",",
"*",
"new_args",
")",
"else",
":",
"return",
"background_data"
] | https://github.com/SeldonIO/alibi/blob/ce961caf995d22648a8338857822c90428af4765/alibi/explainers/shap_wrappers.py#L580-L594 |
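The dispatch helper above wraps an `np.ndarray` background dataset in `shap_utils.DenseData` when feature groups are declared. A usage sketch under the assumption that this version of alibi's `KernelShap.fit` forwards `group_names`/`groups` down to the helper; the predictor and data below are placeholders.

```python
import numpy as np
from alibi.explainers import KernelShap

def predict_fn(x):
    # Placeholder predictor returning two-class probabilities.
    p = np.full((x.shape[0], 1), 0.5)
    return np.hstack([p, 1 - p])

background = np.random.rand(100, 4)      # 100 reference rows, 4 columns
group_names = ["pair_a", "pair_b"]       # two logical features
groups = [[0, 1], [2, 3]]                # column indices belonging to each group

explainer = KernelShap(predict_fn)
# With groups given, the np.ndarray background is wrapped in DenseData as above.
explainer.fit(background, group_names=group_names, groups=groups)
```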
||
MrGiovanni/UNetPlusPlus | e145ba63862982bf1099cf2ec11d5466b434ae0b | keras/segmentation_models/pspnet/model.py | python | PSPNet | (backbone_name='vgg16',
input_shape=(384, 384, 3),
input_tensor=None,
encoder_weights='imagenet',
freeze_encoder=False,
downsample_factor=8,
psp_conv_filters=512,
psp_pooling_type='avg',
use_batchnorm=True,
dropout=None,
final_interpolation='bilinear',
classes=21,
activation='softmax') | return model | Exploit the capability of global context information by different-regionbased
context aggregation through pyramid pooling module together with the proposed
pyramid scene parsing network (PSPNet).
https://arxiv.org/pdf/1612.01105.pdf
Args:
backbone_name: (str) look at list of available backbones.
input_shape: (tuple) dimensions of input data (H, W, C).
H and W should be divisible by (6 * `downsample_factor`) and **NOT** `None`!
input_tensor: keras tensor
encoder_weights: one of `None` (random initialization), 'imagenet' (pre-
training on ImageNet)
freeze_encoder: (bool) Set encoder layers weights as non-trainable. Use-
ful for fine-tuning
downsample_factor: int, one of 4, 8 and 16. Specify layer of backbone or
backbone depth to construct PSP module on it.
psp_conv_filters: (int), number of filters in `Conv2D` layer in each psp block
psp_pooling_type: 'avg' or 'max', psp block pooling type (maximum or average)
use_batchnorm: (bool) if True add batch normalisation layer between
`Conv2D` ad `Activation` layers
dropout: None or float in range 0-1, if specified add SpatialDropout after PSP module
final_interpolation: 'duc' or 'bilinear' - interpolation type for final
upsampling layer.
classes: (int) a number of classes for output
activation: (str) one of keras activations
Returns:
keras Model instance | Exploit the capability of global context information by different-regionbased
context aggregation through pyramid pooling module together with the proposed
pyramid scene parsing network (PSPNet). | [
"Exploit",
"the",
"capability",
"of",
"global",
"context",
"information",
"by",
"different",
"-",
"regionbased",
"context",
"aggregation",
"through",
"pyramid",
"pooling",
"module",
"together",
"with",
"the",
"proposed",
"pyramid",
"scene",
"parsing",
"network",
"(",
"PSPNet",
")",
"."
] | def PSPNet(backbone_name='vgg16',
input_shape=(384, 384, 3),
input_tensor=None,
encoder_weights='imagenet',
freeze_encoder=False,
downsample_factor=8,
psp_conv_filters=512,
psp_pooling_type='avg',
use_batchnorm=True,
dropout=None,
final_interpolation='bilinear',
classes=21,
activation='softmax'):
"""
Exploit the capability of global context information by different-regionbased
context aggregation through pyramid pooling module together with the proposed
pyramid scene parsing network (PSPNet).
https://arxiv.org/pdf/1612.01105.pdf
Args:
backbone_name: (str) look at list of available backbones.
input_shape: (tuple) dimensions of input data (H, W, C).
H and W should be divisible by (6 * `downsample_factor`) and **NOT** `None`!
input_tensor: keras tensor
encoder_weights: one of `None` (random initialization), 'imagenet' (pre-
training on ImageNet)
freeze_encoder: (bool) Set encoder layers weights as non-trainable. Use-
ful for fine-tuning
downsample_factor: int, one of 4, 8 and 16. Specify layer of backbone or
backbone depth to construct PSP module on it.
psp_conv_filters: (int), number of filters in `Conv2D` layer in each psp block
psp_pooling_type: 'avg' or 'max', psp block pooling type (maximum or average)
use_batchnorm: (bool) if True add batch normalisation layer between
`Conv2D` ad `Activation` layers
dropout: None or float in range 0-1, if specified add SpatialDropout after PSP module
final_interpolation: 'duc' or 'bilinear' - interpolation type for final
upsampling layer.
classes: (int) a number of classes for output
activation: (str) one of keras activations
Returns:
keras Model instance
"""
# control image input shape
_shape_guard(downsample_factor, input_shape)
backbone = get_backbone(backbone_name,
input_shape=input_shape,
input_tensor=input_tensor,
weights=encoder_weights,
include_top=False)
psp_layer = _get_layer_by_factor(backbone_name, downsample_factor)
model = build_psp(backbone,
psp_layer,
last_upsampling_factor=downsample_factor,
classes=classes,
conv_filters=psp_conv_filters,
pooling_type=psp_pooling_type,
activation=activation,
use_batchnorm=use_batchnorm,
dropout=dropout,
final_interpolation=final_interpolation)
# lock encoder weights for fine-tuning
if freeze_encoder:
freeze_model(backbone)
model.name = 'psp-{}'.format(backbone_name)
return model | [
"def",
"PSPNet",
"(",
"backbone_name",
"=",
"'vgg16'",
",",
"input_shape",
"=",
"(",
"384",
",",
"384",
",",
"3",
")",
",",
"input_tensor",
"=",
"None",
",",
"encoder_weights",
"=",
"'imagenet'",
",",
"freeze_encoder",
"=",
"False",
",",
"downsample_factor",
"=",
"8",
",",
"psp_conv_filters",
"=",
"512",
",",
"psp_pooling_type",
"=",
"'avg'",
",",
"use_batchnorm",
"=",
"True",
",",
"dropout",
"=",
"None",
",",
"final_interpolation",
"=",
"'bilinear'",
",",
"classes",
"=",
"21",
",",
"activation",
"=",
"'softmax'",
")",
":",
"# control image input shape",
"_shape_guard",
"(",
"downsample_factor",
",",
"input_shape",
")",
"backbone",
"=",
"get_backbone",
"(",
"backbone_name",
",",
"input_shape",
"=",
"input_shape",
",",
"input_tensor",
"=",
"input_tensor",
",",
"weights",
"=",
"encoder_weights",
",",
"include_top",
"=",
"False",
")",
"psp_layer",
"=",
"_get_layer_by_factor",
"(",
"backbone_name",
",",
"downsample_factor",
")",
"model",
"=",
"build_psp",
"(",
"backbone",
",",
"psp_layer",
",",
"last_upsampling_factor",
"=",
"downsample_factor",
",",
"classes",
"=",
"classes",
",",
"conv_filters",
"=",
"psp_conv_filters",
",",
"pooling_type",
"=",
"psp_pooling_type",
",",
"activation",
"=",
"activation",
",",
"use_batchnorm",
"=",
"use_batchnorm",
",",
"dropout",
"=",
"dropout",
",",
"final_interpolation",
"=",
"final_interpolation",
")",
"# lock encoder weights for fine-tuning",
"if",
"freeze_encoder",
":",
"freeze_model",
"(",
"backbone",
")",
"model",
".",
"name",
"=",
"'psp-{}'",
".",
"format",
"(",
"backbone_name",
")",
"return",
"model"
] | https://github.com/MrGiovanni/UNetPlusPlus/blob/e145ba63862982bf1099cf2ec11d5466b434ae0b/keras/segmentation_models/pspnet/model.py#L47-L121 |
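The PSPNet entry above documents the full constructor; the one hard requirement is that H and W are divisible by 6 * `downsample_factor`. A construction sketch, assuming the upstream segmentation_models import path used by this fork; the backbone choice and training setup are placeholders.

```python
from segmentation_models import PSPNet   # import path assumed from the upstream package

# 6 * downsample_factor = 48 and 384 / 48 = 8, so this shape passes the divisibility check.
model = PSPNet(backbone_name='resnet34',
               input_shape=(384, 384, 3),
               encoder_weights='imagenet',
               downsample_factor=8,
               psp_conv_filters=512,
               classes=21,
               activation='softmax')

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
```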
|
facebookresearch/ParlAI | e4d59c30eef44f1f67105961b82a83fd28d7d78b | parlai/agents/rag/rag.py | python | RagAgent._convert_model | (self, opt: Opt) | return self._generation_agent._convert_model(self, opt) | Override BartAgent._convert_model to use RagConversionScript. | Override BartAgent._convert_model to use RagConversionScript. | [
"Override",
"BartAgent",
".",
"_convert_model",
"to",
"use",
"RagConversionScript",
"."
] | def _convert_model(self, opt: Opt) -> Opt:
"""
Override BartAgent._convert_model to use RagConversionScript.
"""
return self._generation_agent._convert_model(self, opt) | [
"def",
"_convert_model",
"(",
"self",
",",
"opt",
":",
"Opt",
")",
"->",
"Opt",
":",
"return",
"self",
".",
"_generation_agent",
".",
"_convert_model",
"(",
"self",
",",
"opt",
")"
] | https://github.com/facebookresearch/ParlAI/blob/e4d59c30eef44f1f67105961b82a83fd28d7d78b/parlai/agents/rag/rag.py#L271-L275 |
|
leancloud/satori | 701caccbd4fe45765001ca60435c0cb499477c03 | satori-rules/plugin/libs/requests/api.py | python | post | (url, data=None, json=None, **kwargs) | return request('post', url, data=data, json=json, **kwargs) | Sends a POST request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response | Sends a POST request. | [
"Sends",
"a",
"POST",
"request",
"."
] | def post(url, data=None, json=None, **kwargs):
"""Sends a POST request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('post', url, data=data, json=json, **kwargs) | [
"def",
"post",
"(",
"url",
",",
"data",
"=",
"None",
",",
"json",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"request",
"(",
"'post'",
",",
"url",
",",
"data",
"=",
"data",
",",
"json",
"=",
"json",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/leancloud/satori/blob/701caccbd4fe45765001ca60435c0cb499477c03/satori-rules/plugin/libs/requests/api.py#L96-L107 |
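The entry above is the vendored copy of `requests.post`. A standard usage sketch; the httpbin URL is only an illustration.

```python
import requests

# json= serialises the dict body and sets Content-Type: application/json.
resp = requests.post("https://httpbin.org/post",
                     json={"metric": "cpu.idle", "value": 97.5},
                     timeout=10)
resp.raise_for_status()                 # surface 4xx/5xx instead of ignoring them
print(resp.status_code, resp.json()["json"])

# data= sends a form-encoded body instead.
resp = requests.post("https://httpbin.org/post", data={"q": "status"}, timeout=10)
```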
|
openstack/barbican | a9d2b133c8dc3307974f119f9a2b23a4ba82e8ce | barbican/tasks/resources.py | python | BeginTypeOrder.get_name | (self) | return u._('Process TypeOrder') | [] | def get_name(self):
return u._('Process TypeOrder') | [
"def",
"get_name",
"(",
"self",
")",
":",
"return",
"u",
".",
"_",
"(",
"'Process TypeOrder'",
")"
] | https://github.com/openstack/barbican/blob/a9d2b133c8dc3307974f119f9a2b23a4ba82e8ce/barbican/tasks/resources.py#L231-L232 |
|||
pytroll/satpy | 09e51f932048f98cce7919a4ff8bd2ec01e1ae98 | satpy/resample.py | python | _LegacySatpyEWAResampler.resample | (self, *args, **kwargs) | return super(_LegacySatpyEWAResampler, self).resample(*args, **kwargs) | Run precompute and compute methods.
.. note::
This sets the default of 'mask_area' to False since it is
not needed in EWA resampling currently. | Run precompute and compute methods. | [
"Run",
"precompute",
"and",
"compute",
"methods",
"."
] | def resample(self, *args, **kwargs):
"""Run precompute and compute methods.
.. note::
This sets the default of 'mask_area' to False since it is
not needed in EWA resampling currently.
"""
kwargs.setdefault('mask_area', False)
return super(_LegacySatpyEWAResampler, self).resample(*args, **kwargs) | [
"def",
"resample",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"setdefault",
"(",
"'mask_area'",
",",
"False",
")",
"return",
"super",
"(",
"_LegacySatpyEWAResampler",
",",
"self",
")",
".",
"resample",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/pytroll/satpy/blob/09e51f932048f98cce7919a4ff8bd2ec01e1ae98/satpy/resample.py#L683-L693 |
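The method above is normally reached indirectly: user code calls `Scene.resample` with the EWA resampler and this class supplies the `mask_area=False` default. A sketch, with the input files, reader and target area used as placeholders.

```python
from satpy import Scene
from pyresample import load_area

scn = Scene(filenames=["SVI04_npp_granule.h5"], reader="viirs_sdr")  # placeholder granule
scn.load(["I04"])

area = load_area("areas.yaml", "euro4")          # placeholder area definition
# resampler="ewa" routes through the EWA code path; mask_area keeps its EWA default (False).
local_scn = scn.resample(area, resampler="ewa")
local_scn.save_datasets(writer="geotiff")
```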
|
bjmayor/hacker | e3ce2ad74839c2733b27dac6c0f495e0743e1866 | venv/lib/python3.5/site-packages/pip/_vendor/retrying.py | python | retry | (*dargs, **dkw) | Decorator function that instantiates the Retrying object
@param *dargs: positional arguments passed to Retrying object
@param **dkw: keyword arguments passed to the Retrying object | Decorator function that instantiates the Retrying object | [
"Decorator",
"function",
"that",
"instantiates",
"the",
"Retrying",
"object"
] | def retry(*dargs, **dkw):
"""
Decorator function that instantiates the Retrying object
@param *dargs: positional arguments passed to Retrying object
@param **dkw: keyword arguments passed to the Retrying object
"""
# support both @retry and @retry() as valid syntax
if len(dargs) == 1 and callable(dargs[0]):
def wrap_simple(f):
@six.wraps(f)
def wrapped_f(*args, **kw):
return Retrying().call(f, *args, **kw)
return wrapped_f
return wrap_simple(dargs[0])
else:
def wrap(f):
@six.wraps(f)
def wrapped_f(*args, **kw):
return Retrying(*dargs, **dkw).call(f, *args, **kw)
return wrapped_f
return wrap | [
"def",
"retry",
"(",
"*",
"dargs",
",",
"*",
"*",
"dkw",
")",
":",
"# support both @retry and @retry() as valid syntax",
"if",
"len",
"(",
"dargs",
")",
"==",
"1",
"and",
"callable",
"(",
"dargs",
"[",
"0",
"]",
")",
":",
"def",
"wrap_simple",
"(",
"f",
")",
":",
"@",
"six",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapped_f",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"return",
"Retrying",
"(",
")",
".",
"call",
"(",
"f",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"return",
"wrapped_f",
"return",
"wrap_simple",
"(",
"dargs",
"[",
"0",
"]",
")",
"else",
":",
"def",
"wrap",
"(",
"f",
")",
":",
"@",
"six",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapped_f",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"return",
"Retrying",
"(",
"*",
"dargs",
",",
"*",
"*",
"dkw",
")",
".",
"call",
"(",
"f",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"return",
"wrapped_f",
"return",
"wrap"
] | https://github.com/bjmayor/hacker/blob/e3ce2ad74839c2733b27dac6c0f495e0743e1866/venv/lib/python3.5/site-packages/pip/_vendor/retrying.py#L26-L53 |
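The decorator above supports both the bare and the parametrised form. A usage sketch; the failure simulation is only an illustration.

```python
import random
from retrying import retry

# Bare form: retries forever on any exception.
@retry
def sometimes_fails():
    if random.random() < 0.5:
        raise IOError("transient failure")
    return "ok"

# Parametrised form: the keyword arguments are handed straight to Retrying().
@retry(stop_max_attempt_number=5, wait_fixed=2000)   # at most 5 attempts, 2 s apart
def fetch_with_budget():
    return sometimes_fails()

print(fetch_with_budget())
```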
||
Tencent/GAutomator | 0ac9f849d1ca2c59760a91c5c94d3db375a380cd | GAutomatorIos/ga2/cloud/httptools/remote_connection.py | python | Request.get_method | (self) | return self._method | Returns the HTTP method used by this request. | Returns the HTTP method used by this request. | [
"Returns",
"the",
"HTTP",
"method",
"used",
"by",
"this",
"request",
"."
] | def get_method(self):
"""
Returns the HTTP method used by this request.
"""
return self._method | [
"def",
"get_method",
"(",
"self",
")",
":",
"return",
"self",
".",
"_method"
] | https://github.com/Tencent/GAutomator/blob/0ac9f849d1ca2c59760a91c5c94d3db375a380cd/GAutomatorIos/ga2/cloud/httptools/remote_connection.py#L67-L71 |
|
chribsen/simple-machine-learning-examples | dc94e52a4cebdc8bb959ff88b81ff8cfeca25022 | venv/lib/python2.7/site-packages/scipy/stats/_multivariate.py | python | multivariate_normal_gen.entropy | (self, mean=None, cov=1) | return 0.5 * logdet | Compute the differential entropy of the multivariate normal.
Parameters
----------
%(_mvn_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
Notes
-----
%(_mvn_doc_callparams_note)s | Compute the differential entropy of the multivariate normal. | [
"Compute",
"the",
"differential",
"entropy",
"of",
"the",
"multivariate",
"normal",
"."
] | def entropy(self, mean=None, cov=1):
"""
Compute the differential entropy of the multivariate normal.
Parameters
----------
%(_mvn_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
_, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
return 0.5 * logdet | [
"def",
"entropy",
"(",
"self",
",",
"mean",
"=",
"None",
",",
"cov",
"=",
"1",
")",
":",
"dim",
",",
"mean",
",",
"cov",
"=",
"self",
".",
"_process_parameters",
"(",
"None",
",",
"mean",
",",
"cov",
")",
"_",
",",
"logdet",
"=",
"np",
".",
"linalg",
".",
"slogdet",
"(",
"2",
"*",
"np",
".",
"pi",
"*",
"np",
".",
"e",
"*",
"cov",
")",
"return",
"0.5",
"*",
"logdet"
] | https://github.com/chribsen/simple-machine-learning-examples/blob/dc94e52a4cebdc8bb959ff88b81ff8cfeca25022/venv/lib/python2.7/site-packages/scipy/stats/_multivariate.py#L536-L556 |
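The entropy computed above is the closed form 0.5 * log det(2*pi*e*Sigma). A quick check against scipy's public API; the covariance values are arbitrary.

```python
import numpy as np
from scipy.stats import multivariate_normal

cov = np.array([[2.0, 0.3],
                [0.3, 1.0]])
h = multivariate_normal.entropy(mean=[0.0, 0.0], cov=cov)

# Same quantity from the closed form used in the implementation above.
_, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
assert np.isclose(h, 0.5 * logdet)
print(h)
```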
|
ninja-ide/ninja-ide | 87d91131bd19fdc3dcfd91eb97ad1e41c49c60c0 | ninja_ide/gui/editor/editor.py | python | NEditor.__manage_key_home | (self, event) | Performs home key action | Performs home key action | [
"Performs",
"home",
"key",
"action"
] | def __manage_key_home(self, event):
"""Performs home key action"""
cursor = self.textCursor()
indent = self.line_indent()
# For selection
move = QTextCursor.MoveAnchor
if event.modifiers() == Qt.ShiftModifier:
move = QTextCursor.KeepAnchor
# Operation
if cursor.positionInBlock() == indent:
cursor.movePosition(QTextCursor.StartOfBlock, move)
elif cursor.atBlockStart():
cursor.setPosition(cursor.block().position() + indent, move)
elif cursor.positionInBlock() > indent:
cursor.movePosition(QTextCursor.StartOfLine, move)
cursor.setPosition(cursor.block().position() + indent, move)
self.setTextCursor(cursor)
event.accept() | [
"def",
"__manage_key_home",
"(",
"self",
",",
"event",
")",
":",
"cursor",
"=",
"self",
".",
"textCursor",
"(",
")",
"indent",
"=",
"self",
".",
"line_indent",
"(",
")",
"# For selection",
"move",
"=",
"QTextCursor",
".",
"MoveAnchor",
"if",
"event",
".",
"modifiers",
"(",
")",
"==",
"Qt",
".",
"ShiftModifier",
":",
"move",
"=",
"QTextCursor",
".",
"KeepAnchor",
"# Operation",
"if",
"cursor",
".",
"positionInBlock",
"(",
")",
"==",
"indent",
":",
"cursor",
".",
"movePosition",
"(",
"QTextCursor",
".",
"StartOfBlock",
",",
"move",
")",
"elif",
"cursor",
".",
"atBlockStart",
"(",
")",
":",
"cursor",
".",
"setPosition",
"(",
"cursor",
".",
"block",
"(",
")",
".",
"position",
"(",
")",
"+",
"indent",
",",
"move",
")",
"elif",
"cursor",
".",
"positionInBlock",
"(",
")",
">",
"indent",
":",
"cursor",
".",
"movePosition",
"(",
"QTextCursor",
".",
"StartOfLine",
",",
"move",
")",
"cursor",
".",
"setPosition",
"(",
"cursor",
".",
"block",
"(",
")",
".",
"position",
"(",
")",
"+",
"indent",
",",
"move",
")",
"self",
".",
"setTextCursor",
"(",
"cursor",
")",
"event",
".",
"accept",
"(",
")"
] | https://github.com/ninja-ide/ninja-ide/blob/87d91131bd19fdc3dcfd91eb97ad1e41c49c60c0/ninja_ide/gui/editor/editor.py#L477-L494 |
||
hirofumi0810/tensorflow_end2end_speech_recognition | 65b9728089d5e92b25b92384a67419d970399a64 | examples/timit/data/load_dataset_joint_ctc_attention.py | python | Dataset.__init__ | (self, data_type, label_type, batch_size, map_file_path,
max_epoch=None, splice=1,
num_stack=1, num_skip=1,
shuffle=False, sort_utt=False, sort_stop_epoch=None,
progressbar=False) | A class for loading dataset.
Args:
data_type (string): train or dev or test
label_type (string): phone39 or phone48 or phone61 or
character or character_capital_divide
batch_size (int): the size of mini-batch
map_file_path (string): path to the mapping file
max_epoch (int, optional): the max epoch. None means infinite loop.
splice (int, optional): frames to splice. Default is 1 frame.
num_stack (int, optional): the number of frames to stack
num_skip (int, optional): the number of frames to skip
shuffle (bool, optional): if True, shuffle utterances. This is
disabled when sort_utt is True.
sort_utt (bool, optional): if True, sort all utterances by the
number of frames and utteraces in each mini-batch are shuffled.
Otherwise, shuffle utteraces.
sort_stop_epoch (int, optional): After sort_stop_epoch, training
will revert back to a random order
progressbar (bool, optional): if True, visualize progressbar | A class for loading dataset.
Args:
data_type (string): train or dev or test
label_type (string): phone39 or phone48 or phone61 or
character or character_capital_divide
batch_size (int): the size of mini-batch
map_file_path (string): path to the mapping file
max_epoch (int, optional): the max epoch. None means infinite loop.
splice (int, optional): frames to splice. Default is 1 frame.
num_stack (int, optional): the number of frames to stack
num_skip (int, optional): the number of frames to skip
shuffle (bool, optional): if True, shuffle utterances. This is
disabled when sort_utt is True.
sort_utt (bool, optional): if True, sort all utterances by the
number of frames and utteraces in each mini-batch are shuffled.
Otherwise, shuffle utteraces.
sort_stop_epoch (int, optional): After sort_stop_epoch, training
will revert back to a random order
progressbar (bool, optional): if True, visualize progressbar | [
"A",
"class",
"for",
"loading",
"dataset",
".",
"Args",
":",
"data_type",
"(",
"string",
")",
":",
"train",
"or",
"dev",
"or",
"test",
"label_type",
"(",
"string",
")",
":",
"phone39",
"or",
"phone48",
"or",
"phone61",
"or",
"character",
"or",
"character_capital_divide",
"batch_size",
"(",
"int",
")",
":",
"the",
"size",
"of",
"mini",
"-",
"batch",
"map_file_path",
"(",
"string",
")",
":",
"path",
"to",
"the",
"mapping",
"file",
"max_epoch",
"(",
"int",
"optional",
")",
":",
"the",
"max",
"epoch",
".",
"None",
"means",
"infinite",
"loop",
".",
"splice",
"(",
"int",
"optional",
")",
":",
"frames",
"to",
"splice",
".",
"Default",
"is",
"1",
"frame",
".",
"num_stack",
"(",
"int",
"optional",
")",
":",
"the",
"number",
"of",
"frames",
"to",
"stack",
"num_skip",
"(",
"int",
"optional",
")",
":",
"the",
"number",
"of",
"frames",
"to",
"skip",
"shuffle",
"(",
"bool",
"optional",
")",
":",
"if",
"True",
"shuffle",
"utterances",
".",
"This",
"is",
"disabled",
"when",
"sort_utt",
"is",
"True",
".",
"sort_utt",
"(",
"bool",
"optional",
")",
":",
"if",
"True",
"sort",
"all",
"utterances",
"by",
"the",
"number",
"of",
"frames",
"and",
"utteraces",
"in",
"each",
"mini",
"-",
"batch",
"are",
"shuffled",
".",
"Otherwise",
"shuffle",
"utteraces",
".",
"sort_stop_epoch",
"(",
"int",
"optional",
")",
":",
"After",
"sort_stop_epoch",
"training",
"will",
"revert",
"back",
"to",
"a",
"random",
"order",
"progressbar",
"(",
"bool",
"optional",
")",
":",
"if",
"True",
"visualize",
"progressbar"
] | def __init__(self, data_type, label_type, batch_size, map_file_path,
max_epoch=None, splice=1,
num_stack=1, num_skip=1,
shuffle=False, sort_utt=False, sort_stop_epoch=None,
progressbar=False):
"""A class for loading dataset.
Args:
data_type (string): train or dev or test
label_type (string): phone39 or phone48 or phone61 or
character or character_capital_divide
batch_size (int): the size of mini-batch
map_file_path (string): path to the mapping file
max_epoch (int, optional): the max epoch. None means infinite loop.
splice (int, optional): frames to splice. Default is 1 frame.
num_stack (int, optional): the number of frames to stack
num_skip (int, optional): the number of frames to skip
shuffle (bool, optional): if True, shuffle utterances. This is
disabled when sort_utt is True.
sort_utt (bool, optional): if True, sort all utterances by the
number of frames and utteraces in each mini-batch are shuffled.
Otherwise, shuffle utteraces.
sort_stop_epoch (int, optional): After sort_stop_epoch, training
will revert back to a random order
progressbar (bool, optional): if True, visualize progressbar
"""
super(Dataset, self).__init__(map_file_path=map_file_path)
self.is_test = True if data_type == 'test' else False
self.data_type = data_type
self.label_type = label_type
self.batch_size = batch_size
self.max_epoch = max_epoch
self.splice = splice
self.num_stack = num_stack
self.num_skip = num_skip
self.shuffle = shuffle
self.sort_utt = sort_utt
self.sort_stop_epoch = sort_stop_epoch
self.progressbar = progressbar
self.num_gpu = 1
# paths where datasets exist
dataset_root = ['/data/inaguma/timit',
'/n/sd8/inaguma/corpus/timit/dataset']
input_path = join(dataset_root[0], 'inputs', data_type)
# NOTE: ex.) save_path: timit_dataset_path/inputs/data_type/***.npy
label_path = join(dataset_root[0], 'labels', data_type, label_type)
# NOTE: ex.) save_path:
# timit_dataset_path/labels/data_type/label_type/***.npy
# Load the frame number dictionary
if isfile(join(input_path, 'frame_num.pickle')):
with open(join(input_path, 'frame_num.pickle'), 'rb') as f:
self.frame_num_dict = pickle.load(f)
else:
dataset_root.pop(0)
input_path = join(dataset_root[0], 'inputs', data_type)
label_path = join(dataset_root[0], 'labels', data_type, label_type)
with open(join(input_path, 'frame_num.pickle'), 'rb') as f:
self.frame_num_dict = pickle.load(f)
# Sort paths to input & label
axis = 1 if sort_utt else 0
frame_num_tuple_sorted = sorted(self.frame_num_dict.items(),
key=lambda x: x[axis])
input_paths, label_paths = [], []
for input_name, frame_num in frame_num_tuple_sorted:
input_paths.append(join(input_path, input_name + '.npy'))
label_paths.append(join(label_path, input_name + '.npy'))
self.input_paths = np.array(input_paths)
self.label_paths = np.array(label_paths)
# NOTE: Not load dataset yet
self.rest = set(range(0, len(self.input_paths), 1)) | [
"def",
"__init__",
"(",
"self",
",",
"data_type",
",",
"label_type",
",",
"batch_size",
",",
"map_file_path",
",",
"max_epoch",
"=",
"None",
",",
"splice",
"=",
"1",
",",
"num_stack",
"=",
"1",
",",
"num_skip",
"=",
"1",
",",
"shuffle",
"=",
"False",
",",
"sort_utt",
"=",
"False",
",",
"sort_stop_epoch",
"=",
"None",
",",
"progressbar",
"=",
"False",
")",
":",
"super",
"(",
"Dataset",
",",
"self",
")",
".",
"__init__",
"(",
"map_file_path",
"=",
"map_file_path",
")",
"self",
".",
"is_test",
"=",
"True",
"if",
"data_type",
"==",
"'test'",
"else",
"False",
"self",
".",
"data_type",
"=",
"data_type",
"self",
".",
"label_type",
"=",
"label_type",
"self",
".",
"batch_size",
"=",
"batch_size",
"self",
".",
"max_epoch",
"=",
"max_epoch",
"self",
".",
"splice",
"=",
"splice",
"self",
".",
"num_stack",
"=",
"num_stack",
"self",
".",
"num_skip",
"=",
"num_skip",
"self",
".",
"shuffle",
"=",
"shuffle",
"self",
".",
"sort_utt",
"=",
"sort_utt",
"self",
".",
"sort_stop_epoch",
"=",
"sort_stop_epoch",
"self",
".",
"progressbar",
"=",
"progressbar",
"self",
".",
"num_gpu",
"=",
"1",
"# paths where datasets exist",
"dataset_root",
"=",
"[",
"'/data/inaguma/timit'",
",",
"'/n/sd8/inaguma/corpus/timit/dataset'",
"]",
"input_path",
"=",
"join",
"(",
"dataset_root",
"[",
"0",
"]",
",",
"'inputs'",
",",
"data_type",
")",
"# NOTE: ex.) save_path: timit_dataset_path/inputs/data_type/***.npy",
"label_path",
"=",
"join",
"(",
"dataset_root",
"[",
"0",
"]",
",",
"'labels'",
",",
"data_type",
",",
"label_type",
")",
"# NOTE: ex.) save_path:",
"# timit_dataset_path/labels/data_type/label_type/***.npy",
"# Load the frame number dictionary",
"if",
"isfile",
"(",
"join",
"(",
"input_path",
",",
"'frame_num.pickle'",
")",
")",
":",
"with",
"open",
"(",
"join",
"(",
"input_path",
",",
"'frame_num.pickle'",
")",
",",
"'rb'",
")",
"as",
"f",
":",
"self",
".",
"frame_num_dict",
"=",
"pickle",
".",
"load",
"(",
"f",
")",
"else",
":",
"dataset_root",
".",
"pop",
"(",
"0",
")",
"input_path",
"=",
"join",
"(",
"dataset_root",
"[",
"0",
"]",
",",
"'inputs'",
",",
"data_type",
")",
"label_path",
"=",
"join",
"(",
"dataset_root",
"[",
"0",
"]",
",",
"'labels'",
",",
"data_type",
",",
"label_type",
")",
"with",
"open",
"(",
"join",
"(",
"input_path",
",",
"'frame_num.pickle'",
")",
",",
"'rb'",
")",
"as",
"f",
":",
"self",
".",
"frame_num_dict",
"=",
"pickle",
".",
"load",
"(",
"f",
")",
"# Sort paths to input & label",
"axis",
"=",
"1",
"if",
"sort_utt",
"else",
"0",
"frame_num_tuple_sorted",
"=",
"sorted",
"(",
"self",
".",
"frame_num_dict",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"axis",
"]",
")",
"input_paths",
",",
"label_paths",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"input_name",
",",
"frame_num",
"in",
"frame_num_tuple_sorted",
":",
"input_paths",
".",
"append",
"(",
"join",
"(",
"input_path",
",",
"input_name",
"+",
"'.npy'",
")",
")",
"label_paths",
".",
"append",
"(",
"join",
"(",
"label_path",
",",
"input_name",
"+",
"'.npy'",
")",
")",
"self",
".",
"input_paths",
"=",
"np",
".",
"array",
"(",
"input_paths",
")",
"self",
".",
"label_paths",
"=",
"np",
".",
"array",
"(",
"label_paths",
")",
"# NOTE: Not load dataset yet",
"self",
".",
"rest",
"=",
"set",
"(",
"range",
"(",
"0",
",",
"len",
"(",
"self",
".",
"input_paths",
")",
",",
"1",
")",
")"
] | https://github.com/hirofumi0810/tensorflow_end2end_speech_recognition/blob/65b9728089d5e92b25b92384a67419d970399a64/examples/timit/data/load_dataset_joint_ctc_attention.py#L22-L97 |
||
akanazawa/hmr | f149abeb0a7e2a3412eb68274a94a9232f7cb667 | src/tf_smpl/batch_lbs.py | python | batch_skew | (vec, batch_size=None) | vec is N x 3, batch_size is int
returns N x 3 x 3. Skew_sym version of each matrix. | vec is N x 3, batch_size is int | [
"vec",
"is",
"N",
"x",
"3",
"batch_size",
"is",
"int"
] | def batch_skew(vec, batch_size=None):
"""
vec is N x 3, batch_size is int
returns N x 3 x 3. Skew_sym version of each matrix.
"""
with tf.name_scope("batch_skew", [vec]):
if batch_size is None:
batch_size = vec.shape.as_list()[0]
col_inds = tf.constant([1, 2, 3, 5, 6, 7])
indices = tf.reshape(
tf.reshape(tf.range(0, batch_size) * 9, [-1, 1]) + col_inds,
[-1, 1])
updates = tf.reshape(
tf.stack(
[
-vec[:, 2], vec[:, 1], vec[:, 2], -vec[:, 0], -vec[:, 1],
vec[:, 0]
],
axis=1), [-1])
out_shape = [batch_size * 9]
res = tf.scatter_nd(indices, updates, out_shape)
res = tf.reshape(res, [batch_size, 3, 3])
return res | [
"def",
"batch_skew",
"(",
"vec",
",",
"batch_size",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"\"batch_skew\"",
",",
"[",
"vec",
"]",
")",
":",
"if",
"batch_size",
"is",
"None",
":",
"batch_size",
"=",
"vec",
".",
"shape",
".",
"as_list",
"(",
")",
"[",
"0",
"]",
"col_inds",
"=",
"tf",
".",
"constant",
"(",
"[",
"1",
",",
"2",
",",
"3",
",",
"5",
",",
"6",
",",
"7",
"]",
")",
"indices",
"=",
"tf",
".",
"reshape",
"(",
"tf",
".",
"reshape",
"(",
"tf",
".",
"range",
"(",
"0",
",",
"batch_size",
")",
"*",
"9",
",",
"[",
"-",
"1",
",",
"1",
"]",
")",
"+",
"col_inds",
",",
"[",
"-",
"1",
",",
"1",
"]",
")",
"updates",
"=",
"tf",
".",
"reshape",
"(",
"tf",
".",
"stack",
"(",
"[",
"-",
"vec",
"[",
":",
",",
"2",
"]",
",",
"vec",
"[",
":",
",",
"1",
"]",
",",
"vec",
"[",
":",
",",
"2",
"]",
",",
"-",
"vec",
"[",
":",
",",
"0",
"]",
",",
"-",
"vec",
"[",
":",
",",
"1",
"]",
",",
"vec",
"[",
":",
",",
"0",
"]",
"]",
",",
"axis",
"=",
"1",
")",
",",
"[",
"-",
"1",
"]",
")",
"out_shape",
"=",
"[",
"batch_size",
"*",
"9",
"]",
"res",
"=",
"tf",
".",
"scatter_nd",
"(",
"indices",
",",
"updates",
",",
"out_shape",
")",
"res",
"=",
"tf",
".",
"reshape",
"(",
"res",
",",
"[",
"batch_size",
",",
"3",
",",
"3",
"]",
")",
"return",
"res"
] | https://github.com/akanazawa/hmr/blob/f149abeb0a7e2a3412eb68274a94a9232f7cb667/src/tf_smpl/batch_lbs.py#L15-L39 |
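`batch_skew` builds, for each batch row, the skew-symmetric matrix of a 3-vector: the matrix S with S @ u equal to the cross product v x u. A single-vector NumPy reference that mirrors the index layout used above; it is a sketch for clarity, not the TensorFlow implementation itself.

```python
import numpy as np

def skew(v):
    # Same layout as the scatter above: flattened entries 1, 2, 3, 5, 6, 7
    # receive [-v2, v1, v2, -v0, -v1, v0].
    return np.array([[0.0,  -v[2],  v[1]],
                     [v[2],  0.0,  -v[0]],
                     [-v[1], v[0],  0.0]])

v = np.array([1.0, 2.0, 3.0])
u = np.array([0.5, -1.0, 2.0])
assert np.allclose(skew(v) @ u, np.cross(v, u))   # S(v) @ u == v x u
```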
||
jgagneastro/coffeegrindsize | 22661ebd21831dba4cf32bfc6ba59fe3d49f879c | App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/matplotlib/text.py | python | TextWithDash.draw | (self, renderer) | Draw the :class:`TextWithDash` object to the given *renderer*. | Draw the :class:`TextWithDash` object to the given *renderer*. | [
"Draw",
"the",
":",
"class",
":",
"TextWithDash",
"object",
"to",
"the",
"given",
"*",
"renderer",
"*",
"."
] | def draw(self, renderer):
"""
Draw the :class:`TextWithDash` object to the given *renderer*.
"""
self.update_coords(renderer)
Text.draw(self, renderer)
if self.get_dashlength() > 0.0:
self.dashline.draw(renderer)
self.stale = False | [
"def",
"draw",
"(",
"self",
",",
"renderer",
")",
":",
"self",
".",
"update_coords",
"(",
"renderer",
")",
"Text",
".",
"draw",
"(",
"self",
",",
"renderer",
")",
"if",
"self",
".",
"get_dashlength",
"(",
")",
">",
"0.0",
":",
"self",
".",
"dashline",
".",
"draw",
"(",
"renderer",
")",
"self",
".",
"stale",
"=",
"False"
] | https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/matplotlib/text.py#L1410-L1418 |
||
replit-archive/empythoned | 977ec10ced29a3541a4973dc2b59910805695752 | dist/lib/python2.7/decimal.py | python | Decimal._compare_check_nans | (self, other, context) | return 0 | Version of _check_nans used for the signaling comparisons
compare_signal, __le__, __lt__, __ge__, __gt__.
Signal InvalidOperation if either self or other is a (quiet
or signaling) NaN. Signaling NaNs take precedence over quiet
NaNs.
Return 0 if neither operand is a NaN. | Version of _check_nans used for the signaling comparisons
compare_signal, __le__, __lt__, __ge__, __gt__. | [
"Version",
"of",
"_check_nans",
"used",
"for",
"the",
"signaling",
"comparisons",
"compare_signal",
"__le__",
"__lt__",
"__ge__",
"__gt__",
"."
] | def _compare_check_nans(self, other, context):
"""Version of _check_nans used for the signaling comparisons
compare_signal, __le__, __lt__, __ge__, __gt__.
Signal InvalidOperation if either self or other is a (quiet
or signaling) NaN. Signaling NaNs take precedence over quiet
NaNs.
Return 0 if neither operand is a NaN.
"""
if context is None:
context = getcontext()
if self._is_special or other._is_special:
if self.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
self)
elif other.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
other)
elif self.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
self)
elif other.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
other)
return 0 | [
"def",
"_compare_check_nans",
"(",
"self",
",",
"other",
",",
"context",
")",
":",
"if",
"context",
"is",
"None",
":",
"context",
"=",
"getcontext",
"(",
")",
"if",
"self",
".",
"_is_special",
"or",
"other",
".",
"_is_special",
":",
"if",
"self",
".",
"is_snan",
"(",
")",
":",
"return",
"context",
".",
"_raise_error",
"(",
"InvalidOperation",
",",
"'comparison involving sNaN'",
",",
"self",
")",
"elif",
"other",
".",
"is_snan",
"(",
")",
":",
"return",
"context",
".",
"_raise_error",
"(",
"InvalidOperation",
",",
"'comparison involving sNaN'",
",",
"other",
")",
"elif",
"self",
".",
"is_qnan",
"(",
")",
":",
"return",
"context",
".",
"_raise_error",
"(",
"InvalidOperation",
",",
"'comparison involving NaN'",
",",
"self",
")",
"elif",
"other",
".",
"is_qnan",
"(",
")",
":",
"return",
"context",
".",
"_raise_error",
"(",
"InvalidOperation",
",",
"'comparison involving NaN'",
",",
"other",
")",
"return",
"0"
] | https://github.com/replit-archive/empythoned/blob/977ec10ced29a3541a4973dc2b59910805695752/dist/lib/python2.7/decimal.py#L760-L791 |
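The helper above is what makes `<`, `<=`, `>`, `>=` and `compare_signal` signal on NaN while `==` stays quiet. A short demonstration with the stdlib `decimal` module.

```python
from decimal import Decimal, InvalidOperation, getcontext

x, nan = Decimal("1.5"), Decimal("NaN")

print(x == nan)            # False: equality uses the quiet NaN path

try:
    x < nan                # ordering goes through the signaling check above
except InvalidOperation:
    print("ordering against NaN raises InvalidOperation")

getcontext().traps[InvalidOperation] = 0   # untrap the signal
print(x < nan)             # now quietly False
```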
|
roclark/sportsipy | c19f545d3376d62ded6304b137dc69238ac620a9 | sportsipy/ncaab/boxscore.py | python | Boxscore.home_points | (self) | return self._home_points | Returns an ``int`` of the number of points the home team scored. | Returns an ``int`` of the number of points the home team scored. | [
"Returns",
"an",
"int",
"of",
"the",
"number",
"of",
"points",
"the",
"home",
"team",
"scored",
"."
] | def home_points(self):
"""
Returns an ``int`` of the number of points the home team scored.
"""
return self._home_points | [
"def",
"home_points",
"(",
"self",
")",
":",
"return",
"self",
".",
"_home_points"
] | https://github.com/roclark/sportsipy/blob/c19f545d3376d62ded6304b137dc69238ac620a9/sportsipy/ncaab/boxscore.py#L1467-L1471 |
|
numba/numba | bf480b9e0da858a65508c2b17759a72ee6a44c51 | numba/np/npyfuncs.py | python | np_real_log_impl | (context, builder, sig, args) | return mathimpl.log_impl(context, builder, sig, args) | [] | def np_real_log_impl(context, builder, sig, args):
_check_arity_and_homogeneity(sig, args, 1)
return mathimpl.log_impl(context, builder, sig, args) | [
"def",
"np_real_log_impl",
"(",
"context",
",",
"builder",
",",
"sig",
",",
"args",
")",
":",
"_check_arity_and_homogeneity",
"(",
"sig",
",",
"args",
",",
"1",
")",
"return",
"mathimpl",
".",
"log_impl",
"(",
"context",
",",
"builder",
",",
"sig",
",",
"args",
")"
] | https://github.com/numba/numba/blob/bf480b9e0da858a65508c2b17759a72ee6a44c51/numba/np/npyfuncs.py#L626-L628 |
|||
jina-ai/jina | c77a492fcd5adba0fc3de5347bea83dd4e7d8087 | jina/peapods/pods/k8slib/kubernetes_tools.py | python | _patch_deployment_with_device_plugins | (yaml_content: str, params: Dict) | return json.dumps(deployment) | [] | def _patch_deployment_with_device_plugins(yaml_content: str, params: Dict):
import yaml
device_plugins = _create_device_plugins(params['device_plugins'])
deployment = yaml.safe_load(yaml_content)
deployment['spec']['template']['spec']['containers'][0][
'resources'
] = device_plugins
return json.dumps(deployment) | [
"def",
"_patch_deployment_with_device_plugins",
"(",
"yaml_content",
":",
"str",
",",
"params",
":",
"Dict",
")",
":",
"import",
"yaml",
"device_plugins",
"=",
"_create_device_plugins",
"(",
"params",
"[",
"'device_plugins'",
"]",
")",
"deployment",
"=",
"yaml",
".",
"safe_load",
"(",
"yaml_content",
")",
"deployment",
"[",
"'spec'",
"]",
"[",
"'template'",
"]",
"[",
"'spec'",
"]",
"[",
"'containers'",
"]",
"[",
"0",
"]",
"[",
"'resources'",
"]",
"=",
"device_plugins",
"return",
"json",
".",
"dumps",
"(",
"deployment",
")"
] | https://github.com/jina-ai/jina/blob/c77a492fcd5adba0fc3de5347bea83dd4e7d8087/jina/peapods/pods/k8slib/kubernetes_tools.py#L142-L151 |
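The helper above loads a deployment manifest and attaches device-plugin resources to its first container. A self-contained sketch of the same patch; the `limits` wrapper stands in for whatever `_create_device_plugins` produces and is an assumption here, as is the GPU resource name.

```python
import json
import yaml

deployment_yaml = """
spec:
  template:
    spec:
      containers:
      - name: executor
        image: jinaai/jina
"""

# Stand-in for _create_device_plugins({'nvidia.com/gpu': 2}); the exact shape is assumed.
resources = {"limits": {"nvidia.com/gpu": 2}}

deployment = yaml.safe_load(deployment_yaml)
deployment["spec"]["template"]["spec"]["containers"][0]["resources"] = resources
print(json.dumps(deployment, indent=2))
```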
|||
TencentCloud/tencentcloud-sdk-python | 3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2 | tencentcloud/ecm/v20190719/models.py | python | SetSecurityGroupForLoadbalancersRequest.__init__ | (self) | r"""
:param LoadBalancerIds: 负载均衡实例ID数组
:type LoadBalancerIds: list of str
:param SecurityGroup: 安全组ID,如 esg-12345678
:type SecurityGroup: str
:param OperationType: ADD 绑定安全组;
DEL 解绑安全组
:type OperationType: str | r"""
:param LoadBalancerIds: 负载均衡实例ID数组
:type LoadBalancerIds: list of str
:param SecurityGroup: 安全组ID,如 esg-12345678
:type SecurityGroup: str
:param OperationType: ADD 绑定安全组;
DEL 解绑安全组
:type OperationType: str | [
"r",
":",
"param",
"LoadBalancerIds",
":",
"负载均衡实例ID数组",
":",
"type",
"LoadBalancerIds",
":",
"list",
"of",
"str",
":",
"param",
"SecurityGroup",
":",
"安全组ID,如",
"esg",
"-",
"12345678",
":",
"type",
"SecurityGroup",
":",
"str",
":",
"param",
"OperationType",
":",
"ADD",
"绑定安全组;",
"DEL",
"解绑安全组",
":",
"type",
"OperationType",
":",
"str"
] | def __init__(self):
r"""
:param LoadBalancerIds: 负载均衡实例ID数组
:type LoadBalancerIds: list of str
:param SecurityGroup: 安全组ID,如 esg-12345678
:type SecurityGroup: str
:param OperationType: ADD 绑定安全组;
DEL 解绑安全组
:type OperationType: str
"""
self.LoadBalancerIds = None
self.SecurityGroup = None
self.OperationType = None | [
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"LoadBalancerIds",
"=",
"None",
"self",
".",
"SecurityGroup",
"=",
"None",
"self",
".",
"OperationType",
"=",
"None"
] | https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/ecm/v20190719/models.py#L11399-L11411 |
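The request model above carries three fields. A calling sketch following the SDK's usual client pattern; the credentials, region and load balancer ID are placeholders, and the client method name follows the SDK's action-name convention rather than anything stated in this entry.

```python
from tencentcloud.common import credential
from tencentcloud.ecm.v20190719 import ecm_client, models

cred = credential.Credential("SECRET_ID", "SECRET_KEY")      # placeholders
client = ecm_client.EcmClient(cred, "ap-guangzhou")

req = models.SetSecurityGroupForLoadbalancersRequest()
req.LoadBalancerIds = ["lb-xxxxxxxx"]
req.SecurityGroup = "esg-12345678"
req.OperationType = "ADD"        # ADD binds the security group, DEL unbinds it

resp = client.SetSecurityGroupForLoadbalancers(req)
print(resp.to_json_string())
```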
||
omz/PythonistaAppTemplate | f560f93f8876d82a21d108977f90583df08d55af | PythonistaAppTemplate/PythonistaKit.framework/pylib/sysconfig.py | python | get_config_h_filename | () | return os.path.join(inc_dir, 'pyconfig.h') | Returns the path of pyconfig.h. | Returns the path of pyconfig.h. | [
"Returns",
"the",
"path",
"of",
"pyconfig",
".",
"h",
"."
] | def get_config_h_filename():
"""Returns the path of pyconfig.h."""
if _PYTHON_BUILD:
if os.name == "nt":
inc_dir = os.path.join(_PROJECT_BASE, "PC")
else:
inc_dir = _PROJECT_BASE
else:
inc_dir = get_path('platinclude')
return os.path.join(inc_dir, 'pyconfig.h') | [
"def",
"get_config_h_filename",
"(",
")",
":",
"if",
"_PYTHON_BUILD",
":",
"if",
"os",
".",
"name",
"==",
"\"nt\"",
":",
"inc_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_PROJECT_BASE",
",",
"\"PC\"",
")",
"else",
":",
"inc_dir",
"=",
"_PROJECT_BASE",
"else",
":",
"inc_dir",
"=",
"get_path",
"(",
"'platinclude'",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"inc_dir",
",",
"'pyconfig.h'",
")"
] | https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib/sysconfig.py#L401-L410 |
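A quick way to exercise the function above on a CPython build that ships pyconfig.h; the macro looked up at the end is just an example.

```python
import sysconfig

path = sysconfig.get_config_h_filename()
print(path)

with open(path) as fh:
    macros = sysconfig.parse_config_h(fh)   # parse the build-time #defines
print(macros.get("SIZEOF_LONG"))
```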
|
python-social-auth/social-core | 1ea27e8989657bb35dd37b6ee2e038e1358fbc96 | social_core/backends/weibo.py | python | WeiboOAuth2.get_uid | (self, access_token) | return data['uid'] | Return uid by access_token | Return uid by access_token | [
"Return",
"uid",
"by",
"access_token"
] | def get_uid(self, access_token):
"""Return uid by access_token"""
data = self.get_json(
'https://api.weibo.com/oauth2/get_token_info',
method='POST',
params={'access_token': access_token}
)
return data['uid'] | [
"def",
"get_uid",
"(",
"self",
",",
"access_token",
")",
":",
"data",
"=",
"self",
".",
"get_json",
"(",
"'https://api.weibo.com/oauth2/get_token_info'",
",",
"method",
"=",
"'POST'",
",",
"params",
"=",
"{",
"'access_token'",
":",
"access_token",
"}",
")",
"return",
"data",
"[",
"'uid'",
"]"
] | https://github.com/python-social-auth/social-core/blob/1ea27e8989657bb35dd37b6ee2e038e1358fbc96/social_core/backends/weibo.py#L41-L48 |
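The backend method above simply POSTs the access token to Weibo's token-info endpoint. The same call written standalone with `requests`; the token value is a placeholder.

```python
import requests

access_token = "2.00xxxxxxxx"   # placeholder OAuth2 token

resp = requests.post(
    "https://api.weibo.com/oauth2/get_token_info",
    params={"access_token": access_token},
    timeout=10,
)
resp.raise_for_status()
print(resp.json().get("uid"))
```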
|
googleads/google-ads-python | 2a1d6062221f6aad1992a6bcca0e7e4a93d2db86 | google/ads/googleads/v9/services/services/language_constant_service/client.py | python | LanguageConstantServiceClient.get_language_constant | (
self,
request: Union[
language_constant_service.GetLanguageConstantRequest, dict
] = None,
*,
resource_name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) | return response | r"""Returns the requested language constant.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v9.services.types.GetLanguageConstantRequest, dict]):
The request object. Request message for
[LanguageConstantService.GetLanguageConstant][google.ads.googleads.v9.services.LanguageConstantService.GetLanguageConstant].
resource_name (:class:`str`):
Required. Resource name of the
language constant to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v9.resources.types.LanguageConstant:
A language. | r"""Returns the requested language constant. | [
"r",
"Returns",
"the",
"requested",
"language",
"constant",
"."
] | def get_language_constant(
self,
request: Union[
language_constant_service.GetLanguageConstantRequest, dict
] = None,
*,
resource_name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> language_constant.LanguageConstant:
r"""Returns the requested language constant.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v9.services.types.GetLanguageConstantRequest, dict]):
The request object. Request message for
[LanguageConstantService.GetLanguageConstant][google.ads.googleads.v9.services.LanguageConstantService.GetLanguageConstant].
resource_name (:class:`str`):
Required. Resource name of the
language constant to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v9.resources.types.LanguageConstant:
A language.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a language_constant_service.GetLanguageConstantRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, language_constant_service.GetLanguageConstantRequest
):
request = language_constant_service.GetLanguageConstantRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_language_constant
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response | [
"def",
"get_language_constant",
"(",
"self",
",",
"request",
":",
"Union",
"[",
"language_constant_service",
".",
"GetLanguageConstantRequest",
",",
"dict",
"]",
"=",
"None",
",",
"*",
",",
"resource_name",
":",
"str",
"=",
"None",
",",
"retry",
":",
"OptionalRetry",
"=",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"timeout",
":",
"float",
"=",
"None",
",",
"metadata",
":",
"Sequence",
"[",
"Tuple",
"[",
"str",
",",
"str",
"]",
"]",
"=",
"(",
")",
",",
")",
"->",
"language_constant",
".",
"LanguageConstant",
":",
"# Create or coerce a protobuf request object.",
"# Sanity check: If we got a request object, we should *not* have",
"# gotten any keyword arguments that map to the request.",
"if",
"request",
"is",
"not",
"None",
"and",
"any",
"(",
"[",
"resource_name",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"If the `request` argument is set, then none of \"",
"\"the individual field arguments should be set.\"",
")",
"# Minor optimization to avoid making a copy if the user passes",
"# in a language_constant_service.GetLanguageConstantRequest.",
"# There's no risk of modifying the input as we've already verified",
"# there are no flattened fields.",
"if",
"not",
"isinstance",
"(",
"request",
",",
"language_constant_service",
".",
"GetLanguageConstantRequest",
")",
":",
"request",
"=",
"language_constant_service",
".",
"GetLanguageConstantRequest",
"(",
"request",
")",
"# If we have keyword arguments corresponding to fields on the",
"# request, apply these.",
"if",
"resource_name",
"is",
"not",
"None",
":",
"request",
".",
"resource_name",
"=",
"resource_name",
"# Wrap the RPC method; this adds retry and timeout information,",
"# and friendly error handling.",
"rpc",
"=",
"self",
".",
"_transport",
".",
"_wrapped_methods",
"[",
"self",
".",
"_transport",
".",
"get_language_constant",
"]",
"# Certain fields should be provided within the metadata header;",
"# add these here.",
"metadata",
"=",
"tuple",
"(",
"metadata",
")",
"+",
"(",
"gapic_v1",
".",
"routing_header",
".",
"to_grpc_metadata",
"(",
"(",
"(",
"\"resource_name\"",
",",
"request",
".",
"resource_name",
")",
",",
")",
")",
",",
")",
"# Send the request.",
"response",
"=",
"rpc",
"(",
"request",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
",",
")",
"# Done; return the response.",
"return",
"response"
] | https://github.com/googleads/google-ads-python/blob/2a1d6062221f6aad1992a6bcca0e7e4a93d2db86/google/ads/googleads/v9/services/services/language_constant_service/client.py#L375-L457 |
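A calling sketch for the service method above, assuming credentials in the standard google-ads.yaml file; criterion ID 1000 (English) is used purely as an example.

```python
from google.ads.googleads.client import GoogleAdsClient

client = GoogleAdsClient.load_from_storage(version="v9")
service = client.get_service("LanguageConstantService")

resource_name = service.language_constant_path("1000")   # languageConstants/1000
language = service.get_language_constant(resource_name=resource_name)
print(language.code, language.name)
```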
|
sacmehta/EdgeNets | 2b232d3f7fb60658755dad1ebca0ffc895cc795e | model/detection/ssd.py | python | SSD300.init_params | (self) | Function to initialze the parameters | Function to initialze the parameters | [
"Function",
"to",
"initialze",
"the",
"parameters"
] | def init_params(self):
'''
Function to initialze the parameters
'''
print_info_message('Initializaing Conv Layers with Xavier Unifrom')
# initializing matters a lot
# changing to Kaiming He's init functionaity, does not let the model to converge.
# probably because, smooth function struggles with that initialization.
# XAVIER Unifrom Rocks here
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight) | [
"def",
"init_params",
"(",
"self",
")",
":",
"print_info_message",
"(",
"'Initializaing Conv Layers with Xavier Unifrom'",
")",
"# initializing matters a lot",
"# changing to Kaiming He's init functionaity, does not let the model to converge.",
"# probably because, smooth function struggles with that initialization.",
"# XAVIER Unifrom Rocks here",
"for",
"m",
"in",
"self",
".",
"modules",
"(",
")",
":",
"if",
"isinstance",
"(",
"m",
",",
"nn",
".",
"Conv2d",
")",
":",
"nn",
".",
"init",
".",
"xavier_uniform_",
"(",
"m",
".",
"weight",
")"
] | https://github.com/sacmehta/EdgeNets/blob/2b232d3f7fb60658755dad1ebca0ffc895cc795e/model/detection/ssd.py#L47-L58 |
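The initializer above applies Xavier-uniform weights to every `Conv2d`. The same policy as a standalone PyTorch helper, runnable on any module:

```python
import torch.nn as nn

def init_conv_xavier(model: nn.Module) -> None:
    # Mirror of SSD300.init_params: Xavier-uniform on every Conv2d weight tensor.
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.xavier_uniform_(m.weight)

net = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1),
                    nn.ReLU(),
                    nn.Conv2d(16, 8, 3))
init_conv_xavier(net)
```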
||
jelmer/xandikos | 3149a633c388a6f1dffbc6686763fca00f72e3bc | xandikos/scheduling.py | python | ScheduleInbox.get_max_date_time | (self) | Return maximum datetime property. | Return maximum datetime property. | [
"Return",
"maximum",
"datetime",
"property",
"."
] | def get_max_date_time(self):
"""Return maximum datetime property."""
raise NotImplementedError(self.get_max_date_time) | [
"def",
"get_max_date_time",
"(",
"self",
")",
":",
"raise",
"NotImplementedError",
"(",
"self",
".",
"get_max_date_time",
")"
] | https://github.com/jelmer/xandikos/blob/3149a633c388a6f1dffbc6686763fca00f72e3bc/xandikos/scheduling.py#L91-L93 |
||
networkx/networkx | 1620568e36702b1cfeaf1c0277b167b6cb93e48d | networkx/linalg/bethehessianmatrix.py | python | bethe_hessian_matrix | (G, r=None, nodelist=None) | return (r ** 2 - 1) * I - r * A + D | r"""Returns the Bethe Hessian matrix of G.
The Bethe Hessian is a family of matrices parametrized by r, defined as
H(r) = (r^2 - 1) I - r A + D where A is the adjacency matrix, D is the
diagonal matrix of node degrees, and I is the identify matrix. It is equal
to the graph laplacian when the regularizer r = 1.
The default choice of regularizer should be the ratio [2]
.. math::
r_m = \left(\sum k_i \right)^{-1}\left(\sum k_i^2 \right) - 1
Parameters
----------
G : Graph
A NetworkX graph
r : float
Regularizer parameter
nodelist : list, optional
The rows and columns are ordered according to the nodes in nodelist.
If nodelist is None, then the ordering is produced by G.nodes().
Returns
-------
H : Numpy matrix
The Bethe Hessian matrix of G, with paramter r.
Examples
--------
>>> k = [3, 2, 2, 1, 0]
>>> G = nx.havel_hakimi_graph(k)
>>> H = nx.modularity_matrix(G)
See Also
--------
bethe_hessian_spectrum
to_numpy_array
adjacency_matrix
laplacian_matrix
References
----------
.. [1] A. Saade, F. Krzakala and L. Zdeborová
"Spectral clustering of graphs with the bethe hessian",
Advances in Neural Information Processing Systems. 2014.
.. [2] C. M. Lee, E. Levina
"Estimating the number of communities in networks by spectral methods"
arXiv:1507.00827, 2015. | r"""Returns the Bethe Hessian matrix of G. | [
"r",
"Returns",
"the",
"Bethe",
"Hessian",
"matrix",
"of",
"G",
"."
] | def bethe_hessian_matrix(G, r=None, nodelist=None):
r"""Returns the Bethe Hessian matrix of G.
The Bethe Hessian is a family of matrices parametrized by r, defined as
H(r) = (r^2 - 1) I - r A + D where A is the adjacency matrix, D is the
diagonal matrix of node degrees, and I is the identify matrix. It is equal
to the graph laplacian when the regularizer r = 1.
The default choice of regularizer should be the ratio [2]
.. math::
r_m = \left(\sum k_i \right)^{-1}\left(\sum k_i^2 \right) - 1
Parameters
----------
G : Graph
A NetworkX graph
r : float
Regularizer parameter
nodelist : list, optional
The rows and columns are ordered according to the nodes in nodelist.
If nodelist is None, then the ordering is produced by G.nodes().
Returns
-------
H : Numpy matrix
The Bethe Hessian matrix of G, with paramter r.
Examples
--------
>>> k = [3, 2, 2, 1, 0]
>>> G = nx.havel_hakimi_graph(k)
>>> H = nx.modularity_matrix(G)
See Also
--------
bethe_hessian_spectrum
to_numpy_array
adjacency_matrix
laplacian_matrix
References
----------
.. [1] A. Saade, F. Krzakala and L. Zdeborová
"Spectral clustering of graphs with the bethe hessian",
Advances in Neural Information Processing Systems. 2014.
.. [2] C. M. Lee, E. Levina
"Estimating the number of communities in networks by spectral methods"
arXiv:1507.00827, 2015.
"""
import scipy as sp
import scipy.sparse # call as sp.sparse
if nodelist is None:
nodelist = list(G)
if r is None:
r = sum(d ** 2 for v, d in nx.degree(G)) / sum(d for v, d in nx.degree(G)) - 1
A = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, format="csr")
n, m = A.shape
diags = A.sum(axis=1)
D = sp.sparse.spdiags(diags.flatten(), [0], m, n, format="csr")
I = sp.sparse.eye(m, n, format="csr")
return (r ** 2 - 1) * I - r * A + D | [
"def",
"bethe_hessian_matrix",
"(",
"G",
",",
"r",
"=",
"None",
",",
"nodelist",
"=",
"None",
")",
":",
"import",
"scipy",
"as",
"sp",
"import",
"scipy",
".",
"sparse",
"# call as sp.sparse",
"if",
"nodelist",
"is",
"None",
":",
"nodelist",
"=",
"list",
"(",
"G",
")",
"if",
"r",
"is",
"None",
":",
"r",
"=",
"sum",
"(",
"d",
"**",
"2",
"for",
"v",
",",
"d",
"in",
"nx",
".",
"degree",
"(",
"G",
")",
")",
"/",
"sum",
"(",
"d",
"for",
"v",
",",
"d",
"in",
"nx",
".",
"degree",
"(",
"G",
")",
")",
"-",
"1",
"A",
"=",
"nx",
".",
"to_scipy_sparse_matrix",
"(",
"G",
",",
"nodelist",
"=",
"nodelist",
",",
"format",
"=",
"\"csr\"",
")",
"n",
",",
"m",
"=",
"A",
".",
"shape",
"diags",
"=",
"A",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"D",
"=",
"sp",
".",
"sparse",
".",
"spdiags",
"(",
"diags",
".",
"flatten",
"(",
")",
",",
"[",
"0",
"]",
",",
"m",
",",
"n",
",",
"format",
"=",
"\"csr\"",
")",
"I",
"=",
"sp",
".",
"sparse",
".",
"eye",
"(",
"m",
",",
"n",
",",
"format",
"=",
"\"csr\"",
")",
"return",
"(",
"r",
"**",
"2",
"-",
"1",
")",
"*",
"I",
"-",
"r",
"*",
"A",
"+",
"D"
] | https://github.com/networkx/networkx/blob/1620568e36702b1cfeaf1c0277b167b6cb93e48d/networkx/linalg/bethehessianmatrix.py#L10-L76 |
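One note on the entry above: its doctest builds `nx.modularity_matrix(G)` even though the function being documented is `bethe_hessian_matrix`, which looks like a slip in the upstream docstring. A usage sketch in the spirit of the cited references, where the number of negative eigenvalues of H(r) estimates the number of communities; the planted-partition parameters are arbitrary.

```python
import numpy as np
import networkx as nx

# Two planted communities of 50 nodes each.
G = nx.planted_partition_graph(2, 50, p_in=0.4, p_out=0.02, seed=1)

H = nx.bethe_hessian_matrix(G)             # default regularizer r from the degree moments
eigvals = np.linalg.eigvalsh(H.toarray())  # H is returned as a sparse CSR matrix
print("estimated number of communities:", int(np.sum(eigvals < 0)))
```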