nwo (string, 5-106 chars) | sha (string, 40 chars) | path (string, 4-174 chars) | language (string, 1 distinct value: "python") | identifier (string, 1-140 chars) | parameters (string, 0-87.7k chars) | argument_list (string, 1 distinct value) | return_statement (string, 0-426k chars) | docstring (string, 0-64.3k chars) | docstring_summary (string, 0-26.3k chars) | docstring_tokens (sequence) | function (string, 18-4.83M chars) | function_tokens (sequence) | url (string, 83-304 chars)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
google-research/rigl | f18abc7d82ae3acc6736068408a0186c9efa575c | rigl/experimental/jax/pruning/masked.py | python | _PerNeuronShuffle.__init__ | (self, init_rng, sparsity) | Creates the per-neuron shuffle class, with initial RNG state.
Args:
init_rng: The initial random number generator state to use.
sparsity: The per-layer sparsity of the mask (i.e. % of zeroes), 1.0 will
mask all weights, while 0 will mask none. | Creates the per-neuron shuffle class, with initial RNG state. | [
"Creates",
"the",
"per",
"-",
"neuron",
"shuffle",
"class",
"with",
"initial",
"RNG",
"state",
"."
] | def __init__(self, init_rng, sparsity):
"""Creates the per-neuron shuffle class, with initial RNG state.
Args:
init_rng: The initial random number generator state to use.
sparsity: The per-layer sparsity of the mask (i.e. % of zeroes), 1.0 will
mask all weights, while 0 will mask none.
"""
self._rng = init_rng
self._sparsity = sparsity | [
"def",
"__init__",
"(",
"self",
",",
"init_rng",
",",
"sparsity",
")",
":",
"self",
".",
"_rng",
"=",
"init_rng",
"self",
".",
"_sparsity",
"=",
"sparsity"
] | https://github.com/google-research/rigl/blob/f18abc7d82ae3acc6736068408a0186c9efa575c/rigl/experimental/jax/pruning/masked.py#L380-L389 |
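As context for the record above: the stored `sparsity` is the fraction of weights to zero out per layer. Here is a minimal sketch of turning such a sparsity value into a shuffled binary mask in JAX; the `neuron_mask` helper, the shapes, and the seed are illustrative assumptions, not code from the rigl repo:

```python
import jax
import jax.numpy as jnp

def neuron_mask(rng, n_weights, sparsity):
    # `sparsity` is the fraction of zeros (1.0 masks everything, 0.0 masks
    # nothing), matching the convention in the docstring above.
    n_zeros = int(round(sparsity * n_weights))
    base = jnp.concatenate([jnp.zeros(n_zeros), jnp.ones(n_weights - n_zeros)])
    return jax.random.permutation(rng, base)  # shuffle the zero/one positions

rng = jax.random.PRNGKey(0)  # illustrative initial RNG state
print(neuron_mask(rng, 10, 0.3))  # 3 of the 10 entries are zero, in random order
```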
||
facebookresearch/pytorch_GAN_zoo | b75dee40918caabb4fe7ec561522717bf096a8cb | models/trainer/DCGAN_trainer.py | python | DCGANTrainer.initModel | (self) | [] | def initModel(self):
self.model = DCGAN(useGPU=self.useGPU,
**vars(self.modelConfig)) | [
"def",
"initModel",
"(",
"self",
")",
":",
"self",
".",
"model",
"=",
"DCGAN",
"(",
"useGPU",
"=",
"self",
".",
"useGPU",
",",
"*",
"*",
"vars",
"(",
"self",
".",
"modelConfig",
")",
")"
] | https://github.com/facebookresearch/pytorch_GAN_zoo/blob/b75dee40918caabb4fe7ec561522717bf096a8cb/models/trainer/DCGAN_trainer.py#L33-L35 |
||||
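The `**vars(self.modelConfig)` call above unpacks a plain config object's attributes into keyword arguments. A tiny standalone demonstration of the idiom; the `Config` fields and `build` signature are made up for illustration, not the real DCGAN config:

```python
class Config:
    def __init__(self):
        self.dimLatentVector = 64  # hypothetical fields
        self.useGPU = False

def build(useGPU=False, dimLatentVector=128):
    return useGPU, dimLatentVector

cfg = Config()
print(build(**vars(cfg)))  # vars(cfg) is cfg.__dict__, so this prints (False, 64)
```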
biopython/biopython | 2dd97e71762af7b046d7f7f8a4f1e38db6b06c86 | Bio/SearchIO/HmmerIO/hmmer3_tab.py | python | Hmmer3TabWriter.write_file | (self, qresults) | return qresult_counter, hit_counter, hsp_counter, frag_counter | Write to the handle.
Returns a tuple of how many QueryResult, Hit, HSP, and HSPFragment objects were written. | Write to the handle. | [
"Write",
"to",
"the",
"handle",
"."
] | def write_file(self, qresults):
"""Write to the handle.
Returns a tuple of how many QueryResult, Hit, HSP, and HSPFragment objects were written.
"""
handle = self.handle
qresult_counter, hit_counter, hsp_counter, frag_counter = 0, 0, 0, 0
try:
first_qresult = next(qresults)
except StopIteration:
handle.write(self._build_header())
else:
# write header
handle.write(self._build_header(first_qresult))
# and then the qresults
for qresult in chain([first_qresult], qresults):
if qresult:
handle.write(self._build_row(qresult))
qresult_counter += 1
hit_counter += len(qresult)
hsp_counter += sum(len(hit) for hit in qresult)
frag_counter += sum(len(hit.fragments) for hit in qresult)
return qresult_counter, hit_counter, hsp_counter, frag_counter | [
"def",
"write_file",
"(",
"self",
",",
"qresults",
")",
":",
"handle",
"=",
"self",
".",
"handle",
"qresult_counter",
",",
"hit_counter",
",",
"hsp_counter",
",",
"frag_counter",
"=",
"0",
",",
"0",
",",
"0",
",",
"0",
"try",
":",
"first_qresult",
"=",
"next",
"(",
"qresults",
")",
"except",
"StopIteration",
":",
"handle",
".",
"write",
"(",
"self",
".",
"_build_header",
"(",
")",
")",
"else",
":",
"# write header",
"handle",
".",
"write",
"(",
"self",
".",
"_build_header",
"(",
"first_qresult",
")",
")",
"# and then the qresults",
"for",
"qresult",
"in",
"chain",
"(",
"[",
"first_qresult",
"]",
",",
"qresults",
")",
":",
"if",
"qresult",
":",
"handle",
".",
"write",
"(",
"self",
".",
"_build_row",
"(",
"qresult",
")",
")",
"qresult_counter",
"+=",
"1",
"hit_counter",
"+=",
"len",
"(",
"qresult",
")",
"hsp_counter",
"+=",
"sum",
"(",
"len",
"(",
"hit",
")",
"for",
"hit",
"in",
"qresult",
")",
"frag_counter",
"+=",
"sum",
"(",
"len",
"(",
"hit",
".",
"fragments",
")",
"for",
"hit",
"in",
"qresult",
")",
"return",
"qresult_counter",
",",
"hit_counter",
",",
"hsp_counter",
",",
"frag_counter"
] | https://github.com/biopython/biopython/blob/2dd97e71762af7b046d7f7f8a4f1e38db6b06c86/Bio/SearchIO/HmmerIO/hmmer3_tab.py#L222-L247 |
|
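The `next()` / `chain()` idiom in `write_file` above is worth isolating: it peeks at the first result so the header can be built from it, then stitches the iterator back together so nothing is lost. A self-contained sketch of the same pattern:

```python
from itertools import chain

def count_items(iterable):
    iterator = iter(iterable)
    try:
        first = next(iterator)  # peek at the first element for "header" purposes
    except StopIteration:
        return 0  # empty input: only a bare header would be written
    count = 0
    for _ in chain([first], iterator):  # put the peeked element back in front
        count += 1
    return count

print(count_items(["q1", "q2", "q3"]))  # 3 -- the peeked element is not lost
```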
nansencenter/nansat | 5700ec673fbf522c19b8dedcb01cc15f7cd29a6a | nansat/vrt.py | python | VRT.fix_global_metadata | (self, rm_metadata) | Remove unwanted global metadata and escape special characters | Remove unwanted global metadata and escape special characters | [
"Remove",
"unwanted",
"global",
"metadata",
"and",
"escape",
"special",
"characters"
] | def fix_global_metadata(self, rm_metadata):
"""Remove unwanted global metadata and escape special characters"""
metadata = remove_keys(self.dataset.GetMetadata(), rm_metadata)
# Apply escaping to metadata strings to preserve special characters (in XML/HTML format)
metadata_escaped = {}
for key, val in list(metadata.items()):
# Keys not escaped - this may be changed if needed...
metadata_escaped[key] = gdal.EscapeString(val, gdal.CPLES_XML)
self.dataset.SetMetadata(metadata_escaped)
self.dataset.FlushCache() | [
"def",
"fix_global_metadata",
"(",
"self",
",",
"rm_metadata",
")",
":",
"metadata",
"=",
"remove_keys",
"(",
"self",
".",
"dataset",
".",
"GetMetadata",
"(",
")",
",",
"rm_metadata",
")",
"# Apply escaping to metadata strings to preserve special characters (in XML/HTML format)",
"metadata_escaped",
"=",
"{",
"}",
"for",
"key",
",",
"val",
"in",
"list",
"(",
"metadata",
".",
"items",
"(",
")",
")",
":",
"# Keys not escaped - this may be changed if needed...",
"metadata_escaped",
"[",
"key",
"]",
"=",
"gdal",
".",
"EscapeString",
"(",
"val",
",",
"gdal",
".",
"CPLES_XML",
")",
"self",
".",
"dataset",
".",
"SetMetadata",
"(",
"metadata_escaped",
")",
"self",
".",
"dataset",
".",
"FlushCache",
"(",
")"
] | https://github.com/nansencenter/nansat/blob/5700ec673fbf522c19b8dedcb01cc15f7cd29a6a/nansat/vrt.py#L801-L810 |
||
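In the record above, `gdal.EscapeString(val, gdal.CPLES_XML)` is the GDAL helper that XML-escapes metadata values. A minimal usage sketch, assuming the GDAL Python bindings are installed:

```python
from osgeo import gdal

# XML-escape a metadata value the same way fix_global_metadata does above.
print(gdal.EscapeString("a < b & c", gdal.CPLES_XML))  # expected: 'a &lt; b &amp; c'
```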
zhl2008/awd-platform | 0416b31abea29743387b10b3914581fbe8e7da5e | web_flaskbb/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/_structures.py | python | Infinity.__eq__ | (self, other) | return isinstance(other, self.__class__) | [] | def __eq__(self, other):
return isinstance(other, self.__class__) | [
"def",
"__eq__",
"(",
"self",
",",
"other",
")",
":",
"return",
"isinstance",
"(",
"other",
",",
"self",
".",
"__class__",
")"
] | https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/_structures.py#L21-L22 |
|||
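The record above is part of packaging's `Infinity` sentinel, where equality is defined purely by class membership, so every instance compares equal to every other. A quick standalone check of that behavior:

```python
class Infinity:
    def __eq__(self, other):
        # All Infinity sentinels are equal; nothing else ever is.
        return isinstance(other, self.__class__)

print(Infinity() == Infinity())  # True
print(Infinity() == 10**9)       # False: even a huge int is not an Infinity
```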
TencentCloud/tencentcloud-sdk-python | 3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2 | tencentcloud/tci/v20190318/models.py | python | ModifyLibraryResponse.__init__ | (self) | r"""
:param LibraryId: Unique identifier of the person library
:type LibraryId: str
:param LibraryName: Name of the person library
:type LibraryName: str
:param RequestId: Unique request ID, returned with every request. Provide this RequestId when reporting an issue.
:type RequestId: str | r"""
:param LibraryId: Unique identifier of the person library
:type LibraryId: str
:param LibraryName: Name of the person library
:type LibraryName: str
:param RequestId: Unique request ID, returned with every request. Provide this RequestId when reporting an issue.
:type RequestId: str | [
"r",
":",
"param",
"LibraryId",
":",
"人员库唯一标识符",
":",
"type",
"LibraryId",
":",
"str",
":",
"param",
"LibraryName",
":",
"人员库名称",
":",
"type",
"LibraryName",
":",
"str",
":",
"param",
"RequestId",
":",
"唯一请求",
"ID,每次请求都会返回。定位问题时需要提供该次请求的",
"RequestId。",
":",
"type",
"RequestId",
":",
"str"
] | def __init__(self):
r"""
:param LibraryId: Unique identifier of the person library
:type LibraryId: str
:param LibraryName: Name of the person library
:type LibraryName: str
:param RequestId: Unique request ID, returned with every request. Provide this RequestId when reporting an issue.
:type RequestId: str
"""
self.LibraryId = None
self.LibraryName = None
self.RequestId = None | [
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"LibraryId",
"=",
"None",
"self",
".",
"LibraryName",
"=",
"None",
"self",
".",
"RequestId",
"=",
"None"
] | https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/tci/v20190318/models.py#L3152-L3163 |
||
phonopy/phonopy | 816586d0ba8177482ecf40e52f20cbdee2260d51 | phonopy/api_phonopy.py | python | Phonopy._shape_supercell_matrix | (self, smat) | return shape_supercell_matrix(smat) | [] | def _shape_supercell_matrix(self, smat):
return shape_supercell_matrix(smat) | [
"def",
"_shape_supercell_matrix",
"(",
"self",
",",
"smat",
")",
":",
"return",
"shape_supercell_matrix",
"(",
"smat",
")"
] | https://github.com/phonopy/phonopy/blob/816586d0ba8177482ecf40e52f20cbdee2260d51/phonopy/api_phonopy.py#L3543-L3544 |
|||
openshift/openshift-tools | 1188778e728a6e4781acf728123e5b356380fe6f | openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_openshift/library/oc_adm_policy_group.py | python | SecurityContextConstraints.groups | (self) | return self._groups | groups property getter | groups property getter | [
"groups",
"property",
"getter"
] | def groups(self):
''' groups property getter '''
if self._groups is None:
self._groups = self.get_groups()
return self._groups | [
"def",
"groups",
"(",
"self",
")",
":",
"if",
"self",
".",
"_groups",
"is",
"None",
":",
"self",
".",
"_groups",
"=",
"self",
".",
"get_groups",
"(",
")",
"return",
"self",
".",
"_groups"
] | https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_openshift/library/oc_adm_policy_group.py#L1872-L1876 |
|
cournape/Bento | 37de23d784407a7c98a4a15770ffc570d5f32d70 | bento/private/version.py | python | NormalizedVersion.__eq__ | (self, other) | return self.parts == other.parts | [] | def __eq__(self, other):
if not isinstance(other, NormalizedVersion):
self._cannot_compare(other)
return self.parts == other.parts | [
"def",
"__eq__",
"(",
"self",
",",
"other",
")",
":",
"if",
"not",
"isinstance",
"(",
"other",
",",
"NormalizedVersion",
")",
":",
"self",
".",
"_cannot_compare",
"(",
"other",
")",
"return",
"self",
".",
"parts",
"==",
"other",
".",
"parts"
] | https://github.com/cournape/Bento/blob/37de23d784407a7c98a4a15770ffc570d5f32d70/bento/private/version.py#L197-L200 |
|||
david8862/keras-YOLOv3-model-set | e9f0f94109430973525219e66eeafe8a2f51363d | common/backbones/shufflenet.py | python | ShuffleNet | (include_top=True,
input_tensor=None,
scale_factor=1.0,
pooling=None,
input_shape=None,
groups=1,
weights='imagenet',
num_shuffle_units=[3, 7, 3],
bottleneck_ratio=0.25,
classes=1000,
**kwargs) | return model | ShuffleNet implementation for Keras 2
ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices
Xiangyu Zhang, Xinyu Zhou, Mengxiao Lin, Jian Sun
https://arxiv.org/pdf/1707.01083.pdf
Note that only TensorFlow is supported for now, therefore it only works
with the data format `image_data_format='channels_last'` in your Keras
config at `~/.keras/keras.json`.
Parameters
----------
include_top: bool(True)
whether to include the fully-connected layer at the top of the network.
input_tensor:
optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model.
scale_factor:
scales the number of output channels
input_shape:
pooling:
Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
groups: int
number of groups per channel
num_shuffle_units: list([3,7,3])
number of stages (list length) and the number of shufflenet units in a
stage beginning with stage 2 because stage 1 is fixed
e.g. idx 0 contains 3 + 1 (first shuffle unit in each stage differs) shufflenet units for stage 2
idx 1 contains 7 + 1 Shufflenet Units for stage 3 and
idx 2 contains 3 + 1 Shufflenet Units
bottleneck_ratio:
bottleneck ratio implies the ratio of bottleneck channels to output channels.
For example, bottleneck ratio = 1 : 4 means the output feature map is 4 times
the width of the bottleneck feature map.
classes: int(1000)
number of classes to predict
Returns
-------
A Keras model instance
References
----------
- [ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices]
(http://www.arxiv.org/pdf/1707.01083.pdf) | ShuffleNet implementation for Keras 2
ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices
Xiangyu Zhang, Xinyu Zhou, Mengxiao Lin, Jian Sun
https://arxiv.org/pdf/1707.01083.pdf
Note that only TensorFlow is supported for now, therefore it only works
with the data format `image_data_format='channels_last'` in your Keras
config at `~/.keras/keras.json`.
Parameters
----------
include_top: bool(True)
whether to include the fully-connected layer at the top of the network.
input_tensor:
optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model.
scale_factor:
scales the number of output channels
input_shape:
pooling:
Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
groups: int
number of groups per channel
num_shuffle_units: list([3,7,3])
number of stages (list length) and the number of shufflenet units in a
stage beginning with stage 2 because stage 1 is fixed
e.g. idx 0 contains 3 + 1 (first shuffle unit in each stage differs) shufflenet units for stage 2
idx 1 contains 7 + 1 Shufflenet Units for stage 3 and
idx 2 contains 3 + 1 Shufflenet Units
bottleneck_ratio:
bottleneck ratio implies the ratio of bottleneck channels to output channels.
For example, bottleneck ratio = 1 : 4 means the output feature map is 4 times
the width of the bottleneck feature map.
classes: int(1000)
number of classes to predict
Returns
-------
A Keras model instance
References
----------
- [ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices]
(http://www.arxiv.org/pdf/1707.01083.pdf) | [
"ShuffleNet",
"implementation",
"for",
"Keras",
"2",
"ShuffleNet",
":",
"An",
"Extremely",
"Efficient",
"Convolutional",
"Neural",
"Network",
"for",
"Mobile",
"Devices",
"Xiangyu",
"Zhang",
"Xinyu",
"Zhou",
"Mengxiao",
"Lin",
"Jian",
"Sun",
"https",
":",
"//",
"arxiv",
".",
"org",
"/",
"pdf",
"/",
"1707",
".",
"01083",
".",
"pdf",
"Note",
"that",
"only",
"TensorFlow",
"is",
"supported",
"for",
"now",
"therefore",
"it",
"only",
"works",
"with",
"the",
"data",
"format",
"image_data_format",
"=",
"channels_last",
"in",
"your",
"Keras",
"config",
"at",
"~",
"/",
".",
"keras",
"/",
"keras",
".",
"json",
".",
"Parameters",
"----------",
"include_top",
":",
"bool",
"(",
"True",
")",
"whether",
"to",
"include",
"the",
"fully",
"-",
"connected",
"layer",
"at",
"the",
"top",
"of",
"the",
"network",
".",
"input_tensor",
":",
"optional",
"Keras",
"tensor",
"(",
"i",
".",
"e",
".",
"output",
"of",
"layers",
".",
"Input",
"()",
")",
"to",
"use",
"as",
"image",
"input",
"for",
"the",
"model",
".",
"scale_factor",
":",
"scales",
"the",
"number",
"of",
"output",
"channels",
"input_shape",
":",
"pooling",
":",
"Optional",
"pooling",
"mode",
"for",
"feature",
"extraction",
"when",
"include_top",
"is",
"False",
".",
"-",
"None",
"means",
"that",
"the",
"output",
"of",
"the",
"model",
"will",
"be",
"the",
"4D",
"tensor",
"output",
"of",
"the",
"last",
"convolutional",
"layer",
".",
"-",
"avg",
"means",
"that",
"global",
"average",
"pooling",
"will",
"be",
"applied",
"to",
"the",
"output",
"of",
"the",
"last",
"convolutional",
"layer",
"and",
"thus",
"the",
"output",
"of",
"the",
"model",
"will",
"be",
"a",
"2D",
"tensor",
".",
"-",
"max",
"means",
"that",
"global",
"max",
"pooling",
"will",
"be",
"applied",
".",
"groups",
":",
"int",
"number",
"of",
"groups",
"per",
"channel",
"num_shuffle_units",
":",
"list",
"(",
"[",
"3",
"7",
"3",
"]",
")",
"number",
"of",
"stages",
"(",
"list",
"length",
")",
"and",
"the",
"number",
"of",
"shufflenet",
"units",
"in",
"a",
"stage",
"beginning",
"with",
"stage",
"2",
"because",
"stage",
"1",
"is",
"fixed",
"e",
".",
"g",
".",
"idx",
"0",
"contains",
"3",
"+",
"1",
"(",
"first",
"shuffle",
"unit",
"in",
"each",
"stage",
"differs",
")",
"shufflenet",
"units",
"for",
"stage",
"2",
"idx",
"1",
"contains",
"7",
"+",
"1",
"Shufflenet",
"Units",
"for",
"stage",
"3",
"and",
"idx",
"2",
"contains",
"3",
"+",
"1",
"Shufflenet",
"Units",
"bottleneck_ratio",
":",
"bottleneck",
"ratio",
"implies",
"the",
"ratio",
"of",
"bottleneck",
"channels",
"to",
"output",
"channels",
".",
"For",
"example",
"bottleneck",
"ratio",
"=",
"1",
":",
"4",
"means",
"the",
"output",
"feature",
"map",
"is",
"4",
"times",
"the",
"width",
"of",
"the",
"bottleneck",
"feature",
"map",
".",
"classes",
":",
"int",
"(",
"1000",
")",
"number",
"of",
"classes",
"to",
"predict",
"Returns",
"-------",
"A",
"Keras",
"model",
"instance",
"References",
"----------",
"-",
"[",
"ShuffleNet",
":",
"An",
"Extremely",
"Efficient",
"Convolutional",
"Neural",
"Network",
"for",
"Mobile",
"Devices",
"]",
"(",
"http",
":",
"//",
"www",
".",
"arxiv",
".",
"org",
"/",
"pdf",
"/",
"1707",
".",
"01083",
".",
"pdf",
")"
] | def ShuffleNet(include_top=True,
input_tensor=None,
scale_factor=1.0,
pooling=None,
input_shape=None,
groups=1,
weights='imagenet',
num_shuffle_units=[3, 7, 3],
bottleneck_ratio=0.25,
classes=1000,
**kwargs):
"""
ShuffleNet implementation for Keras 2
ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices
Xiangyu Zhang, Xinyu Zhou, Mengxiao Lin, Jian Sun
https://arxiv.org/pdf/1707.01083.pdf
Note that only TensorFlow is supported for now, therefore it only works
with the data format `image_data_format='channels_last'` in your Keras
config at `~/.keras/keras.json`.
Parameters
----------
include_top: bool(True)
whether to include the fully-connected layer at the top of the network.
input_tensor:
optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model.
scale_factor:
scales the number of output channels
input_shape:
pooling:
Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
groups: int
number of groups per channel
num_shuffle_units: list([3,7,3])
number of stages (list length) and the number of shufflenet units in a
stage beginning with stage 2 because stage 1 is fixed
e.g. idx 0 contains 3 + 1 (first shuffle unit in each stage differs) shufflenet units for stage 2
idx 1 contains 7 + 1 Shufflenet Units for stage 3 and
idx 2 contains 3 + 1 Shufflenet Units
bottleneck_ratio:
bottleneck ratio implies the ratio of bottleneck channels to output channels.
For example, bottleneck ratio = 1 : 4 means the output feature map is 4 times
the width of the bottleneck feature map.
classes: int(1000)
number of classes to predict
Returns
-------
A Keras model instance
References
----------
- [ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices]
(http://www.arxiv.org/pdf/1707.01083.pdf)
"""
if K.backend() != 'tensorflow':
raise RuntimeError('Only TensorFlow backend is currently supported, '
'as other backends do not support ')
name = "ShuffleNet_%.2gX_g%d_br_%.2g_%s" % (scale_factor, groups, bottleneck_ratio, "".join([str(x) for x in num_shuffle_units]))
input_shape = _obtain_input_shape(input_shape,
default_size=224,
min_size=28,
require_flatten=include_top,
data_format=K.image_data_format())
out_dim_stage_two = {1: 144, 2: 200, 3: 240, 4: 272, 8: 384}
if groups not in out_dim_stage_two:
raise ValueError("Invalid number of groups.")
if pooling not in ['max','avg', None]:
raise ValueError("Invalid value for pooling.")
if not (float(scale_factor) * 4).is_integer():
raise ValueError("Invalid value for scale_factor. Should be x over 4.")
exp = np.insert(np.arange(0, len(num_shuffle_units), dtype=np.float32), 0, 0)
out_channels_in_stage = 2 ** exp
out_channels_in_stage *= out_dim_stage_two[groups] # calculate output channels for each stage
out_channels_in_stage[0] = 24 # first stage has always 24 output channels
out_channels_in_stage *= scale_factor
out_channels_in_stage = out_channels_in_stage.astype(int)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
#if not K.is_keras_tensor(input_tensor):
#img_input = Input(tensor=input_tensor, shape=input_shape)
#else:
#img_input = input_tensor
img_input = input_tensor
# create shufflenet architecture
x = YoloConv2D(filters=out_channels_in_stage[0], kernel_size=(3, 3), padding='same',
use_bias=False, strides=(2, 2), activation="relu", name="conv1")(img_input)
x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same', name="maxpool1")(x)
# create stages containing shufflenet units beginning at stage 2
for stage in range(0, len(num_shuffle_units)):
repeat = num_shuffle_units[stage]
x = _block(x, out_channels_in_stage, repeat=repeat,
bottleneck_ratio=bottleneck_ratio,
groups=groups, stage=stage + 2)
if include_top:
#x = Dense(units=classes, name="fc")(x)
#x = Activation('softmax', name='softmax')(x)
x = GlobalAveragePooling2D(name='global_avg_pool')(x)
x = Dense(units=classes, activation='softmax',
use_bias=True, name='Logits')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D(name='global_avg_pool')(x)
elif pooling == 'max':
x = GlobalMaxPooling2D(name='global_max_pool')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs=inputs, outputs=x, name=name)
# Load weights.
if weights == 'imagenet':
if K.image_data_format() == 'channels_first':
raise ValueError('Weights for "channels_first" format '
'are not available.')
if include_top:
model_name = ('shufflenet_weights_tf_dim_ordering_tf_kernels_' +
str(alpha) + '_' + str(rows) + '.h5')
weigh_path = BASE_WEIGHT_PATH + model_name
weights_path = get_file(
model_name, weigh_path, cache_subdir='models')
else:
model_name = ('shufflenet_weights_tf_dim_ordering_tf_kernels_' +
str(alpha) + '_' + str(rows) + '_no_top' + '.h5')
weigh_path = BASE_WEIGHT_PATH + model_name
weights_path = get_file(
model_name, weigh_path, cache_subdir='models')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model | [
"def",
"ShuffleNet",
"(",
"include_top",
"=",
"True",
",",
"input_tensor",
"=",
"None",
",",
"scale_factor",
"=",
"1.0",
",",
"pooling",
"=",
"None",
",",
"input_shape",
"=",
"None",
",",
"groups",
"=",
"1",
",",
"weights",
"=",
"'imagenet'",
",",
"num_shuffle_units",
"=",
"[",
"3",
",",
"7",
",",
"3",
"]",
",",
"bottleneck_ratio",
"=",
"0.25",
",",
"classes",
"=",
"1000",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"K",
".",
"backend",
"(",
")",
"!=",
"'tensorflow'",
":",
"raise",
"RuntimeError",
"(",
"'Only TensorFlow backend is currently supported, '",
"'as other backends do not support '",
")",
"name",
"=",
"\"ShuffleNet_%.2gX_g%d_br_%.2g_%s\"",
"%",
"(",
"scale_factor",
",",
"groups",
",",
"bottleneck_ratio",
",",
"\"\"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"num_shuffle_units",
"]",
")",
")",
"input_shape",
"=",
"_obtain_input_shape",
"(",
"input_shape",
",",
"default_size",
"=",
"224",
",",
"min_size",
"=",
"28",
",",
"require_flatten",
"=",
"include_top",
",",
"data_format",
"=",
"K",
".",
"image_data_format",
"(",
")",
")",
"out_dim_stage_two",
"=",
"{",
"1",
":",
"144",
",",
"2",
":",
"200",
",",
"3",
":",
"240",
",",
"4",
":",
"272",
",",
"8",
":",
"384",
"}",
"if",
"groups",
"not",
"in",
"out_dim_stage_two",
":",
"raise",
"ValueError",
"(",
"\"Invalid number of groups.\"",
")",
"if",
"pooling",
"not",
"in",
"[",
"'max'",
",",
"'avg'",
",",
"None",
"]",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for pooling.\"",
")",
"if",
"not",
"(",
"float",
"(",
"scale_factor",
")",
"*",
"4",
")",
".",
"is_integer",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for scale_factor. Should be x over 4.\"",
")",
"exp",
"=",
"np",
".",
"insert",
"(",
"np",
".",
"arange",
"(",
"0",
",",
"len",
"(",
"num_shuffle_units",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
",",
"0",
",",
"0",
")",
"out_channels_in_stage",
"=",
"2",
"**",
"exp",
"out_channels_in_stage",
"*=",
"out_dim_stage_two",
"[",
"groups",
"]",
"# calculate output channels for each stage",
"out_channels_in_stage",
"[",
"0",
"]",
"=",
"24",
"# first stage has always 24 output channels",
"out_channels_in_stage",
"*=",
"scale_factor",
"out_channels_in_stage",
"=",
"out_channels_in_stage",
".",
"astype",
"(",
"int",
")",
"if",
"input_tensor",
"is",
"None",
":",
"img_input",
"=",
"Input",
"(",
"shape",
"=",
"input_shape",
")",
"else",
":",
"#if not K.is_keras_tensor(input_tensor):",
"#img_input = Input(tensor=input_tensor, shape=input_shape)",
"#else:",
"#img_input = input_tensor",
"img_input",
"=",
"input_tensor",
"# create shufflenet architecture",
"x",
"=",
"YoloConv2D",
"(",
"filters",
"=",
"out_channels_in_stage",
"[",
"0",
"]",
",",
"kernel_size",
"=",
"(",
"3",
",",
"3",
")",
",",
"padding",
"=",
"'same'",
",",
"use_bias",
"=",
"False",
",",
"strides",
"=",
"(",
"2",
",",
"2",
")",
",",
"activation",
"=",
"\"relu\"",
",",
"name",
"=",
"\"conv1\"",
")",
"(",
"img_input",
")",
"x",
"=",
"MaxPooling2D",
"(",
"pool_size",
"=",
"(",
"3",
",",
"3",
")",
",",
"strides",
"=",
"(",
"2",
",",
"2",
")",
",",
"padding",
"=",
"'same'",
",",
"name",
"=",
"\"maxpool1\"",
")",
"(",
"x",
")",
"# create stages containing shufflenet units beginning at stage 2",
"for",
"stage",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"num_shuffle_units",
")",
")",
":",
"repeat",
"=",
"num_shuffle_units",
"[",
"stage",
"]",
"x",
"=",
"_block",
"(",
"x",
",",
"out_channels_in_stage",
",",
"repeat",
"=",
"repeat",
",",
"bottleneck_ratio",
"=",
"bottleneck_ratio",
",",
"groups",
"=",
"groups",
",",
"stage",
"=",
"stage",
"+",
"2",
")",
"if",
"include_top",
":",
"#x = Dense(units=classes, name=\"fc\")(x)",
"#x = Activation('softmax', name='softmax')(x)",
"x",
"=",
"GlobalAveragePooling2D",
"(",
"name",
"=",
"'global_avg_pool'",
")",
"(",
"x",
")",
"x",
"=",
"Dense",
"(",
"units",
"=",
"classes",
",",
"activation",
"=",
"'softmax'",
",",
"use_bias",
"=",
"True",
",",
"name",
"=",
"'Logits'",
")",
"(",
"x",
")",
"else",
":",
"if",
"pooling",
"==",
"'avg'",
":",
"x",
"=",
"GlobalAveragePooling2D",
"(",
"name",
"=",
"'global_avg_pool'",
")",
"(",
"x",
")",
"elif",
"pooling",
"==",
"'max'",
":",
"x",
"=",
"GlobalMaxPooling2D",
"(",
"name",
"=",
"'global_max_pool'",
")",
"(",
"x",
")",
"# Ensure that the model takes into account",
"# any potential predecessors of `input_tensor`.",
"if",
"input_tensor",
"is",
"not",
"None",
":",
"inputs",
"=",
"get_source_inputs",
"(",
"input_tensor",
")",
"else",
":",
"inputs",
"=",
"img_input",
"# Create model.",
"model",
"=",
"Model",
"(",
"inputs",
"=",
"inputs",
",",
"outputs",
"=",
"x",
",",
"name",
"=",
"name",
")",
"# Load weights.",
"if",
"weights",
"==",
"'imagenet'",
":",
"if",
"K",
".",
"image_data_format",
"(",
")",
"==",
"'channels_first'",
":",
"raise",
"ValueError",
"(",
"'Weights for \"channels_first\" format '",
"'are not available.'",
")",
"if",
"include_top",
":",
"model_name",
"=",
"(",
"'shufflenet_weights_tf_dim_ordering_tf_kernels_'",
"+",
"str",
"(",
"alpha",
")",
"+",
"'_'",
"+",
"str",
"(",
"rows",
")",
"+",
"'.h5'",
")",
"weigh_path",
"=",
"BASE_WEIGHT_PATH",
"+",
"model_name",
"weights_path",
"=",
"get_file",
"(",
"model_name",
",",
"weigh_path",
",",
"cache_subdir",
"=",
"'models'",
")",
"else",
":",
"model_name",
"=",
"(",
"'shufflenet_weights_tf_dim_ordering_tf_kernels_'",
"+",
"str",
"(",
"alpha",
")",
"+",
"'_'",
"+",
"str",
"(",
"rows",
")",
"+",
"'_no_top'",
"+",
"'.h5'",
")",
"weigh_path",
"=",
"BASE_WEIGHT_PATH",
"+",
"model_name",
"weights_path",
"=",
"get_file",
"(",
"model_name",
",",
"weigh_path",
",",
"cache_subdir",
"=",
"'models'",
")",
"model",
".",
"load_weights",
"(",
"weights_path",
")",
"elif",
"weights",
"is",
"not",
"None",
":",
"model",
".",
"load_weights",
"(",
"weights",
")",
"return",
"model"
] | https://github.com/david8862/keras-YOLOv3-model-set/blob/e9f0f94109430973525219e66eeafe8a2f51363d/common/backbones/shufflenet.py#L23-L181 |
|
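Two notes on the ShuffleNet record above. First, the imagenet weight-loading branch references `alpha` and `rows`, neither of which is defined anywhere in the function (the block looks adapted from Keras' MobileNet loader), so `weights='imagenet'` raises a NameError as written. Second, the stage-channel arithmetic can be replayed standalone; this sketch just re-runs those lines with the default arguments:

```python
import numpy as np

num_shuffle_units = [3, 7, 3]  # defaults from the function above
out_dim_stage_two = {1: 144, 2: 200, 3: 240, 4: 272, 8: 384}
groups, scale_factor = 1, 1.0

exp = np.insert(np.arange(0, len(num_shuffle_units), dtype=np.float32), 0, 0)
out_channels = 2 ** exp                    # [1, 1, 2, 4]
out_channels *= out_dim_stage_two[groups]  # [144, 144, 288, 576]
out_channels[0] = 24                       # stage 1 is always 24 channels
out_channels *= scale_factor
print(out_channels.astype(int))            # [ 24 144 288 576]
```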
tp4a/teleport | 1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad | server/www/packages/packages-darwin/x64/PIL/ImageDraw.py | python | _color_diff | (rgb1, rgb2) | return abs(rgb1[0]-rgb2[0]) + abs(rgb1[1]-rgb2[1]) + abs(rgb1[2]-rgb2[2]) | Uses 1-norm distance to calculate difference between two rgb values. | Uses 1-norm distance to calculate difference between two rgb values. | [
"Uses",
"1",
"-",
"norm",
"distance",
"to",
"calculate",
"difference",
"between",
"two",
"rgb",
"values",
"."
] | def _color_diff(rgb1, rgb2):
"""
Uses 1-norm distance to calculate difference between two rgb values.
"""
return abs(rgb1[0]-rgb2[0]) + abs(rgb1[1]-rgb2[1]) + abs(rgb1[2]-rgb2[2]) | [
"def",
"_color_diff",
"(",
"rgb1",
",",
"rgb2",
")",
":",
"return",
"abs",
"(",
"rgb1",
"[",
"0",
"]",
"-",
"rgb2",
"[",
"0",
"]",
")",
"+",
"abs",
"(",
"rgb1",
"[",
"1",
"]",
"-",
"rgb2",
"[",
"1",
"]",
")",
"+",
"abs",
"(",
"rgb1",
"[",
"2",
"]",
"-",
"rgb2",
"[",
"2",
"]",
")"
] | https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-darwin/x64/PIL/ImageDraw.py#L383-L387 |
|
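A quick worked example of the 1-norm distance above, with the helper inlined so it runs standalone:

```python
def color_diff(rgb1, rgb2):
    # Same 1-norm (Manhattan) distance as PIL's _color_diff above.
    return abs(rgb1[0] - rgb2[0]) + abs(rgb1[1] - rgb2[1]) + abs(rgb1[2] - rgb2[2])

print(color_diff((255, 0, 0), (250, 10, 3)))  # 5 + 10 + 3 = 18
```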
intel/IntelSEAPI | 7997a782fd3fa5621e275bd31060f9795564e6ca | runtool/exporters/DGML.py | python | DGML.get_targets | (self) | return [self.args.output + ".dgml"] | [] | def get_targets(self):
return [self.args.output + ".dgml"] | [
"def",
"get_targets",
"(",
"self",
")",
":",
"return",
"[",
"self",
".",
"args",
".",
"output",
"+",
"\".dgml\"",
"]"
] | https://github.com/intel/IntelSEAPI/blob/7997a782fd3fa5621e275bd31060f9795564e6ca/runtool/exporters/DGML.py#L15-L16 |
|||
pwnieexpress/pwn_plug_sources | 1a23324f5dc2c3de20f9c810269b6a29b2758cad | src/wifitap/scapy.py | python | PacketListField.do_copy | (self, x) | return map(lambda p:p.copy(), x) | [] | def do_copy(self, x):
return map(lambda p:p.copy(), x) | [
"def",
"do_copy",
"(",
"self",
",",
"x",
")",
":",
"return",
"map",
"(",
"lambda",
"p",
":",
"p",
".",
"copy",
"(",
")",
",",
"x",
")"
] | https://github.com/pwnieexpress/pwn_plug_sources/blob/1a23324f5dc2c3de20f9c810269b6a29b2758cad/src/wifitap/scapy.py#L4029-L4030 |
|||
boto/boto | b2a6f08122b2f1b89888d2848e730893595cd001 | boto/gs/bucket.py | python | Bucket.set_def_xml_acl | (self, acl_str, headers=None) | return self.set_xml_acl(acl_str, '', headers,
query_args=DEF_OBJ_ACL) | Sets a bucket's default ACL to an XML string.
:type acl_str: string
:param acl_str: A string containing the ACL XML.
:type headers: dict
:param headers: Additional headers to set during the request. | Sets a bucket's default ACL to an XML string. | [
"Sets",
"a",
"bucket",
"s",
"default",
"ACL",
"to",
"an",
"XML",
"string",
"."
] | def set_def_xml_acl(self, acl_str, headers=None):
"""Sets a bucket's default ACL to an XML string.
:type acl_str: string
:param acl_str: A string containing the ACL XML.
:type headers: dict
:param headers: Additional headers to set during the request.
"""
return self.set_xml_acl(acl_str, '', headers,
query_args=DEF_OBJ_ACL) | [
"def",
"set_def_xml_acl",
"(",
"self",
",",
"acl_str",
",",
"headers",
"=",
"None",
")",
":",
"return",
"self",
".",
"set_xml_acl",
"(",
"acl_str",
",",
"''",
",",
"headers",
",",
"query_args",
"=",
"DEF_OBJ_ACL",
")"
] | https://github.com/boto/boto/blob/b2a6f08122b2f1b89888d2848e730893595cd001/boto/gs/bucket.py#L574-L584 |
|
twilio/twilio-python | 6e1e811ea57a1edfadd5161ace87397c563f6915 | twilio/rest/api/v2010/account/usage/record/__init__.py | python | RecordInstance.uri | (self) | return self._properties['uri'] | :returns: The URI of the resource, relative to `https://api.twilio.com`
:rtype: unicode | :returns: The URI of the resource, relative to `https://api.twilio.com`
:rtype: unicode | [
":",
"returns",
":",
"The",
"URI",
"of",
"the",
"resource",
"relative",
"to",
"https",
":",
"//",
"api",
".",
"twilio",
".",
"com",
":",
"rtype",
":",
"unicode"
] | def uri(self):
"""
:returns: The URI of the resource, relative to `https://api.twilio.com`
:rtype: unicode
"""
return self._properties['uri'] | [
"def",
"uri",
"(",
"self",
")",
":",
"return",
"self",
".",
"_properties",
"[",
"'uri'",
"]"
] | https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/api/v2010/account/usage/record/__init__.py#L687-L692 |
|
Bitmessage/PyBitmessage | 97612b049e0453867d6d90aa628f8e7b007b4d85 | src/network/tcp.py | python | TCPConnection.antiIntersectionDelay | (self, initial=False) | This is a defense against the so-called intersection attacks.
It is called when you notice a peer is requesting non-existing
objects, or right after the connection is established. It will
estimate how long an object will take to propagate across the
network, and skip processing "getdata" requests until then. This
means an attacker only has one shot per IP to perform the attack. | This is a defense against the so-called intersection attacks. | [
"This",
"is",
"a",
"defense",
"against",
"the",
"so",
"called",
"intersection",
"attacks",
"."
] | def antiIntersectionDelay(self, initial=False):
"""
This is a defense against the so-called intersection attacks.
It is called when you notice a peer is requesting non-existing
objects, or right after the connection is established. It will
estimate how long an object will take to propagate across the
network, and skip processing "getdata" requests until then. This
means an attacker only has one shot per IP to perform the attack.
"""
# estimated time for a small object to propagate across the
# whole network
max_known_nodes = max(
len(knownnodes.knownNodes[x]) for x in knownnodes.knownNodes)
delay = math.ceil(math.log(max_known_nodes + 2, 20)) * (
0.2 + invQueue.queueCount / 2.0)
# take the stream with maximum amount of nodes
# +2 is to avoid problems with log(0) and log(1)
# 20 is avg connected nodes count
# 0.2 is avg message transmission time
if delay > 0:
if initial:
self.skipUntil = self.connectedAt + delay
if self.skipUntil > time.time():
logger.debug(
'Initial skipping processing getdata for %.2fs',
self.skipUntil - time.time())
else:
logger.debug(
'Skipping processing getdata due to missing object'
' for %.2fs', delay)
self.skipUntil = time.time() + delay | [
"def",
"antiIntersectionDelay",
"(",
"self",
",",
"initial",
"=",
"False",
")",
":",
"# estimated time for a small object to propagate across the",
"# whole network",
"max_known_nodes",
"=",
"max",
"(",
"len",
"(",
"knownnodes",
".",
"knownNodes",
"[",
"x",
"]",
")",
"for",
"x",
"in",
"knownnodes",
".",
"knownNodes",
")",
"delay",
"=",
"math",
".",
"ceil",
"(",
"math",
".",
"log",
"(",
"max_known_nodes",
"+",
"2",
",",
"20",
")",
")",
"*",
"(",
"0.2",
"+",
"invQueue",
".",
"queueCount",
"/",
"2.0",
")",
"# take the stream with maximum amount of nodes",
"# +2 is to avoid problems with log(0) and log(1)",
"# 20 is avg connected nodes count",
"# 0.2 is avg message transmission time",
"if",
"delay",
">",
"0",
":",
"if",
"initial",
":",
"self",
".",
"skipUntil",
"=",
"self",
".",
"connectedAt",
"+",
"delay",
"if",
"self",
".",
"skipUntil",
">",
"time",
".",
"time",
"(",
")",
":",
"logger",
".",
"debug",
"(",
"'Initial skipping processing getdata for %.2fs'",
",",
"self",
".",
"skipUntil",
"-",
"time",
".",
"time",
"(",
")",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"'Skipping processing getdata due to missing object'",
"' for %.2fs'",
",",
"delay",
")",
"self",
".",
"skipUntil",
"=",
"time",
".",
"time",
"(",
")",
"+",
"delay"
] | https://github.com/Bitmessage/PyBitmessage/blob/97612b049e0453867d6d90aa628f8e7b007b4d85/src/network/tcp.py#L96-L127 |
||
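To make the delay estimate in the method above concrete, here is its formula evaluated standalone; the node count and queue length are hypothetical inputs chosen for illustration, not values from the record:

```python
import math

max_known_nodes = 20000  # hypothetical size of the largest known-nodes stream
queue_count = 10         # hypothetical invQueue.queueCount

# ceil(log_20(nodes + 2)) * (0.2 + queueCount / 2.0), exactly as in the method
delay = math.ceil(math.log(max_known_nodes + 2, 20)) * (0.2 + queue_count / 2.0)
print(delay)  # 4 * 5.2 = 20.8 seconds of skipped "getdata" processing
```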
Axelrod-Python/Axelrod | 00e18323c1b1af74df873773e44f31e1b9a299c6 | axelrod/strategy_transformers.py | python | mixed_reclassifier | (original_classifier, probability, m_player) | return original_classifier | Function to reclassify the strategy | Function to reclassify the strategy | [
"Function",
"to",
"reclassify",
"the",
"strategy"
] | def mixed_reclassifier(original_classifier, probability, m_player):
"""Function to reclassify the strategy"""
# If a single probability, player is passed
if isinstance(probability, float) or isinstance(probability, int):
m_player = [m_player]
probability = [probability]
if min(probability) == max(probability) == 0: # No probability given
return original_classifier
if 1 in probability: # If all probability given to one player
player = m_player[probability.index(1)]
original_classifier["stochastic"] = player.classifier["stochastic"]
return original_classifier
# Otherwise: stochastic.
original_classifier["stochastic"] = True
return original_classifier | [
"def",
"mixed_reclassifier",
"(",
"original_classifier",
",",
"probability",
",",
"m_player",
")",
":",
"# If a single probability, player is passed",
"if",
"isinstance",
"(",
"probability",
",",
"float",
")",
"or",
"isinstance",
"(",
"probability",
",",
"int",
")",
":",
"m_player",
"=",
"[",
"m_player",
"]",
"probability",
"=",
"[",
"probability",
"]",
"if",
"min",
"(",
"probability",
")",
"==",
"max",
"(",
"probability",
")",
"==",
"0",
":",
"# No probability given",
"return",
"original_classifier",
"if",
"1",
"in",
"probability",
":",
"# If all probability given to one player",
"player",
"=",
"m_player",
"[",
"probability",
".",
"index",
"(",
"1",
")",
"]",
"original_classifier",
"[",
"\"stochastic\"",
"]",
"=",
"player",
".",
"classifier",
"[",
"\"stochastic\"",
"]",
"return",
"original_classifier",
"# Otherwise: stochastic.",
"original_classifier",
"[",
"\"stochastic\"",
"]",
"=",
"True",
"return",
"original_classifier"
] | https://github.com/Axelrod-Python/Axelrod/blob/00e18323c1b1af74df873773e44f31e1b9a299c6/axelrod/strategy_transformers.py#L621-L638 |
|
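The three branches of `mixed_reclassifier` above can be exercised without Axelrod itself; this sketch substitutes a minimal stand-in for a player object (only the `classifier` attribute matters here):

```python
class FakePlayer:
    def __init__(self, stochastic):
        self.classifier = {"stochastic": stochastic}  # stand-in for an Axelrod player

def mixed_reclassifier(original_classifier, probability, m_player):
    if isinstance(probability, (float, int)):
        m_player, probability = [m_player], [probability]
    if min(probability) == max(probability) == 0:
        return original_classifier  # no mixing: classification unchanged
    if 1 in probability:  # all weight on a single player: inherit its flag
        player = m_player[probability.index(1)]
        original_classifier["stochastic"] = player.classifier["stochastic"]
        return original_classifier
    original_classifier["stochastic"] = True  # a true mixture is stochastic
    return original_classifier

print(mixed_reclassifier({"stochastic": False}, 0, FakePlayer(True)))     # unchanged
print(mixed_reclassifier({"stochastic": False}, 1, FakePlayer(True)))     # inherits True
print(mixed_reclassifier({"stochastic": False}, 0.5, FakePlayer(False)))  # True
```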
google/grr | 8ad8a4d2c5a93c92729206b7771af19d92d4f915 | grr/server/grr_response_server/gui/api_labels_restricted_call_router.py | python | ApiLabelsRestrictedCallRouter.ListClientApprovals | (self, args, context=None) | return self.delegate.ListClientApprovals(args, context=context) | [] | def ListClientApprovals(self, args, context=None):
# Everybody can list their own user client approvals.
return self.delegate.ListClientApprovals(args, context=context) | [
"def",
"ListClientApprovals",
"(",
"self",
",",
"args",
",",
"context",
"=",
"None",
")",
":",
"# Everybody can list their own user client approvals.",
"return",
"self",
".",
"delegate",
".",
"ListClientApprovals",
"(",
"args",
",",
"context",
"=",
"context",
")"
] | https://github.com/google/grr/blob/8ad8a4d2c5a93c92729206b7771af19d92d4f915/grr/server/grr_response_server/gui/api_labels_restricted_call_router.py#L265-L268 |
|||
makelove/OpenCV-Python-Tutorial | e428d648f7aa50d6a0fb4f4d0fb1bd1a600fef41 | cv-Tkinter-GUI/kivy-GUI/kivy_cv1.py | python | KivyCamera.__init__ | (self, capture, fps, **kwargs) | [] | def __init__(self, capture, fps, **kwargs):
super(KivyCamera, self).__init__(**kwargs)
self.capture = capture
Clock.schedule_interval(self.update, 1.0 / fps) | [
"def",
"__init__",
"(",
"self",
",",
"capture",
",",
"fps",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
"KivyCamera",
",",
"self",
")",
".",
"__init__",
"(",
"*",
"*",
"kwargs",
")",
"self",
".",
"capture",
"=",
"capture",
"Clock",
".",
"schedule_interval",
"(",
"self",
".",
"update",
",",
"1.0",
"/",
"fps",
")"
] | https://github.com/makelove/OpenCV-Python-Tutorial/blob/e428d648f7aa50d6a0fb4f4d0fb1bd1a600fef41/cv-Tkinter-GUI/kivy-GUI/kivy_cv1.py#L27-L30 |
||||
gkrizek/bash-lambda-layer | 703b0ade8174022d44779d823172ab7ac33a5505 | bin/docutils/utils/math/math2html.py | python | Newline.process | (self) | Process contents | Process contents | [
"Process",
"contents"
] | def process(self):
"Process contents"
self.html = ['<br/>\n'] | [
"def",
"process",
"(",
"self",
")",
":",
"self",
".",
"html",
"=",
"[",
"'<br/>\\n'",
"]"
] | https://github.com/gkrizek/bash-lambda-layer/blob/703b0ade8174022d44779d823172ab7ac33a5505/bin/docutils/utils/math/math2html.py#L3718-L3720 |
||
gpodder/mygpo | 7a028ad621d05d4ca0d58fd22fb92656c8835e43 | mygpo/search/index.py | python | search_podcasts | (query) | return results | Search for podcasts according to 'query | Search for podcasts according to 'query | [
"Search",
"for",
"podcasts",
"according",
"to",
"query"
] | def search_podcasts(query):
"""Search for podcasts according to 'query'"""
if is_query_too_short(query):
logger.debug('Found no podcasts for "{query}". Query is too short', query=query)
return Podcast.objects.none()
logger.debug('Searching for "{query}" podcasts"', query=query)
query = SearchQuery(query)
results = (
Podcast.objects.annotate(rank=SearchRank(F("search_vector"), query))
.annotate(
order=ExpressionWrapper(
F("rank") * F("subscribers"), output_field=FloatField()
)
)
.filter(rank__gte=SEARCH_CUTOFF)
.order_by("-order")[:100]
.prefetch_related("slugs")
)
logger.debug(
'Found {count} podcasts for "{query}"', count=len(results), query=query
)
return results | [
"def",
"search_podcasts",
"(",
"query",
")",
":",
"if",
"is_query_too_short",
"(",
"query",
")",
":",
"logger",
".",
"debug",
"(",
"'Found no podcasts for \"{query}\". Query is too short'",
",",
"query",
"=",
"query",
")",
"return",
"Podcast",
".",
"objects",
".",
"none",
"(",
")",
"logger",
".",
"debug",
"(",
"'Searching for \"{query}\" podcasts\"'",
",",
"query",
"=",
"query",
")",
"query",
"=",
"SearchQuery",
"(",
"query",
")",
"results",
"=",
"(",
"Podcast",
".",
"objects",
".",
"annotate",
"(",
"rank",
"=",
"SearchRank",
"(",
"F",
"(",
"\"search_vector\"",
")",
",",
"query",
")",
")",
".",
"annotate",
"(",
"order",
"=",
"ExpressionWrapper",
"(",
"F",
"(",
"\"rank\"",
")",
"*",
"F",
"(",
"\"subscribers\"",
")",
",",
"output_field",
"=",
"FloatField",
"(",
")",
")",
")",
".",
"filter",
"(",
"rank__gte",
"=",
"SEARCH_CUTOFF",
")",
".",
"order_by",
"(",
"\"-order\"",
")",
"[",
":",
"100",
"]",
".",
"prefetch_related",
"(",
"\"slugs\"",
")",
")",
"logger",
".",
"debug",
"(",
"'Found {count} podcasts for \"{query}\"'",
",",
"count",
"=",
"len",
"(",
"results",
")",
",",
"query",
"=",
"query",
")",
"return",
"results"
] | https://github.com/gpodder/mygpo/blob/7a028ad621d05d4ca0d58fd22fb92656c8835e43/mygpo/search/index.py#L24-L50 |
|
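The ranking idea in `search_podcasts` above (full-text rank scaled by subscriber count, cut off below `SEARCH_CUTOFF`) can be checked in isolation. This sketch fakes the annotated rows instead of querying Postgres; the tuples are invented sample data:

```python
rows = [("A", 0.9, 10), ("B", 0.5, 100), ("C", 0.8, 20)]  # (podcast, rank, subscribers)
SEARCH_CUTOFF = 0.6

results = sorted(
    (r for r in rows if r[1] >= SEARCH_CUTOFF),  # filter(rank__gte=SEARCH_CUTOFF)
    key=lambda r: r[1] * r[2],                   # order = rank * subscribers
    reverse=True,                                # order_by("-order")
)
print([name for name, _, _ in results])  # ['C', 'A']: C wins on rank * subscribers
```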
n1nj4sec/pupy | a5d766ea81fdfe3bc2c38c9bdaf10e9b75af3b39 | pupy/network/lib/rpc/core/protocol.py | python | Connection._handle_getattr | (self, oid, name) | return self._access_attr(oid, name, (), "_rpyc_getattr", "allow_getattr", getattr) | [] | def _handle_getattr(self, oid, name):
return self._access_attr(oid, name, (), "_rpyc_getattr", "allow_getattr", getattr) | [
"def",
"_handle_getattr",
"(",
"self",
",",
"oid",
",",
"name",
")",
":",
"return",
"self",
".",
"_access_attr",
"(",
"oid",
",",
"name",
",",
"(",
")",
",",
"\"_rpyc_getattr\"",
",",
"\"allow_getattr\"",
",",
"getattr",
")"
] | https://github.com/n1nj4sec/pupy/blob/a5d766ea81fdfe3bc2c38c9bdaf10e9b75af3b39/pupy/network/lib/rpc/core/protocol.py#L662-L663 |
|||
zhanghe06/python | a678ce38a3770c91ad12e617810bf9f5ccf7898b | fuck/pconline.py | python | get_link | (url, token) | return down_link | Assemble the download link | Assemble the download link | [
"组装下载链接"
] | def get_link(url, token):
"""
Assemble the download link
"""
file_name = url.split('/')[-1]
print file_name
print token
down_link = url.rstrip(file_name)+token+'/'+file_name
print down_link
return down_link | [
"def",
"get_link",
"(",
"url",
",",
"token",
")",
":",
"file_name",
"=",
"url",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"print",
"file_name",
"print",
"token",
"down_link",
"=",
"url",
".",
"rstrip",
"(",
"file_name",
")",
"+",
"token",
"+",
"'/'",
"+",
"file_name",
"print",
"down_link",
"return",
"down_link"
] | https://github.com/zhanghe06/python/blob/a678ce38a3770c91ad12e617810bf9f5ccf7898b/fuck/pconline.py#L43-L52 |
|
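One caveat worth flagging in the record above: `url.rstrip(file_name)` treats `file_name` as a *set of characters* to strip, not as a suffix. It happens to work here because the `/` before the filename stops the stripping, but the idiom is a known trap; a suffix slice is the safe equivalent. A small demonstration with an illustrative URL, not one from the record:

```python
url = "http://img.example.com/files/report.pdf"
file_name = url.split('/')[-1]

print(url.rstrip(file_name))  # 'http://img.example.com/files/' -- saved only by the '/'
print(url[:-len(file_name)])  # 'http://img.example.com/files/' -- safe suffix removal

# Where character-set semantics really bite:
print("banana".rstrip("na"))  # 'b', not 'bana': every trailing 'n'/'a' is stripped
```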
sagemath/sage | f9b2db94f675ff16963ccdefba4f1a3393b3fe0d | src/sage/geometry/polyhedron/plot.py | python | Projection.tikz | (self, view=[0, 0, 1], angle=0, scale=1,
edge_color='blue!95!black', facet_color='blue!95!black',
opacity=0.8, vertex_color='green', axis=False) | r"""
Return a string ``tikz_pic`` consisting of a tikz picture of ``self``
according to a projection ``view`` and an angle ``angle``
obtained via Jmol through the current state property.
INPUT:
- ``view`` - list (default: [0,0,1]) representing the rotation axis (see note below).
- ``angle`` - integer (default: 0) angle of rotation in degree from 0 to 360 (see note
below).
- ``scale`` - integer (default: 1) specifying the scaling of the tikz picture.
- ``edge_color`` - string (default: 'blue!95!black') representing colors which tikz
recognizes.
- ``facet_color`` - string (default: 'blue!95!black') representing colors which tikz
recognizes.
- ``vertex_color`` - string (default: 'green') representing colors which tikz
recognizes.
- ``opacity`` - real number (default: 0.8) between 0 and 1 giving the opacity of
the front facets.
- ``axis`` - Boolean (default: False) draw the axes at the origin or not.
OUTPUT:
- LatexExpr -- containing the TikZ picture.
.. NOTE::
The inputs ``view`` and ``angle`` can be obtained by visualizing it
using ``.show(aspect_ratio=1)``. This will open an interactive view
in your default browser, where you can rotate the polytope. Once
the desired view angle is found, click on the information icon in
the lower right-hand corner and select *Get Viewpoint*. This will
copy a string of the form '[x,y,z],angle' to your local clipboard.
Go back to Sage and type ``Img = P.projection().tikz([x,y,z],angle)``.
The inputs ``view`` and ``angle`` can also be obtained from the
viewer Jmol::
1) Right click on the image
2) Select ``Console``
3) Select the tab ``State``
4) Scroll to the line ``moveto``
It reads something like::
moveto 0.0 {x y z angle} Scale
The ``view`` is then [x,y,z] and ``angle`` is angle.
The following number is the scale.
Jmol performs a rotation of ``angle`` degrees along the
vector [x,y,z] and shows the result from the z-axis.
EXAMPLES::
sage: P1 = polytopes.small_rhombicuboctahedron()
sage: Image1 = P1.projection().tikz([1,3,5], 175, scale=4)
sage: type(Image1)
<class 'sage.misc.latex.LatexExpr'>
sage: print('\n'.join(Image1.splitlines()[:4]))
\begin{tikzpicture}%
[x={(-0.939161cm, 0.244762cm)},
y={(0.097442cm, -0.482887cm)},
z={(0.329367cm, 0.840780cm)},
sage: with open('polytope-tikz1.tex', 'w') as f: # not tested
....: _ = f.write(Image1)
sage: P2 = Polyhedron(vertices=[[1, 1],[1, 2],[2, 1]])
sage: Image2 = P2.projection().tikz(scale=3, edge_color='blue!95!black', facet_color='orange!95!black', opacity=0.4, vertex_color='yellow', axis=True)
sage: type(Image2)
<class 'sage.misc.latex.LatexExpr'>
sage: print('\n'.join(Image2.splitlines()[:4]))
\begin{tikzpicture}%
[scale=3.000000,
back/.style={loosely dotted, thin},
edge/.style={color=blue!95!black, thick},
sage: with open('polytope-tikz2.tex', 'w') as f: # not tested
....: _ = f.write(Image2)
sage: P3 = Polyhedron(vertices=[[-1, -1, 2],[-1, 2, -1],[2, -1, -1]])
sage: P3
A 2-dimensional polyhedron in ZZ^3 defined as the convex hull of 3 vertices
sage: Image3 = P3.projection().tikz([0.5,-1,-0.1], 55, scale=3, edge_color='blue!95!black',facet_color='orange!95!black', opacity=0.7, vertex_color='yellow', axis=True)
sage: print('\n'.join(Image3.splitlines()[:4]))
\begin{tikzpicture}%
[x={(0.658184cm, -0.242192cm)},
y={(-0.096240cm, 0.912008cm)},
z={(-0.746680cm, -0.331036cm)},
sage: with open('polytope-tikz3.tex', 'w') as f: # not tested
....: _ = f.write(Image3)
sage: P = Polyhedron(vertices=[[1,1,0,0],[1,2,0,0],[2,1,0,0],[0,0,1,0],[0,0,0,1]])
sage: P
A 4-dimensional polyhedron in ZZ^4 defined as the convex hull of 5 vertices
sage: P.projection().tikz()
Traceback (most recent call last):
...
NotImplementedError: The polytope has to live in 2 or 3 dimensions.
.. TODO::
Make it possible to draw Schlegel diagram for 4-polytopes. ::
sage: P=Polyhedron(vertices=[[1,1,0,0],[1,2,0,0],[2,1,0,0],[0,0,1,0],[0,0,0,1]])
sage: P
A 4-dimensional polyhedron in ZZ^4 defined as the convex hull of 5 vertices
sage: P.projection().tikz()
Traceback (most recent call last):
...
NotImplementedError: The polytope has to live in 2 or 3 dimensions.
Make it possible to draw 3-polytopes living in higher dimension. | r"""
Return a string ``tikz_pic`` consisting of a tikz picture of ``self``
according to a projection ``view`` and an angle ``angle``
obtained via Jmol through the current state property. | [
"r",
"Return",
"a",
"string",
"tikz_pic",
"consisting",
"of",
"a",
"tikz",
"picture",
"of",
"self",
"according",
"to",
"a",
"projection",
"view",
"and",
"an",
"angle",
"angle",
"obtained",
"via",
"Jmol",
"through",
"the",
"current",
"state",
"property",
"."
] | def tikz(self, view=[0, 0, 1], angle=0, scale=1,
edge_color='blue!95!black', facet_color='blue!95!black',
opacity=0.8, vertex_color='green', axis=False):
r"""
Return a string ``tikz_pic`` consisting of a tikz picture of ``self``
according to a projection ``view`` and an angle ``angle``
obtained via Jmol through the current state property.
INPUT:
- ``view`` - list (default: [0,0,1]) representing the rotation axis (see note below).
- ``angle`` - integer (default: 0) angle of rotation in degree from 0 to 360 (see note
below).
- ``scale`` - integer (default: 1) specifying the scaling of the tikz picture.
- ``edge_color`` - string (default: 'blue!95!black') representing colors which tikz
recognizes.
- ``facet_color`` - string (default: 'blue!95!black') representing colors which tikz
recognizes.
- ``vertex_color`` - string (default: 'green') representing colors which tikz
recognizes.
- ``opacity`` - real number (default: 0.8) between 0 and 1 giving the opacity of
the front facets.
- ``axis`` - Boolean (default: False) draw the axes at the origin or not.
OUTPUT:
- LatexExpr -- containing the TikZ picture.
.. NOTE::
The inputs ``view`` and ``angle`` can be obtained by visualizing it
using ``.show(aspect_ratio=1)``. This will open an interactive view
in your default browser, where you can rotate the polytope. Once
the desired view angle is found, click on the information icon in
the lower right-hand corner and select *Get Viewpoint*. This will
copy a string of the form '[x,y,z],angle' to your local clipboard.
Go back to Sage and type ``Img = P.projection().tikz([x,y,z],angle)``.
The inputs ``view`` and ``angle`` can also be obtained from the
viewer Jmol::
1) Right click on the image
2) Select ``Console``
3) Select the tab ``State``
4) Scroll to the line ``moveto``
It reads something like::
moveto 0.0 {x y z angle} Scale
The ``view`` is then [x,y,z] and ``angle`` is angle.
The following number is the scale.
Jmol performs a rotation of ``angle`` degrees along the
vector [x,y,z] and shows the result from the z-axis.
EXAMPLES::
sage: P1 = polytopes.small_rhombicuboctahedron()
sage: Image1 = P1.projection().tikz([1,3,5], 175, scale=4)
sage: type(Image1)
<class 'sage.misc.latex.LatexExpr'>
sage: print('\n'.join(Image1.splitlines()[:4]))
\begin{tikzpicture}%
[x={(-0.939161cm, 0.244762cm)},
y={(0.097442cm, -0.482887cm)},
z={(0.329367cm, 0.840780cm)},
sage: with open('polytope-tikz1.tex', 'w') as f: # not tested
....: _ = f.write(Image1)
sage: P2 = Polyhedron(vertices=[[1, 1],[1, 2],[2, 1]])
sage: Image2 = P2.projection().tikz(scale=3, edge_color='blue!95!black', facet_color='orange!95!black', opacity=0.4, vertex_color='yellow', axis=True)
sage: type(Image2)
<class 'sage.misc.latex.LatexExpr'>
sage: print('\n'.join(Image2.splitlines()[:4]))
\begin{tikzpicture}%
[scale=3.000000,
back/.style={loosely dotted, thin},
edge/.style={color=blue!95!black, thick},
sage: with open('polytope-tikz2.tex', 'w') as f: # not tested
....: _ = f.write(Image2)
sage: P3 = Polyhedron(vertices=[[-1, -1, 2],[-1, 2, -1],[2, -1, -1]])
sage: P3
A 2-dimensional polyhedron in ZZ^3 defined as the convex hull of 3 vertices
sage: Image3 = P3.projection().tikz([0.5,-1,-0.1], 55, scale=3, edge_color='blue!95!black',facet_color='orange!95!black', opacity=0.7, vertex_color='yellow', axis=True)
sage: print('\n'.join(Image3.splitlines()[:4]))
\begin{tikzpicture}%
[x={(0.658184cm, -0.242192cm)},
y={(-0.096240cm, 0.912008cm)},
z={(-0.746680cm, -0.331036cm)},
sage: with open('polytope-tikz3.tex', 'w') as f: # not tested
....: _ = f.write(Image3)
sage: P = Polyhedron(vertices=[[1,1,0,0],[1,2,0,0],[2,1,0,0],[0,0,1,0],[0,0,0,1]])
sage: P
A 4-dimensional polyhedron in ZZ^4 defined as the convex hull of 5 vertices
sage: P.projection().tikz()
Traceback (most recent call last):
...
NotImplementedError: The polytope has to live in 2 or 3 dimensions.
.. TODO::
Make it possible to draw Schlegel diagram for 4-polytopes. ::
sage: P=Polyhedron(vertices=[[1,1,0,0],[1,2,0,0],[2,1,0,0],[0,0,1,0],[0,0,0,1]])
sage: P
A 4-dimensional polyhedron in ZZ^4 defined as the convex hull of 5 vertices
sage: P.projection().tikz()
Traceback (most recent call last):
...
NotImplementedError: The polytope has to live in 2 or 3 dimensions.
Make it possible to draw 3-polytopes living in higher dimension.
"""
if self.polyhedron_ambient_dim > 3 or self.polyhedron_ambient_dim < 2:
raise NotImplementedError("The polytope has to live in 2 or 3 dimensions.")
elif self.polyhedron_dim < 2 or self.polyhedron_dim > 3:
raise NotImplementedError("The polytope has to be 2 or 3-dimensional.")
elif self.polyhedron_ambient_dim == 2: # self is a polygon in 2-space
return self._tikz_2d(scale, edge_color, facet_color, opacity,
vertex_color, axis)
elif self.polyhedron_dim == 2: # self is a polygon in 3-space
return self._tikz_2d_in_3d(view, angle, scale, edge_color,
facet_color, opacity, vertex_color, axis)
else: # self is a 3-polytope in 3-space
return self._tikz_3d_in_3d(view, angle, scale, edge_color,
facet_color, opacity, vertex_color, axis) | [
"def",
"tikz",
"(",
"self",
",",
"view",
"=",
"[",
"0",
",",
"0",
",",
"1",
"]",
",",
"angle",
"=",
"0",
",",
"scale",
"=",
"1",
",",
"edge_color",
"=",
"'blue!95!black'",
",",
"facet_color",
"=",
"'blue!95!black'",
",",
"opacity",
"=",
"0.8",
",",
"vertex_color",
"=",
"'green'",
",",
"axis",
"=",
"False",
")",
":",
"if",
"self",
".",
"polyhedron_ambient_dim",
">",
"3",
"or",
"self",
".",
"polyhedron_ambient_dim",
"<",
"2",
":",
"raise",
"NotImplementedError",
"(",
"\"The polytope has to live in 2 or 3 dimensions.\"",
")",
"elif",
"self",
".",
"polyhedron_dim",
"<",
"2",
"or",
"self",
".",
"polyhedron_dim",
">",
"3",
":",
"raise",
"NotImplementedError",
"(",
"\"The polytope has to be 2 or 3-dimensional.\"",
")",
"elif",
"self",
".",
"polyhedron_ambient_dim",
"==",
"2",
":",
"# self is a polygon in 2-space",
"return",
"self",
".",
"_tikz_2d",
"(",
"scale",
",",
"edge_color",
",",
"facet_color",
",",
"opacity",
",",
"vertex_color",
",",
"axis",
")",
"elif",
"self",
".",
"polyhedron_dim",
"==",
"2",
":",
"# self is a polygon in 3-space",
"return",
"self",
".",
"_tikz_2d_in_3d",
"(",
"view",
",",
"angle",
",",
"scale",
",",
"edge_color",
",",
"facet_color",
",",
"opacity",
",",
"vertex_color",
",",
"axis",
")",
"else",
":",
"# self is a 3-polytope in 3-space",
"return",
"self",
".",
"_tikz_3d_in_3d",
"(",
"view",
",",
"angle",
",",
"scale",
",",
"edge_color",
",",
"facet_color",
",",
"opacity",
",",
"vertex_color",
",",
"axis",
")"
] | https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/geometry/polyhedron/plot.py#L1181-L1309 |
||
SymbiFlow/prjxray | 5349556bc2c230801d6df0cf11bccb9cfd171639 | prjxray/tile_segbits.py | python | parsebit | (val) | return Bit(
word_column=int(seg_word_column),
word_bit=int(word_bit_n),
isset=isset,
) | Return "!012_23" => (12, 23, False) | Return "!012_23" => (12, 23, False) | [
"Return",
"!012_23",
"=",
">",
"(",
"12",
"23",
"False",
")"
] | def parsebit(val):
'''Return "!012_23" => (12, 23, False)'''
isset = True
# Default is 0. Skip explicit call outs
if val[0] == '!':
isset = False
val = val[1:]
# 28_05 => 28, 05
parts = val.split('_')
assert len(parts) == 2, val
seg_word_column, word_bit_n = parts
return Bit(
word_column=int(seg_word_column),
word_bit=int(word_bit_n),
isset=isset,
) | [
"def",
"parsebit",
"(",
"val",
")",
":",
"isset",
"=",
"True",
"# Default is 0. Skip explicit call outs",
"if",
"val",
"[",
"0",
"]",
"==",
"'!'",
":",
"isset",
"=",
"False",
"val",
"=",
"val",
"[",
"1",
":",
"]",
"# 28_05 => 28, 05",
"parts",
"=",
"val",
".",
"split",
"(",
"'_'",
")",
"assert",
"len",
"(",
"parts",
")",
"==",
"2",
",",
"val",
"seg_word_column",
",",
"word_bit_n",
"=",
"parts",
"return",
"Bit",
"(",
"word_column",
"=",
"int",
"(",
"seg_word_column",
")",
",",
"word_bit",
"=",
"int",
"(",
"word_bit_n",
")",
",",
"isset",
"=",
"isset",
",",
")"
] | https://github.com/SymbiFlow/prjxray/blob/5349556bc2c230801d6df0cf11bccb9cfd171639/prjxray/tile_segbits.py#L41-L57 |
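
A self-contained sketch of the record above, runnable on its own; the Bit namedtuple here is an assumption standing in for prjxray's real type:

from collections import namedtuple

Bit = namedtuple('Bit', 'word_column word_bit isset')

def parsebit(val):
    '''Return "!012_23" => (12, 23, False)'''
    isset = True
    # A leading '!' marks a bit that must be clear
    if val[0] == '!':
        isset = False
        val = val[1:]
    seg_word_column, word_bit_n = val.split('_')
    return Bit(int(seg_word_column), int(word_bit_n), isset)

assert parsebit('28_05') == Bit(28, 5, True)
assert parsebit('!012_23') == Bit(12, 23, False)
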
|
researchmm/tasn | 5dba8ccc096cedc63913730eeea14a9647911129 | tasn-mxnet/python/mxnet/symbol/symbol.py | python | Symbol.broadcast_like | (self, *args, **kwargs) | return op.broadcast_like(self, *args, **kwargs) | Convenience fluent method for :py:func:`broadcast_like`.
The arguments are the same as for :py:func:`broadcast_like`, with
this array as data. | Convenience fluent method for :py:func:`broadcast_like`. | [
"Convenience",
"fluent",
"method",
"for",
":",
"py",
":",
"func",
":",
"broadcast_like",
"."
] | def broadcast_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`broadcast_like`.
The arguments are the same as for :py:func:`broadcast_like`, with
this array as data.
"""
return op.broadcast_like(self, *args, **kwargs) | [
"def",
"broadcast_like",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"op",
".",
"broadcast_like",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/researchmm/tasn/blob/5dba8ccc096cedc63913730eeea14a9647911129/tasn-mxnet/python/mxnet/symbol/symbol.py#L2018-L2024 |
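
Hedged usage sketch: the fluent form s.broadcast_like(t) is just sugar for mx.sym.broadcast_like(s, t) (pseudo-session; the shapes shown are what you would supply at bind time, not re-run here):

import mxnet as mx

a = mx.sym.Variable('a')   # e.g. shape (1, 3) when bound
b = mx.sym.Variable('b')   # e.g. shape (2, 3) when bound
c = a.broadcast_like(b)    # equivalent to mx.sym.broadcast_like(a, b)
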
|
miyakogi/pyppeteer | f5313d0e7f973c57ed31fa443cea1834e223a96c | pyppeteer/dialog.py | python | Dialog.accept | (self, promptText: str = '') | Accept the dialog.
* ``promptText`` (str): A text to enter in prompt. If the dialog's type
is not prompt, this has no effect. | Accept the dialog. | [
"Accept",
"the",
"dialog",
"."
] | async def accept(self, promptText: str = '') -> None:
"""Accept the dialog.
* ``promptText`` (str): A text to enter in prompt. If the dialog's type
is not prompt, this has no effect.
"""
self._handled = True
await self._client.send('Page.handleJavaScriptDialog', {
'accept': True,
'promptText': promptText,
}) | [
"async",
"def",
"accept",
"(",
"self",
",",
"promptText",
":",
"str",
"=",
"''",
")",
"->",
"None",
":",
"self",
".",
"_handled",
"=",
"True",
"await",
"self",
".",
"_client",
".",
"send",
"(",
"'Page.handleJavaScriptDialog'",
",",
"{",
"'accept'",
":",
"True",
",",
"'promptText'",
":",
"promptText",
",",
"}",
")"
] | https://github.com/miyakogi/pyppeteer/blob/f5313d0e7f973c57ed31fa443cea1834e223a96c/pyppeteer/dialog.py#L71-L81 |
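
Hedged usage sketch for the coroutine above (pyppeteer-style session; the dialog wiring follows pyppeteer's documented page.on('dialog', ...) pattern but was not re-verified here):

import asyncio
from pyppeteer import launch

async def main():
    browser = await launch()
    page = await browser.newPage()

    async def on_dialog(dialog):
        # Sends Page.handleJavaScriptDialog with accept=True and the prompt text
        await dialog.accept('my answer')

    page.on('dialog', lambda d: asyncio.ensure_future(on_dialog(d)))
    await page.evaluate('() => prompt("Question?")')
    await browser.close()

asyncio.get_event_loop().run_until_complete(main())
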
||
AI-ON/Multitask-and-Transfer-Learning | 31e0798d436e314ddbc64c4a6b935df1b2160e50 | architectures/chainer/models/predictive_autoencoder.py | python | normalize_2d | (x) | return exp / denominator | [] | def normalize_2d(x):
exp = F.exp(x[0])
sums = F.sum(F.sum(exp, axis=-1), axis=-1)
expanded = F.expand_dims(F.expand_dims(sums, axis=-1), axis=-1)
denominator = F.tile(expanded, (1, 160, 210))
return exp / denominator | [
"def",
"normalize_2d",
"(",
"x",
")",
":",
"exp",
"=",
"F",
".",
"exp",
"(",
"x",
"[",
"0",
"]",
")",
"sums",
"=",
"F",
".",
"sum",
"(",
"F",
".",
"sum",
"(",
"exp",
",",
"axis",
"=",
"-",
"1",
")",
",",
"axis",
"=",
"-",
"1",
")",
"expanded",
"=",
"F",
".",
"expand_dims",
"(",
"F",
".",
"expand_dims",
"(",
"sums",
",",
"axis",
"=",
"-",
"1",
")",
",",
"axis",
"=",
"-",
"1",
")",
"denominator",
"=",
"F",
".",
"tile",
"(",
"expanded",
",",
"(",
"1",
",",
"160",
",",
"210",
")",
")",
"return",
"exp",
"/",
"denominator"
] | https://github.com/AI-ON/Multitask-and-Transfer-Learning/blob/31e0798d436e314ddbc64c4a6b935df1b2160e50/architectures/chainer/models/predictive_autoencoder.py#L247-L252 |
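
What the Chainer code above computes, restated in NumPy for clarity: a per-channel spatial softmax-style normalization of the first batch element (sketch; the (batch, channels, 160, 210) input shape is inferred from the tiled constants):

import numpy as np

def normalize_2d_np(x):
    e = np.exp(x[0])                                # (channels, 160, 210)
    return e / e.sum(axis=(-2, -1), keepdims=True)  # each map now sums to 1

maps = normalize_2d_np(np.random.randn(1, 4, 160, 210))
assert np.allclose(maps.sum(axis=(-2, -1)), 1.0)
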
|||
Tencent/bk-bcs-saas | 2b437bf2f5fd5ce2078f7787c3a12df609f7679d | bcs-app/backend/container_service/clusters/views/node.py | python | NodeLabelQueryCreateViewSet.create_node_labels | (self, request, project_id) | return Response({"code": 0, "message": _("Created successfully!")}) | Add node labels | Add node labels | [
"添加节点标签"
] | def create_node_labels(self, request, project_id):
"""添加节点标签"""
# 解析参数
node_id_list, node_label_info = self.get_create_label_params(request)
# Validate the label keys and values
self.label_regex(node_label_info)
# Get the nodes' labels from the database
# NOTE: labels may only be set when a node is in the normal state
project_node_info = self.get_node_list(request, project_id, None).get('results') or []
if not project_node_info:
raise error_codes.APIError(_("当前项目下节点为空,请确认"))
all_node_id_list = []
all_node_id_ip_map = {}
for info in project_node_info:
all_node_id_list.append(info["id"])
all_node_id_ip_map[info["id"]] = {"inner_ip": info["inner_ip"], "cluster_id": info["cluster_id"]}
if info['id'] in node_id_list and info['status'] != CommonStatus.Normal:
raise error_codes.CheckFailed(_("节点不是正常状态时,不允许设置标签"))
diff_node_id_list = set(node_id_list) - set(all_node_id_list)
if diff_node_id_list:
raise error_codes.CheckFailed(_("节点ID [{}] 不属于当前项目,请确认").format(",".join(diff_node_id_list)))
# 校验权限
self.check_perm(request, project_id, all_node_id_ip_map, node_id_list)
# Match the data
pre_node_labels = self.get_labels_by_node(request, project_id, node_id_list)
label_operation_map = self.get_label_operation(
pre_node_labels, node_label_info, node_id_list, all_node_id_ip_map
)
# k8s operates at the node level
self.create_node_label_via_k8s(request, project_id, label_operation_map)
# Write to the database
self.create_or_update(request, project_id, label_operation_map)
client.ContextActivityLogClient(
project_id=project_id,
user=request.user.username,
resource_type="node",
resource=str(node_id_list),
resource_id=str(node_id_list),
extra=json.dumps(node_label_info),
description=_("节点打标签"),
).log_add(activity_status="succeed")
return Response({"code": 0, "message": _("创建成功!")}) | [
"def",
"create_node_labels",
"(",
"self",
",",
"request",
",",
"project_id",
")",
":",
"# 解析参数",
"node_id_list",
",",
"node_label_info",
"=",
"self",
".",
"get_create_label_params",
"(",
"request",
")",
"# 校验label中key和value",
"self",
".",
"label_regex",
"(",
"node_label_info",
")",
"# 获取数据库中节点的label",
"# NOTE: 节点为正常状态时,才允许设置标签",
"project_node_info",
"=",
"self",
".",
"get_node_list",
"(",
"request",
",",
"project_id",
",",
"None",
")",
".",
"get",
"(",
"'results'",
")",
"or",
"[",
"]",
"if",
"not",
"project_node_info",
":",
"raise",
"error_codes",
".",
"APIError",
"(",
"_",
"(",
"\"当前项目下节点为空,请确认\"))",
"",
"",
"all_node_id_list",
"=",
"[",
"]",
"all_node_id_ip_map",
"=",
"{",
"}",
"for",
"info",
"in",
"project_node_info",
":",
"all_node_id_list",
".",
"append",
"(",
"info",
"[",
"\"id\"",
"]",
")",
"all_node_id_ip_map",
"[",
"info",
"[",
"\"id\"",
"]",
"]",
"=",
"{",
"\"inner_ip\"",
":",
"info",
"[",
"\"inner_ip\"",
"]",
",",
"\"cluster_id\"",
":",
"info",
"[",
"\"cluster_id\"",
"]",
"}",
"if",
"info",
"[",
"'id'",
"]",
"in",
"node_id_list",
"and",
"info",
"[",
"'status'",
"]",
"!=",
"CommonStatus",
".",
"Normal",
":",
"raise",
"error_codes",
".",
"CheckFailed",
"(",
"_",
"(",
"\"节点不是正常状态时,不允许设置标签\"))",
"",
"",
"diff_node_id_list",
"=",
"set",
"(",
"node_id_list",
")",
"-",
"set",
"(",
"all_node_id_list",
")",
"if",
"diff_node_id_list",
":",
"raise",
"error_codes",
".",
"CheckFailed",
"(",
"_",
"(",
"\"节点ID [{}] 不属于当前项目,请确认\").format(\",\".join(diff_nod",
"e",
"_",
"id_lis",
"t",
")))",
"",
"",
"",
"",
"",
"",
"",
"# 校验权限",
"self",
".",
"check_perm",
"(",
"request",
",",
"project_id",
",",
"all_node_id_ip_map",
",",
"node_id_list",
")",
"# 匹配数据",
"pre_node_labels",
"=",
"self",
".",
"get_labels_by_node",
"(",
"request",
",",
"project_id",
",",
"node_id_list",
")",
"label_operation_map",
"=",
"self",
".",
"get_label_operation",
"(",
"pre_node_labels",
",",
"node_label_info",
",",
"node_id_list",
",",
"all_node_id_ip_map",
")",
"# k8s 是以节点为维度",
"self",
".",
"create_node_label_via_k8s",
"(",
"request",
",",
"project_id",
",",
"label_operation_map",
")",
"# 写入数据库",
"self",
".",
"create_or_update",
"(",
"request",
",",
"project_id",
",",
"label_operation_map",
")",
"client",
".",
"ContextActivityLogClient",
"(",
"project_id",
"=",
"project_id",
",",
"user",
"=",
"request",
".",
"user",
".",
"username",
",",
"resource_type",
"=",
"\"node\"",
",",
"resource",
"=",
"str",
"(",
"node_id_list",
")",
",",
"resource_id",
"=",
"str",
"(",
"node_id_list",
")",
",",
"extra",
"=",
"json",
".",
"dumps",
"(",
"node_label_info",
")",
",",
"description",
"=",
"_",
"(",
"\"节点打标签\"),",
"",
"",
")",
".",
"log_add",
"(",
"activity_status",
"=",
"\"succeed\"",
")",
"return",
"Response",
"(",
"{",
"\"code\"",
":",
"0",
",",
"\"message\"",
":",
"_",
"(",
"\"创建成功!\")})",
"",
"",
""
] | https://github.com/Tencent/bk-bcs-saas/blob/2b437bf2f5fd5ce2078f7787c3a12df609f7679d/bcs-app/backend/container_service/clusters/views/node.py#L725-L767 |
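
A stripped-down sketch of the validation step above (names and error types invented for illustration; the real code raises error_codes.APIError/CheckFailed and additionally checks per-node permissions):

def check_nodes(requested_ids, project_nodes):
    known = {n['id']: n for n in project_nodes}
    missing = set(requested_ids) - set(known)
    if missing:
        raise ValueError('node ids %s not in this project' % sorted(missing))
    abnormal = [i for i in requested_ids if known[i]['status'] != 'normal']
    if abnormal:
        raise ValueError('labels may only be set on nodes in the normal state')

check_nodes([1], [{'id': 1, 'status': 'normal', 'inner_ip': '10.0.0.1'}])
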
|
guildai/guildai | 1665985a3d4d788efc1a3180ca51cc417f71ca78 | guild/external/setuptools/command/sdist.py | python | sdist.make_distribution | (self) | Workaround for #516 | Workaround for #516 | [
"Workaround",
"for",
"#516"
] | def make_distribution(self):
"""
Workaround for #516
"""
with self._remove_os_link():
orig.sdist.make_distribution(self) | [
"def",
"make_distribution",
"(",
"self",
")",
":",
"with",
"self",
".",
"_remove_os_link",
"(",
")",
":",
"orig",
".",
"sdist",
".",
"make_distribution",
"(",
"self",
")"
] | https://github.com/guildai/guildai/blob/1665985a3d4d788efc1a3180ca51cc417f71ca78/guild/external/setuptools/command/sdist.py#L73-L78 |
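
The workaround pattern above, in isolation: temporarily hide os.link so distutils falls back to copying files instead of hard-linking them (sketch; the real _remove_os_link is a context manager defined elsewhere in setuptools, so the details here are assumptions):

import contextlib
import os

@contextlib.contextmanager
def remove_os_link():
    saved = getattr(os, 'link', None)
    try:
        if saved is not None:
            del os.link
        yield
    finally:
        if saved is not None:
            os.link = saved

with remove_os_link():
    assert not hasattr(os, 'link')   # distutils now copies instead of linking
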
||
jython/frozen-mirror | b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99 | lib-python/2.7/hotshot/__init__.py | python | Profile.stop | (self) | Stop the profiler. | Stop the profiler. | [
"Stop",
"the",
"profiler",
"."
] | def stop(self):
"""Stop the profiler."""
self._prof.stop() | [
"def",
"stop",
"(",
"self",
")",
":",
"self",
".",
"_prof",
".",
"stop",
"(",
")"
] | https://github.com/jython/frozen-mirror/blob/b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99/lib-python/2.7/hotshot/__init__.py#L38-L40 |
||
fossasia/x-mario-center | fe67afe28d995dcf4e2498e305825a4859566172 | build/lib.linux-i686-2.7/softwarecenter/ui/gtk3/app.py | python | SoftwareCenterAppGtk3.show_available_packages | (self, packages) | Show packages given as arguments in the available_pane
If the list of packages is only one element long, show that,
otherwise turn it into a comma separated search | Show packages given as arguments in the available_pane
If the list of packages is only one element long, show that,
otherwise turn it into a comma separated search | [
"Show",
"packages",
"given",
"as",
"arguments",
"in",
"the",
"available_pane",
"If",
"the",
"list",
"of",
"packages",
"is",
"only",
"one",
"element",
"long",
"show",
"that",
"otherwise",
"turn",
"it",
"into",
"a",
"comma",
"seperated",
"search"
] | def show_available_packages(self, packages):
""" Show packages given as arguments in the available_pane
If the list of packages is only one element long, show that,
otherwise turn it into a comma separated search
"""
try:
search_text, app = parse_packages_args(packages)
except DebFileOpenError as e:
LOG.exception("show_available_packages: can not open %r, error:",
packages)
dialogs.error(None,
_("Error"),
_("The file “%s” could not be opened.") % e.path)
search_text = app = None
LOG.info('show_available_packages: search_text is %r, app is %r.',
search_text, app)
if search_text:
self.available_pane.init_view()
self.available_pane.searchentry.set_text(search_text)
elif app is not None:
self.show_app(app)
else:
# normal startup, show the lobby (it will have a spinner when
# its not ready yet) - it will also initialize the view
self.view_manager.set_active_view(ViewPages.AVAILABLE) | [
"def",
"show_available_packages",
"(",
"self",
",",
"packages",
")",
":",
"try",
":",
"search_text",
",",
"app",
"=",
"parse_packages_args",
"(",
"packages",
")",
"except",
"DebFileOpenError",
"as",
"e",
":",
"LOG",
".",
"exception",
"(",
"\"show_available_packages: can not open %r, error:\"",
",",
"packages",
")",
"dialogs",
".",
"error",
"(",
"None",
",",
"_",
"(",
"\"Error\"",
")",
",",
"_",
"(",
"\"The file “%s” could not be opened.\") % ",
"e",
"p",
"t",
"h",
")",
"",
"search_text",
"=",
"app",
"=",
"None",
"LOG",
".",
"info",
"(",
"'show_available_packages: search_text is %r, app is %r.'",
",",
"search_text",
",",
"app",
")",
"if",
"search_text",
":",
"self",
".",
"available_pane",
".",
"init_view",
"(",
")",
"self",
".",
"available_pane",
".",
"searchentry",
".",
"set_text",
"(",
"search_text",
")",
"elif",
"app",
"is",
"not",
"None",
":",
"self",
".",
"show_app",
"(",
"app",
")",
"else",
":",
"# normal startup, show the lobby (it will have a spinner when",
"# its not ready yet) - it will also initialize the view",
"self",
".",
"view_manager",
".",
"set_active_view",
"(",
"ViewPages",
".",
"AVAILABLE",
")"
] | https://github.com/fossasia/x-mario-center/blob/fe67afe28d995dcf4e2498e305825a4859566172/build/lib.linux-i686-2.7/softwarecenter/ui/gtk3/app.py#L1324-L1350 |
||
NervanaSystems/ngraph-python | ac032c83c7152b615a9ad129d54d350f9d6a2986 | ngraph/transformers/exop.py | python | TensorViewDecl.key | (self) | return self.tensor_description.parameter_key | Returns: A tuple unique to this view of the tensor. | Returns: A tuple unique to this view of the tensor. | [
"Returns",
":",
"A",
"tuple",
"unique",
"to",
"this",
"view",
"of",
"the",
"tensor",
"."
] | def key(self):
"""
Returns: A tuple unique to this view of the tensor.
"""
return self.tensor_description.parameter_key | [
"def",
"key",
"(",
"self",
")",
":",
"return",
"self",
".",
"tensor_description",
".",
"parameter_key"
] | https://github.com/NervanaSystems/ngraph-python/blob/ac032c83c7152b615a9ad129d54d350f9d6a2986/ngraph/transformers/exop.py#L1175-L1180 |
|
Tautulli/Tautulli | 2410eb33805aaac4bd1c5dad0f71e4f15afaf742 | lib/html5lib/treebuilders/base.py | python | TreeBuilder.elementInActiveFormattingElements | (self, name) | return False | Check if an element exists between the end of the active
formatting elements and the last marker. If it does, return it, else
return false | Check if an element exists between the end of the active
formatting elements and the last marker. If it does, return it, else
return false | [
"Check",
"if",
"an",
"element",
"exists",
"between",
"the",
"end",
"of",
"the",
"active",
"formatting",
"elements",
"and",
"the",
"last",
"marker",
".",
"If",
"it",
"does",
"return",
"it",
"else",
"return",
"false"
] | def elementInActiveFormattingElements(self, name):
"""Check if an element exists between the end of the active
formatting elements and the last marker. If it does, return it, else
return false"""
for item in self.activeFormattingElements[::-1]:
# Check for Marker first because if it's a Marker it doesn't have a
# name attribute.
if item == Marker:
break
elif item.name == name:
return item
return False | [
"def",
"elementInActiveFormattingElements",
"(",
"self",
",",
"name",
")",
":",
"for",
"item",
"in",
"self",
".",
"activeFormattingElements",
"[",
":",
":",
"-",
"1",
"]",
":",
"# Check for Marker first because if it's a Marker it doesn't have a",
"# name attribute.",
"if",
"item",
"==",
"Marker",
":",
"break",
"elif",
"item",
".",
"name",
"==",
"name",
":",
"return",
"item",
"return",
"False"
] | https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/html5lib/treebuilders/base.py#L269-L281 |
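
Minimal sketch of the scan above: walk the active formatting elements from the end, stop at the most recent Marker, and report a name match (stand-in types; the real list holds html5lib element nodes):

Marker = object()

class El(object):
    def __init__(self, name):
        self.name = name

def find_active(elements, name):
    for item in reversed(elements):
        if item is Marker:
            break
        if item.name == name:
            return item
    return False

stack = [El('a'), Marker, El('b')]
assert find_active(stack, 'b')
assert find_active(stack, 'a') is False   # hidden behind the marker
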
|
ducksboard/libsaas | 615981a3336f65be9d51ae95a48aed9ad3bd1c3c | libsaas/services/bitbucket/issues.py | python | RepoIssues.filter | (self, filters) | return http.Request('GET', url), parsers.parse_json | Search through the issues applying filters.
Look at https://confluence.atlassian.com/display/BITBUCKET/Issues
to get a complete list of possible filters.
:var filters: A dictionary of filters. Keys are strings corresponding
to the filter names and values are ether string filter values or
tuples, in which case their conditions are implicitly ORed. For
example, {"title": ("~one", "~two")} would mean issues with the
title containing either "one" or "two"
:vartype filters: dict of str to str or tuple of str | Search through the issues applying filters. | [
"Search",
"through",
"the",
"issues",
"applying",
"filters",
"."
] | def filter(self, filters):
"""
Search through the issues applying filters.
Look at https://confluence.atlassian.com/display/BITBUCKET/Issues
to get a complete list of possible filters.
:var filters: A dictionary of filters. Keys are strings corresponding
to the filter names and values are ether string filter values or
tuples, in which case their conditions are implicitly ORed. For
example, {"title": ("~one", "~two")} would mean issues with the
title containing either "one" or "two"
:vartype filters: dict of str to str or tuple of str
"""
# because http.Request needs params to be a dict of strings to strings
# (roughly) and since BitBucket wants repeated parameters to express
# OR, we'll do the quoting by hand ourselves
def flatten_conditions(filters):
for key, val in filters.items():
if isinstance(val, (list, tuple)):
for v in val:
yield (port.to_b(key), port.to_b(v))
else:
yield (port.to_b(key), port.to_b(val))
to_encode = tuple(flatten_conditions(filters))
qs = port.urlencode(to_encode)
url = '{0}/?{1}'.format(self.get_url(), qs)
return http.Request('GET', url), parsers.parse_json | [
"def",
"filter",
"(",
"self",
",",
"filters",
")",
":",
"# because http.Request needs params to be a dict of strings to strings",
"# (roughly) and since BitBucket wants repeated parameters to express",
"# OR, we'll do the quoting by hand ourselves",
"def",
"flatten_conditions",
"(",
"filters",
")",
":",
"for",
"key",
",",
"val",
"in",
"filters",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"for",
"v",
"in",
"val",
":",
"yield",
"(",
"port",
".",
"to_b",
"(",
"key",
")",
",",
"port",
".",
"to_b",
"(",
"v",
")",
")",
"else",
":",
"yield",
"(",
"port",
".",
"to_b",
"(",
"key",
")",
",",
"port",
".",
"to_b",
"(",
"val",
")",
")",
"to_encode",
"=",
"tuple",
"(",
"flatten_conditions",
"(",
"filters",
")",
")",
"qs",
"=",
"port",
".",
"urlencode",
"(",
"to_encode",
")",
"url",
"=",
"'{0}/?{1}'",
".",
"format",
"(",
"self",
".",
"get_url",
"(",
")",
",",
"qs",
")",
"return",
"http",
".",
"Request",
"(",
"'GET'",
",",
"url",
")",
",",
"parsers",
".",
"parse_json"
] | https://github.com/ducksboard/libsaas/blob/615981a3336f65be9d51ae95a48aed9ad3bd1c3c/libsaas/services/bitbucket/issues.py#L128-L157 |
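
Why the hand-rolled encoding above matters: urlencode over a sequence of pairs emits repeated keys, which BitBucket's issue search reads as OR (small runnable sketch; the Python 2 fallback mirrors what libsaas's port module abstracts, and the exact percent-encoding varies by Python version):

try:
    from urllib.parse import urlencode   # Python 3
except ImportError:
    from urllib import urlencode         # Python 2

filters = {'title': ('~one', '~two'), 'status': 'new'}
pairs = []
for key, val in filters.items():
    if isinstance(val, (list, tuple)):
        pairs.extend((key, v) for v in val)
    else:
        pairs.append((key, val))

qs = urlencode(pairs)
# -> 'title=~one&title=~two&status=new': title contains "one" OR "two"
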
|
openstack/cinder | 23494a6d6c51451688191e1847a458f1d3cdcaa5 | cinder/zonemanager/utils.py | python | get_formatted_wwn | (wwn_str) | Utility API that formats WWN to insert ':'. | Utility API that formats WWN to insert ':'. | [
"Utility",
"API",
"that",
"formats",
"WWN",
"to",
"insert",
":",
"."
] | def get_formatted_wwn(wwn_str):
"""Utility API that formats WWN to insert ':'."""
if (len(wwn_str) != 16):
return wwn_str.lower()
else:
return (':'.join([wwn_str[i:i + 2]
for i in range(0, len(wwn_str), 2)])).lower() | [
"def",
"get_formatted_wwn",
"(",
"wwn_str",
")",
":",
"if",
"(",
"len",
"(",
"wwn_str",
")",
"!=",
"16",
")",
":",
"return",
"wwn_str",
".",
"lower",
"(",
")",
"else",
":",
"return",
"(",
"':'",
".",
"join",
"(",
"[",
"wwn_str",
"[",
"i",
":",
"i",
"+",
"2",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"wwn_str",
")",
",",
"2",
")",
"]",
")",
")",
".",
"lower",
"(",
")"
] | https://github.com/openstack/cinder/blob/23494a6d6c51451688191e1847a458f1d3cdcaa5/cinder/zonemanager/utils.py#L67-L73 |
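
Quick sanity checks for the formatter above (assumes get_formatted_wwn as defined in this record is in scope):

assert get_formatted_wwn('21000024FF45DD88') == '21:00:00:24:ff:45:dd:88'
# Anything that is not exactly 16 characters long is only lowercased:
assert get_formatted_wwn('21:00:00:24:FF:45:DD:88') == '21:00:00:24:ff:45:dd:88'
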
||
ailabx/ailabx | 4a8c701a3604bbc34157167224588041944ac1a2 | codes/qlib-main/qlib/workflow/online/utils.py | python | OnlineToolR.get_online_tag | (self, recorder: Recorder) | return tags.get(self.ONLINE_KEY, self.OFFLINE_TAG) | Given a model recorder, return its online tag.
Args:
recorder (Recorder): an instance of recorder
Returns:
str: the online tag | Given a model recorder, return its online tag. | [
"Given",
"a",
"model",
"recorder",
"and",
"return",
"its",
"online",
"tag",
"."
] | def get_online_tag(self, recorder: Recorder) -> str:
"""
Given a model recorder, return its online tag.
Args:
recorder (Recorder): an instance of recorder
Returns:
str: the online tag
"""
tags = recorder.list_tags()
return tags.get(self.ONLINE_KEY, self.OFFLINE_TAG) | [
"def",
"get_online_tag",
"(",
"self",
",",
"recorder",
":",
"Recorder",
")",
"->",
"str",
":",
"tags",
"=",
"recorder",
".",
"list_tags",
"(",
")",
"return",
"tags",
".",
"get",
"(",
"self",
".",
"ONLINE_KEY",
",",
"self",
".",
"OFFLINE_TAG",
")"
] | https://github.com/ailabx/ailabx/blob/4a8c701a3604bbc34157167224588041944ac1a2/codes/qlib-main/qlib/workflow/online/utils.py#L118-L129 |
|
bruceyang2012/Face-detection-with-mobilenet-ssd | 58fafb6e93d28531797aac1e9a4436730c8cee7c | keras_ssd_loss.py | python | SSDLoss.compute_loss | (self, y_true, y_pred) | return total_loss | Compute the loss of the SSD model prediction against the ground truth.
Arguments:
y_true (array): A Numpy array of shape `(batch_size, #boxes, #classes + 8)`,
where `#boxes` is the total number of boxes that the model predicts
per image. Be careful to make sure that the index of each given
box in `y_true` is the same as the index for the corresponding
box in `y_pred`. The last axis must have length `#classes + 8` and contain
`[classes one-hot encoded, 4 ground truth box coordinates, 4 arbitrary entries]`
in this order, including the background class. The last four entries of the
last axis are not used by this function and therefore their contents are
irrelevant, they only exist so that `y_true` has the same shape as `y_pred`,
where the last four entries of the last axis contain the anchor box
coordinates, which are needed during inference. Important: Boxes that
you want the cost function to ignore need to have a one-hot
class vector of all zeros.
y_pred (Keras tensor): The model prediction. The shape is identical
to that of `y_true`.
Returns:
A scalar, the total multitask loss for classification and localization. | Compute the loss of the SSD model prediction against the ground truth. | [
"Compute",
"the",
"loss",
"of",
"the",
"SSD",
"model",
"prediction",
"against",
"the",
"ground",
"truth",
"."
] | def compute_loss(self, y_true, y_pred):
'''
Compute the loss of the SSD model prediction against the ground truth.
Arguments:
y_true (array): A Numpy array of shape `(batch_size, #boxes, #classes + 8)`,
where `#boxes` is the total number of boxes that the model predicts
per image. Be careful to make sure that the index of each given
box in `y_true` is the same as the index for the corresponding
box in `y_pred`. The last axis must have length `#classes + 8` and contain
`[classes one-hot encoded, 4 ground truth box coordinates, 4 arbitrary entries]`
in this order, including the background class. The last four entries of the
last axis are not used by this function and therefore their contents are
irrelevant, they only exist so that `y_true` has the same shape as `y_pred`,
where the last four entries of the last axis contain the anchor box
coordinates, which are needed during inference. Important: Boxes that
you want the cost function to ignore need to have a one-hot
class vector of all zeros.
y_pred (Keras tensor): The model prediction. The shape is identical
to that of `y_true`.
Returns:
A scalar, the total multitask loss for classification and localization.
'''
batch_size = tf.shape(y_pred)[0] # Output dtype: tf.int32
n_boxes = tf.shape(y_pred)[
1] # Output dtype: tf.int32, note that `n_boxes` in this context denotes the total number of boxes per image, not the number of boxes per cell
# 1: Compute the losses for class and box predictions for every box
classification_loss = tf.cast(
self.log_loss(y_true[:, :, :-12], y_pred[:, :, :-12]),
dtype=tf.float32) # Output shape: (batch_size, n_boxes)
localization_loss = tf.cast(
self.smooth_L1_loss(y_true[:, :, -12:-8], y_pred[:, :, -12:-8]),
dtype=tf.float32) # Output shape: (batch_size, n_boxes)
# 2: Compute the classification losses for the positive and negative targets
# Create masks for the positive and negative ground truth classes
negatives = y_true[:, :, 0] # Tensor of shape (batch_size, n_boxes)
positives = tf.cast(tf.reduce_max(y_true[:, :, 1:-12], axis=-1),
dtype=tf.float32) # Tensor of shape (batch_size, n_boxes)
# Count the number of positive boxes (classes 1 to n) in y_true across the whole batch
n_positive = tf.reduce_sum(positives)
# Now mask all negative boxes and sum up the losses for the positive boxes PER batch item
# (Keras loss functions must output one scalar loss value PER batch item, rather than just
# one scalar for the entire batch, that's why we're not summing across all axes)
pos_class_loss = tf.reduce_sum(classification_loss * positives, axis=-1) # Tensor of shape (batch_size,)
# Compute the classification loss for the negative default boxes (if there are any)
# First, compute the classification loss for all negative boxes
neg_class_loss_all = classification_loss * negatives # Tensor of shape (batch_size, n_boxes)
n_neg_losses = tf.math.count_nonzero(neg_class_loss_all,
dtype=tf.int32) # The number of non-zero loss entries in `neg_class_loss_all`
# What's the point of `n_neg_losses`? For the next step, which will be to compute which negative boxes enter the classification
# loss, we don't just want to know how many negative ground truth boxes there are, but for how many of those there actually is
# a positive (i.e. non-zero) loss. This is necessary because `tf.nn.top_k()` in the function below will pick the top k boxes with
# the highest losses no matter what, even if it receives a vector where all losses are zero. In the unlikely event that all negative
# classification losses ARE actually zero though, this behavior might lead to `tf.nn.top_k()` returning the indices of positive
# boxes, leading to an incorrect negative classification loss computation, and hence an incorrect overall loss computation.
# We therefore need to make sure that `n_negative_keep`, which assumes the role of the `k` argument in `tf.nn.top_k()`,
# is at most the number of negative boxes for which there is a positive classification loss.
# Compute the number of negative examples we want to account for in the loss
# We'll keep at most `self.neg_pos_ratio` times the number of positives in `y_true`, but at least `self.n_neg_min` (unless `n_neg_loses` is smaller)
n_negative_keep = tf.minimum(
tf.maximum(self.neg_pos_ratio * tf.cast(n_positive, dtype=tf.int32), self.n_neg_min),
n_neg_losses)
# In the unlikely case when either (1) there are no negative ground truth boxes at all
# or (2) the classification loss for all negative boxes is zero, return zero as the `neg_class_loss`
def f1():
return tf.zeros([batch_size])
# Otherwise compute the negative loss
def f2():
# Now we'll identify the top-k (where k == `n_negative_keep`) boxes with the highest confidence loss that
# belong to the background class in the ground truth data. Note that this doesn't necessarily mean that the model
# predicted the wrong class for those boxes, it just means that the loss for those boxes is the highest.
# To do this, we reshape `neg_class_loss_all` to 1D...
neg_class_loss_all_1D = tf.reshape(neg_class_loss_all, [-1]) # Tensor of shape (batch_size * n_boxes,)
# ...and then we get the indices for the `n_negative_keep` boxes with the highest loss out of those...
values, indices = tf.nn.top_k(neg_class_loss_all_1D, n_negative_keep, False) # We don't need sorting
# ...and with these indices we'll create a mask...
negatives_keep = tf.scatter_nd(tf.expand_dims(indices, axis=1),
updates=tf.ones_like(indices, dtype=tf.int32), shape=tf.shape(
neg_class_loss_all_1D)) # Tensor of shape (batch_size * n_boxes,)
negatives_keep = tf.cast(
tf.reshape(negatives_keep, [batch_size, n_boxes]),
dtype=tf.float32) # Tensor of shape (batch_size, n_boxes)
# ...and use it to keep only those boxes and mask all other classification losses
neg_class_loss = tf.reduce_sum(classification_loss * negatives_keep,
axis=-1) # Tensor of shape (batch_size,)
return neg_class_loss
neg_class_loss = tf.cond(tf.equal(n_neg_losses, tf.constant(0)), f1, f2)
class_loss = pos_class_loss + neg_class_loss # Tensor of shape (batch_size,)
# 3: Compute the localization loss for the positive targets
# We don't penalize localization loss for negative predicted boxes (obviously: there are no ground truth boxes they would correspond to)
loc_loss = tf.reduce_sum(localization_loss * positives, axis=-1) # Tensor of shape (batch_size,)
# 4: Compute the total loss
total_loss = (self.beta * class_loss + self.alpha * loc_loss) / tf.maximum(1.0,
n_positive) # In case `n_positive == 0`
return total_loss | [
"def",
"compute_loss",
"(",
"self",
",",
"y_true",
",",
"y_pred",
")",
":",
"batch_size",
"=",
"tf",
".",
"shape",
"(",
"y_pred",
")",
"[",
"0",
"]",
"# Output dtype: tf.int32",
"n_boxes",
"=",
"tf",
".",
"shape",
"(",
"y_pred",
")",
"[",
"1",
"]",
"# Output dtype: tf.int32, note that `n_boxes` in this context denotes the total number of boxes per image, not the number of boxes per cell",
"# 1: Compute the losses for class and box predictions for every box",
"classification_loss",
"=",
"tf",
".",
"cast",
"(",
"self",
".",
"log_loss",
"(",
"y_true",
"[",
":",
",",
":",
",",
":",
"-",
"12",
"]",
",",
"y_pred",
"[",
":",
",",
":",
",",
":",
"-",
"12",
"]",
")",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"# Output shape: (batch_size, n_boxes)",
"localization_loss",
"=",
"tf",
".",
"cast",
"(",
"self",
".",
"smooth_L1_loss",
"(",
"y_true",
"[",
":",
",",
":",
",",
"-",
"12",
":",
"-",
"8",
"]",
",",
"y_pred",
"[",
":",
",",
":",
",",
"-",
"12",
":",
"-",
"8",
"]",
")",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"# Output shape: (batch_size, n_boxes)",
"# 2: Compute the classification losses for the positive and negative targets",
"# Create masks for the positive and negative ground truth classes",
"negatives",
"=",
"y_true",
"[",
":",
",",
":",
",",
"0",
"]",
"# Tensor of shape (batch_size, n_boxes)",
"positives",
"=",
"tf",
".",
"cast",
"(",
"tf",
".",
"reduce_max",
"(",
"y_true",
"[",
":",
",",
":",
",",
"1",
":",
"-",
"12",
"]",
",",
"axis",
"=",
"-",
"1",
")",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"# Tensor of shape (batch_size, n_boxes)",
"# Count the number of positive boxes (classes 1 to n) in y_true across the whole batch",
"n_positive",
"=",
"tf",
".",
"reduce_sum",
"(",
"positives",
")",
"# Now mask all negative boxes and sum up the losses for the positive boxes PER batch item",
"# (Keras loss functions must output one scalar loss value PER batch item, rather than just",
"# one scalar for the entire batch, that's why we're not summing across all axes)",
"pos_class_loss",
"=",
"tf",
".",
"reduce_sum",
"(",
"classification_loss",
"*",
"positives",
",",
"axis",
"=",
"-",
"1",
")",
"# Tensor of shape (batch_size,)",
"# Compute the classification loss for the negative default boxes (if there are any)",
"# First, compute the classification loss for all negative boxes",
"neg_class_loss_all",
"=",
"classification_loss",
"*",
"negatives",
"# Tensor of shape (batch_size, n_boxes)",
"n_neg_losses",
"=",
"tf",
".",
"math",
".",
"count_nonzero",
"(",
"neg_class_loss_all",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"# The number of non-zero loss entries in `neg_class_loss_all`",
"# What's the point of `n_neg_losses`? For the next step, which will be to compute which negative boxes enter the classification",
"# loss, we don't just want to know how many negative ground truth boxes there are, but for how many of those there actually is",
"# a positive (i.e. non-zero) loss. This is necessary because `tf.nn.top-k()` in the function below will pick the top k boxes with",
"# the highest losses no matter what, even if it receives a vector where all losses are zero. In the unlikely event that all negative",
"# classification losses ARE actually zero though, this behavior might lead to `tf.nn.top-k()` returning the indices of positive",
"# boxes, leading to an incorrect negative classification loss computation, and hence an incorrect overall loss computation.",
"# We therefore need to make sure that `n_negative_keep`, which assumes the role of the `k` argument in `tf.nn.top-k()`,",
"# is at most the number of negative boxes for which there is a positive classification loss.",
"# Compute the number of negative examples we want to account for in the loss",
"# We'll keep at most `self.neg_pos_ratio` times the number of positives in `y_true`, but at least `self.n_neg_min` (unless `n_neg_loses` is smaller)",
"n_negative_keep",
"=",
"tf",
".",
"minimum",
"(",
"tf",
".",
"maximum",
"(",
"self",
".",
"neg_pos_ratio",
"*",
"tf",
".",
"cast",
"(",
"n_positive",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"self",
".",
"n_neg_min",
")",
",",
"n_neg_losses",
")",
"# In the unlikely case when either (1) there are no negative ground truth boxes at all",
"# or (2) the classification loss for all negative boxes is zero, return zero as the `neg_class_loss`",
"def",
"f1",
"(",
")",
":",
"return",
"tf",
".",
"zeros",
"(",
"[",
"batch_size",
"]",
")",
"# Otherwise compute the negative loss",
"def",
"f2",
"(",
")",
":",
"# Now we'll identify the top-k (where k == `n_negative_keep`) boxes with the highest confidence loss that",
"# belong to the background class in the ground truth data. Note that this doesn't necessarily mean that the model",
"# predicted the wrong class for those boxes, it just means that the loss for those boxes is the highest.",
"# To do this, we reshape `neg_class_loss_all` to 1D...",
"neg_class_loss_all_1D",
"=",
"tf",
".",
"reshape",
"(",
"neg_class_loss_all",
",",
"[",
"-",
"1",
"]",
")",
"# Tensor of shape (batch_size * n_boxes,)",
"# ...and then we get the indices for the `n_negative_keep` boxes with the highest loss out of those...",
"values",
",",
"indices",
"=",
"tf",
".",
"nn",
".",
"top_k",
"(",
"neg_class_loss_all_1D",
",",
"n_negative_keep",
",",
"False",
")",
"# We don't need sorting",
"# ...and with these indices we'll create a mask...",
"negatives_keep",
"=",
"tf",
".",
"scatter_nd",
"(",
"tf",
".",
"expand_dims",
"(",
"indices",
",",
"axis",
"=",
"1",
")",
",",
"updates",
"=",
"tf",
".",
"ones_like",
"(",
"indices",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"shape",
"=",
"tf",
".",
"shape",
"(",
"neg_class_loss_all_1D",
")",
")",
"# Tensor of shape (batch_size * n_boxes,)",
"negatives_keep",
"=",
"tf",
".",
"cast",
"(",
"tf",
".",
"reshape",
"(",
"negatives_keep",
",",
"[",
"batch_size",
",",
"n_boxes",
"]",
")",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"# Tensor of shape (batch_size, n_boxes)",
"# ...and use it to keep only those boxes and mask all other classification losses",
"neg_class_loss",
"=",
"tf",
".",
"reduce_sum",
"(",
"classification_loss",
"*",
"negatives_keep",
",",
"axis",
"=",
"-",
"1",
")",
"# Tensor of shape (batch_size,)",
"return",
"neg_class_loss",
"neg_class_loss",
"=",
"tf",
".",
"cond",
"(",
"tf",
".",
"equal",
"(",
"n_neg_losses",
",",
"tf",
".",
"constant",
"(",
"0",
")",
")",
",",
"f1",
",",
"f2",
")",
"class_loss",
"=",
"pos_class_loss",
"+",
"neg_class_loss",
"# Tensor of shape (batch_size,)",
"# 3: Compute the localization loss for the positive targets",
"# We don't penalize localization loss for negative predicted boxes (obviously: there are no ground truth boxes they would correspond to)",
"loc_loss",
"=",
"tf",
".",
"reduce_sum",
"(",
"localization_loss",
"*",
"positives",
",",
"axis",
"=",
"-",
"1",
")",
"# Tensor of shape (batch_size,)",
"# 4: Compute the total loss",
"total_loss",
"=",
"(",
"self",
".",
"beta",
"*",
"class_loss",
"+",
"self",
".",
"alpha",
"*",
"loc_loss",
")",
"/",
"tf",
".",
"maximum",
"(",
"1.0",
",",
"n_positive",
")",
"# In case `n_positive == 0`",
"return",
"total_loss"
] | https://github.com/bruceyang2012/Face-detection-with-mobilenet-ssd/blob/58fafb6e93d28531797aac1e9a4436730c8cee7c/keras_ssd_loss.py#L101-L215 |
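
The core of the hard-negative-mining step above, restated in NumPy for a single image (illustrative sketch only; the TF version additionally guards the all-zero case with tf.cond and normalizes the total by n_positive):

import numpy as np

def mined_class_loss(cls_loss, positives, neg_pos_ratio=3, n_neg_min=0):
    negatives = 1.0 - positives
    n_pos = int(positives.sum())
    neg_losses = cls_loss * negatives
    k = min(max(neg_pos_ratio * n_pos, n_neg_min),
            int(np.count_nonzero(neg_losses)))
    keep = np.argsort(neg_losses)[::-1][:k]   # indices of the k hardest negatives
    return cls_loss[positives.astype(bool)].sum() + neg_losses[keep].sum()

loss = np.array([0.1, 2.0, 0.5, 0.05, 1.5])
pos = np.array([1.0, 0.0, 0.0, 0.0, 1.0])    # boxes 0 and 4 are positives
print(mined_class_loss(loss, pos))           # positives (1.6) + 3 hardest negatives (2.55) = about 4.15
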
|
getting-things-gnome/gtg | 4b02c43744b32a00facb98174f04ec5953bd055d | GTG/core/datastore.py | python | TaskSource.get_task_filter_for_backend | (self) | return lambda task: backend_filter(self.requester, task,
{"tags": set(attached_tags)}) | Filter that checks if the task should be stored in this backend.
@returns function: a function that accepts a task and returns
True/False whether the task should be stored or not | Filter that checks if the task should be stored in this backend. | [
"Filter",
"that",
"checks",
"if",
"the",
"task",
"should",
"be",
"stored",
"in",
"this",
"backend",
"."
] | def get_task_filter_for_backend(self):
"""
Filter that checks if the task should be stored in this backend.
@returns function: a function that accepts a task and returns
True/False whether the task should be stored or not
"""
def backend_filter(req, task, parameters):
"""
Filter that checks if two tag sets intersect. It is used to check
if a task should be stored inside a backend
@param task: a task object
@param tags_to_match_set: a *set* of tag names
"""
try:
tags_to_match_set = parameters['tags']
except KeyError:
return []
all_tasks_tag = req.get_alltag_tag().get_name()
if all_tasks_tag in tags_to_match_set:
return True
task_tags = set(task.get_tags_name())
return task_tags.intersection(tags_to_match_set)
attached_tags = self.backend.get_attached_tags()
return lambda task: backend_filter(self.requester, task,
{"tags": set(attached_tags)}) | [
"def",
"get_task_filter_for_backend",
"(",
"self",
")",
":",
"def",
"backend_filter",
"(",
"req",
",",
"task",
",",
"parameters",
")",
":",
"\"\"\"\n Filter that checks if two tags sets intersect. It is used to check\n if a task should be stored inside a backend\n @param task: a task object\n @param tags_to_match_set: a *set* of tag names\n \"\"\"",
"try",
":",
"tags_to_match_set",
"=",
"parameters",
"[",
"'tags'",
"]",
"except",
"KeyError",
":",
"return",
"[",
"]",
"all_tasks_tag",
"=",
"req",
".",
"get_alltag_tag",
"(",
")",
".",
"get_name",
"(",
")",
"if",
"all_tasks_tag",
"in",
"tags_to_match_set",
":",
"return",
"True",
"task_tags",
"=",
"set",
"(",
"task",
".",
"get_tags_name",
"(",
")",
")",
"return",
"task_tags",
".",
"intersection",
"(",
"tags_to_match_set",
")",
"attached_tags",
"=",
"self",
".",
"backend",
".",
"get_attached_tags",
"(",
")",
"return",
"lambda",
"task",
":",
"backend_filter",
"(",
"self",
".",
"requester",
",",
"task",
",",
"{",
"\"tags\"",
":",
"set",
"(",
"attached_tags",
")",
"}",
")"
] | https://github.com/getting-things-gnome/gtg/blob/4b02c43744b32a00facb98174f04ec5953bd055d/GTG/core/datastore.py#L699-L726 |
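
The closure above reduces to a set-intersection test; tiny self-contained sketch (the 'gtg-tags-all' sentinel is a guess at what get_alltag_tag() returns):

def backend_accepts(task_tags, attached_tags, all_tasks_tag='gtg-tags-all'):
    tags_to_match = set(attached_tags)
    if all_tasks_tag in tags_to_match:
        return True
    return bool(set(task_tags) & tags_to_match)

assert backend_accepts(['@home', '@errand'], ['@home'])
assert not backend_accepts(['@work'], ['@home'])
assert backend_accepts(['@work'], ['gtg-tags-all'])   # catch-all backend
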
|
kobiso/CBAM-keras | 796ae9ea31253d87f46ac4908e94ad5d799fbdd5 | models/.ipynb_checkpoints/mobilenets-checkpoint.py | python | _depthwise_conv_block | (inputs, pointwise_conv_filters, alpha,
depth_multiplier=1, strides=(1, 1), block_id=1, attention_module=None) | return x | Adds a depthwise convolution block.
A depthwise convolution block consists of a depthwise conv,
batch normalization, relu6, pointwise convolution,
batch normalization and relu6 activation.
# Arguments
inputs: Input tensor of shape `(rows, cols, channels)`
(with `channels_last` data format) or
(channels, rows, cols) (with `channels_first` data format).
pointwise_conv_filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the pointwise convolution).
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
block_id: Integer, a unique identification designating the block number.
# Input shape
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(batch, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
# Returns
Output tensor of block. | Adds a depthwise convolution block.
A depthwise convolution block consists of a depthwise conv,
batch normalization, relu6, pointwise convolution,
batch normalization and relu6 activation.
# Arguments
inputs: Input tensor of shape `(rows, cols, channels)`
(with `channels_last` data format) or
(channels, rows, cols) (with `channels_first` data format).
pointwise_conv_filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the pointwise convolution).
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
block_id: Integer, a unique identification designating the block number.
# Input shape
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(batch, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
# Returns
Output tensor of block. | [
"Adds",
"a",
"depthwise",
"convolution",
"block",
".",
"A",
"depthwise",
"convolution",
"block",
"consists",
"of",
"a",
"depthwise",
"conv",
"batch",
"normalization",
"relu6",
"pointwise",
"convolution",
"batch",
"normalization",
"and",
"relu6",
"activation",
".",
"#",
"Arguments",
"inputs",
":",
"Input",
"tensor",
"of",
"shape",
"(",
"rows",
"cols",
"channels",
")",
"(",
"with",
"channels_last",
"data",
"format",
")",
"or",
"(",
"channels",
"rows",
"cols",
")",
"(",
"with",
"channels_first",
"data",
"format",
")",
".",
"pointwise_conv_filters",
":",
"Integer",
"the",
"dimensionality",
"of",
"the",
"output",
"space",
"(",
"i",
".",
"e",
".",
"the",
"number",
"output",
"of",
"filters",
"in",
"the",
"pointwise",
"convolution",
")",
".",
"alpha",
":",
"controls",
"the",
"width",
"of",
"the",
"network",
".",
"-",
"If",
"alpha",
"<",
"1",
".",
"0",
"proportionally",
"decreases",
"the",
"number",
"of",
"filters",
"in",
"each",
"layer",
".",
"-",
"If",
"alpha",
">",
"1",
".",
"0",
"proportionally",
"increases",
"the",
"number",
"of",
"filters",
"in",
"each",
"layer",
".",
"-",
"If",
"alpha",
"=",
"1",
"default",
"number",
"of",
"filters",
"from",
"the",
"paper",
"are",
"used",
"at",
"each",
"layer",
".",
"depth_multiplier",
":",
"The",
"number",
"of",
"depthwise",
"convolution",
"output",
"channels",
"for",
"each",
"input",
"channel",
".",
"The",
"total",
"number",
"of",
"depthwise",
"convolution",
"output",
"channels",
"will",
"be",
"equal",
"to",
"filters_in",
"*",
"depth_multiplier",
".",
"strides",
":",
"An",
"integer",
"or",
"tuple",
"/",
"list",
"of",
"2",
"integers",
"specifying",
"the",
"strides",
"of",
"the",
"convolution",
"along",
"the",
"width",
"and",
"height",
".",
"Can",
"be",
"a",
"single",
"integer",
"to",
"specify",
"the",
"same",
"value",
"for",
"all",
"spatial",
"dimensions",
".",
"Specifying",
"any",
"stride",
"value",
"!",
"=",
"1",
"is",
"incompatible",
"with",
"specifying",
"any",
"dilation_rate",
"value",
"!",
"=",
"1",
".",
"block_id",
":",
"Integer",
"a",
"unique",
"identification",
"designating",
"the",
"block",
"number",
".",
"#",
"Input",
"shape",
"4D",
"tensor",
"with",
"shape",
":",
"(",
"batch",
"channels",
"rows",
"cols",
")",
"if",
"data_format",
"=",
"channels_first",
"or",
"4D",
"tensor",
"with",
"shape",
":",
"(",
"batch",
"rows",
"cols",
"channels",
")",
"if",
"data_format",
"=",
"channels_last",
".",
"#",
"Output",
"shape",
"4D",
"tensor",
"with",
"shape",
":",
"(",
"batch",
"filters",
"new_rows",
"new_cols",
")",
"if",
"data_format",
"=",
"channels_first",
"or",
"4D",
"tensor",
"with",
"shape",
":",
"(",
"batch",
"new_rows",
"new_cols",
"filters",
")",
"if",
"data_format",
"=",
"channels_last",
".",
"rows",
"and",
"cols",
"values",
"might",
"have",
"changed",
"due",
"to",
"stride",
".",
"#",
"Returns",
"Output",
"tensor",
"of",
"block",
"."
] | def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha,
depth_multiplier=1, strides=(1, 1), block_id=1, attention_module=None):
"""Adds a depthwise convolution block.
A depthwise convolution block consists of a depthwise conv,
batch normalization, relu6, pointwise convolution,
batch normalization and relu6 activation.
# Arguments
inputs: Input tensor of shape `(rows, cols, channels)`
(with `channels_last` data format) or
(channels, rows, cols) (with `channels_first` data format).
pointwise_conv_filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the pointwise convolution).
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
block_id: Integer, a unique identification designating the block number.
# Input shape
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(batch, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
# Returns
Output tensor of block.
"""
channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
pointwise_conv_filters = int(pointwise_conv_filters * alpha)
x = DepthwiseConv2D((3, 3),
padding='same',
depth_multiplier=depth_multiplier,
strides=strides,
use_bias=False,
name='conv_dw_%d' % block_id)(inputs)
x = BatchNormalization(axis=channel_axis, name='conv_dw_%d_bn' % block_id)(x)
x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)
x = Conv2D(pointwise_conv_filters, (1, 1),
padding='same',
use_bias=False,
strides=(1, 1),
name='conv_pw_%d' % block_id)(x)
x = BatchNormalization(axis=channel_axis, name='conv_pw_%d_bn' % block_id)(x)
x = Activation(relu6, name='conv_pw_%d_relu' % block_id)(x)
if attention_module == 'se_block':
x = se_block(x)
if attention_module == 'cbam_block':
x = cbam_block(x)
return x | [
"def",
"_depthwise_conv_block",
"(",
"inputs",
",",
"pointwise_conv_filters",
",",
"alpha",
",",
"depth_multiplier",
"=",
"1",
",",
"strides",
"=",
"(",
"1",
",",
"1",
")",
",",
"block_id",
"=",
"1",
",",
"attention_module",
"=",
"None",
")",
":",
"channel_axis",
"=",
"1",
"if",
"K",
".",
"image_data_format",
"(",
")",
"==",
"'channels_first'",
"else",
"-",
"1",
"pointwise_conv_filters",
"=",
"int",
"(",
"pointwise_conv_filters",
"*",
"alpha",
")",
"x",
"=",
"DepthwiseConv2D",
"(",
"(",
"3",
",",
"3",
")",
",",
"padding",
"=",
"'same'",
",",
"depth_multiplier",
"=",
"depth_multiplier",
",",
"strides",
"=",
"strides",
",",
"use_bias",
"=",
"False",
",",
"name",
"=",
"'conv_dw_%d'",
"%",
"block_id",
")",
"(",
"inputs",
")",
"x",
"=",
"BatchNormalization",
"(",
"axis",
"=",
"channel_axis",
",",
"name",
"=",
"'conv_dw_%d_bn'",
"%",
"block_id",
")",
"(",
"x",
")",
"x",
"=",
"Activation",
"(",
"relu6",
",",
"name",
"=",
"'conv_dw_%d_relu'",
"%",
"block_id",
")",
"(",
"x",
")",
"x",
"=",
"Conv2D",
"(",
"pointwise_conv_filters",
",",
"(",
"1",
",",
"1",
")",
",",
"padding",
"=",
"'same'",
",",
"use_bias",
"=",
"False",
",",
"strides",
"=",
"(",
"1",
",",
"1",
")",
",",
"name",
"=",
"'conv_pw_%d'",
"%",
"block_id",
")",
"(",
"x",
")",
"x",
"=",
"BatchNormalization",
"(",
"axis",
"=",
"channel_axis",
",",
"name",
"=",
"'conv_pw_%d_bn'",
"%",
"block_id",
")",
"(",
"x",
")",
"x",
"=",
"Activation",
"(",
"relu6",
",",
"name",
"=",
"'conv_pw_%d_relu'",
"%",
"block_id",
")",
"(",
"x",
")",
"if",
"attention_module",
"==",
"'se_block'",
":",
"x",
"=",
"se_block",
"(",
"x",
")",
"if",
"attention_module",
"==",
"'cbam_block'",
":",
"x",
"=",
"cbam_block",
"(",
"x",
")",
"return",
"x"
] | https://github.com/kobiso/CBAM-keras/blob/796ae9ea31253d87f46ac4908e94ad5d799fbdd5/models/.ipynb_checkpoints/mobilenets-checkpoint.py#L477-L546 |
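
Why the depthwise separable block above is cheap, in numbers (back-of-the-envelope sketch, ignoring BatchNorm parameters): a standard 3x3 convolution from C_in to C_out channels costs 3*3*C_in*C_out weights, while the block costs 3*3*C_in (depthwise) plus 1*1*C_in*C_out (pointwise):

def conv_params(c_in, c_out, k=3):
    return k * k * c_in * c_out

def depthwise_separable_params(c_in, c_out, k=3, depth_multiplier=1):
    return k * k * c_in * depth_multiplier + c_in * depth_multiplier * c_out

print(conv_params(256, 256))                 # 589824
print(depthwise_separable_params(256, 256))  # 67840 -> roughly 8.7x fewer weights
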
|
microsoft/unilm | 65f15af2a307ebb64cfb25adf54375b002e6fe8d | infoxlm/fairseq/fairseq/progress_bar.py | python | simple_progress_bar.print | (self, stats, tag='', step=None) | Print end-of-epoch stats. | Print end-of-epoch stats. | [
"Print",
"end",
"-",
"of",
"-",
"epoch",
"stats",
"."
] | def print(self, stats, tag='', step=None):
"""Print end-of-epoch stats."""
postfix = self._str_pipes(self._format_stats(stats))
print('{} | {}'.format(self.prefix, postfix), flush=True) | [
"def",
"print",
"(",
"self",
",",
"stats",
",",
"tag",
"=",
"''",
",",
"step",
"=",
"None",
")",
":",
"postfix",
"=",
"self",
".",
"_str_pipes",
"(",
"self",
".",
"_format_stats",
"(",
"stats",
")",
")",
"print",
"(",
"'{} | {}'",
".",
"format",
"(",
"self",
".",
"prefix",
",",
"postfix",
")",
",",
"flush",
"=",
"True",
")"
] | https://github.com/microsoft/unilm/blob/65f15af2a307ebb64cfb25adf54375b002e6fe8d/infoxlm/fairseq/fairseq/progress_bar.py#L194-L197 |
||
Yelp/mrjob | 091572e87bc24cc64be40278dd0f5c3617c98d4b | mrjob/emr.py | python | EMRJobRunner.get_cluster_id | (self) | return self._cluster_id | Get the ID of the cluster our job is running on, or ``None``. | Get the ID of the cluster our job is running on, or ``None``. | [
"Get",
"the",
"ID",
"of",
"the",
"cluster",
"our",
"job",
"is",
"running",
"on",
"or",
"None",
"."
] | def get_cluster_id(self):
"""Get the ID of the cluster our job is running on, or ``None``."""
return self._cluster_id | [
"def",
"get_cluster_id",
"(",
"self",
")",
":",
"return",
"self",
".",
"_cluster_id"
] | https://github.com/Yelp/mrjob/blob/091572e87bc24cc64be40278dd0f5c3617c98d4b/mrjob/emr.py#L2388-L2390 |
|
Komodo/KomodoEdit | 61edab75dce2bdb03943b387b0608ea36f548e8e | contrib/paramiko/paramiko/transport.py | python | Transport.open_channel | (self,
kind,
dest_addr=None,
src_addr=None,
window_size=None,
max_packet_size=None,
timeout=None) | Request a new channel to the server. `Channels <.Channel>` are
socket-like objects used for the actual transfer of data across the
session. You may only request a channel after negotiating encryption
(using `connect` or `start_client`) and authenticating.
.. note:: Modifying the window and packet sizes might have adverse
effects on the channel created. The default values are the same
as in the OpenSSH code base and have been battle tested.
:param str kind:
the kind of channel requested (usually ``"session"``,
``"forwarded-tcpip"``, ``"direct-tcpip"``, or ``"x11"``)
:param tuple dest_addr:
the destination address (address + port tuple) of this port
forwarding, if ``kind`` is ``"forwarded-tcpip"`` or
``"direct-tcpip"`` (ignored for other channel types)
:param src_addr: the source address of this port forwarding, if
``kind`` is ``"forwarded-tcpip"``, ``"direct-tcpip"``, or ``"x11"``
:param int window_size:
optional window size for this session.
:param int max_packet_size:
optional max packet size for this session.
:param float timeout:
optional timeout opening a channel, default 3600s (1h)
:return: a new `.Channel` on success
:raises SSHException: if the request is rejected, the session ends
prematurely or there is a timeout opening a channel
.. versionchanged:: 1.15
Added the ``window_size`` and ``max_packet_size`` arguments. | Request a new channel to the server. `Channels <.Channel>` are
socket-like objects used for the actual transfer of data across the
session. You may only request a channel after negotiating encryption
(using `connect` or `start_client`) and authenticating. | [
"Request",
"a",
"new",
"channel",
"to",
"the",
"server",
".",
"Channels",
"<",
".",
"Channel",
">",
"are",
"socket",
"-",
"like",
"objects",
"used",
"for",
"the",
"actual",
"transfer",
"of",
"data",
"across",
"the",
"session",
".",
"You",
"may",
"only",
"request",
"a",
"channel",
"after",
"negotiating",
"encryption",
"(",
"using",
"connect",
"or",
"start_client",
")",
"and",
"authenticating",
"."
] | def open_channel(self,
kind,
dest_addr=None,
src_addr=None,
window_size=None,
max_packet_size=None,
timeout=None):
"""
Request a new channel to the server. `Channels <.Channel>` are
socket-like objects used for the actual transfer of data across the
session. You may only request a channel after negotiating encryption
(using `connect` or `start_client`) and authenticating.
.. note:: Modifying the window and packet sizes might have adverse
effects on the channel created. The default values are the same
as in the OpenSSH code base and have been battle tested.
:param str kind:
the kind of channel requested (usually ``"session"``,
``"forwarded-tcpip"``, ``"direct-tcpip"``, or ``"x11"``)
:param tuple dest_addr:
the destination address (address + port tuple) of this port
forwarding, if ``kind`` is ``"forwarded-tcpip"`` or
``"direct-tcpip"`` (ignored for other channel types)
:param src_addr: the source address of this port forwarding, if
``kind`` is ``"forwarded-tcpip"``, ``"direct-tcpip"``, or ``"x11"``
:param int window_size:
optional window size for this session.
:param int max_packet_size:
optional max packet size for this session.
:param float timeout:
optional timeout opening a channel, default 3600s (1h)
:return: a new `.Channel` on success
:raises SSHException: if the request is rejected, the session ends
prematurely or there is a timeout opening a channel
.. versionchanged:: 1.15
Added the ``window_size`` and ``max_packet_size`` arguments.
"""
if not self.active:
raise SSHException('SSH session not active')
timeout = 3600 if timeout is None else timeout
self.lock.acquire()
try:
window_size = self._sanitize_window_size(window_size)
max_packet_size = self._sanitize_packet_size(max_packet_size)
chanid = self._next_channel()
m = Message()
m.add_byte(cMSG_CHANNEL_OPEN)
m.add_string(kind)
m.add_int(chanid)
m.add_int(window_size)
m.add_int(max_packet_size)
if (kind == 'forwarded-tcpip') or (kind == 'direct-tcpip'):
m.add_string(dest_addr[0])
m.add_int(dest_addr[1])
m.add_string(src_addr[0])
m.add_int(src_addr[1])
elif kind == 'x11':
m.add_string(src_addr[0])
m.add_int(src_addr[1])
chan = Channel(chanid)
self._channels.put(chanid, chan)
self.channel_events[chanid] = event = threading.Event()
self.channels_seen[chanid] = True
chan._set_transport(self)
chan._set_window(window_size, max_packet_size)
finally:
self.lock.release()
self._send_user_message(m)
start_ts = time.time()
while True:
event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is None:
e = SSHException('Unable to open channel.')
raise e
if event.is_set():
break
elif start_ts + timeout < time.time():
raise SSHException('Timeout openning channel.')
chan = self._channels.get(chanid)
if chan is not None:
return chan
e = self.get_exception()
if e is None:
e = SSHException('Unable to open channel.')
raise e | [
"def",
"open_channel",
"(",
"self",
",",
"kind",
",",
"dest_addr",
"=",
"None",
",",
"src_addr",
"=",
"None",
",",
"window_size",
"=",
"None",
",",
"max_packet_size",
"=",
"None",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"active",
":",
"raise",
"SSHException",
"(",
"'SSH session not active'",
")",
"timeout",
"=",
"3600",
"if",
"timeout",
"is",
"None",
"else",
"timeout",
"self",
".",
"lock",
".",
"acquire",
"(",
")",
"try",
":",
"window_size",
"=",
"self",
".",
"_sanitize_window_size",
"(",
"window_size",
")",
"max_packet_size",
"=",
"self",
".",
"_sanitize_packet_size",
"(",
"max_packet_size",
")",
"chanid",
"=",
"self",
".",
"_next_channel",
"(",
")",
"m",
"=",
"Message",
"(",
")",
"m",
".",
"add_byte",
"(",
"cMSG_CHANNEL_OPEN",
")",
"m",
".",
"add_string",
"(",
"kind",
")",
"m",
".",
"add_int",
"(",
"chanid",
")",
"m",
".",
"add_int",
"(",
"window_size",
")",
"m",
".",
"add_int",
"(",
"max_packet_size",
")",
"if",
"(",
"kind",
"==",
"'forwarded-tcpip'",
")",
"or",
"(",
"kind",
"==",
"'direct-tcpip'",
")",
":",
"m",
".",
"add_string",
"(",
"dest_addr",
"[",
"0",
"]",
")",
"m",
".",
"add_int",
"(",
"dest_addr",
"[",
"1",
"]",
")",
"m",
".",
"add_string",
"(",
"src_addr",
"[",
"0",
"]",
")",
"m",
".",
"add_int",
"(",
"src_addr",
"[",
"1",
"]",
")",
"elif",
"kind",
"==",
"'x11'",
":",
"m",
".",
"add_string",
"(",
"src_addr",
"[",
"0",
"]",
")",
"m",
".",
"add_int",
"(",
"src_addr",
"[",
"1",
"]",
")",
"chan",
"=",
"Channel",
"(",
"chanid",
")",
"self",
".",
"_channels",
".",
"put",
"(",
"chanid",
",",
"chan",
")",
"self",
".",
"channel_events",
"[",
"chanid",
"]",
"=",
"event",
"=",
"threading",
".",
"Event",
"(",
")",
"self",
".",
"channels_seen",
"[",
"chanid",
"]",
"=",
"True",
"chan",
".",
"_set_transport",
"(",
"self",
")",
"chan",
".",
"_set_window",
"(",
"window_size",
",",
"max_packet_size",
")",
"finally",
":",
"self",
".",
"lock",
".",
"release",
"(",
")",
"self",
".",
"_send_user_message",
"(",
"m",
")",
"start_ts",
"=",
"time",
".",
"time",
"(",
")",
"while",
"True",
":",
"event",
".",
"wait",
"(",
"0.1",
")",
"if",
"not",
"self",
".",
"active",
":",
"e",
"=",
"self",
".",
"get_exception",
"(",
")",
"if",
"e",
"is",
"None",
":",
"e",
"=",
"SSHException",
"(",
"'Unable to open channel.'",
")",
"raise",
"e",
"if",
"event",
".",
"is_set",
"(",
")",
":",
"break",
"elif",
"start_ts",
"+",
"timeout",
"<",
"time",
".",
"time",
"(",
")",
":",
"raise",
"SSHException",
"(",
"'Timeout openning channel.'",
")",
"chan",
"=",
"self",
".",
"_channels",
".",
"get",
"(",
"chanid",
")",
"if",
"chan",
"is",
"not",
"None",
":",
"return",
"chan",
"e",
"=",
"self",
".",
"get_exception",
"(",
")",
"if",
"e",
"is",
"None",
":",
"e",
"=",
"SSHException",
"(",
"'Unable to open channel.'",
")",
"raise",
"e"
] | https://github.com/Komodo/KomodoEdit/blob/61edab75dce2bdb03943b387b0608ea36f548e8e/contrib/paramiko/paramiko/transport.py#L746-L836 |
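A minimal usage sketch for the open_channel record above. The endpoint, credentials, and forwarded address are placeholders, and host-key checking and error handling are omitted; the channel kind and argument names come straight from the docstring.
import paramiko
# Hypothetical server and credentials -- substitute real values.
transport = paramiko.Transport(('ssh.example.com', 22))
transport.connect(username='user', password='secret')
# Tunnel to 10.0.0.5:80 through the SSH server via a 'direct-tcpip' channel.
chan = transport.open_channel(
    'direct-tcpip',
    dest_addr=('10.0.0.5', 80),
    src_addr=('127.0.0.1', 0),
    timeout=30)
chan.send(b'GET / HTTP/1.0\r\n\r\n')  # the channel behaves like a socket
print(chan.recv(1024))
chan.close()
transport.close()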
||
AppScale/gts | 46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9 | AppTaskQueue/appscale/taskqueue/queue.py | python | PostgresPullQueue.get_task | (self, task, omit_payload=False) | return self._task_from_row(columns, row, id=task.id) | Gets a task from the queue.
Args:
task: A Task object.
omit_payload: A boolean indicating that the payload should not be
fetched.
Returns:
A task object or None. | Gets a task from the queue. | [
"Gets",
"a",
"task",
"from",
"the",
"queue",
"."
] | def get_task(self, task, omit_payload=False):
""" Gets a task from the queue.
Args:
task: A Task object.
omit_payload: A boolean indicating that the payload should not be
fetched.
Returns:
A task object or None.
"""
if omit_payload:
columns = ['task_name', 'time_enqueued',
'lease_expires', 'lease_count', 'tag']
else:
columns = ['payload', 'task_name', 'time_enqueued',
'lease_expires', 'lease_count', 'tag']
pg_connection = pg_wrapper.get_connection()
with pg_connection:
with pg_connection.cursor() as pg_cursor:
pg_cursor.execute(
'SELECT {columns} FROM "{tasks_table}" '
'WHERE task_name = %(task_name)s AND time_deleted IS NULL'
.format(columns=', '.join(columns),
tasks_table=self.tasks_table_name),
vars={
'task_name': task.id,
}
)
row = pg_cursor.fetchone()
if not row:
return None
return self._task_from_row(columns, row, id=task.id) | [
"def",
"get_task",
"(",
"self",
",",
"task",
",",
"omit_payload",
"=",
"False",
")",
":",
"if",
"omit_payload",
":",
"columns",
"=",
"[",
"'task_name'",
",",
"'time_enqueued'",
",",
"'lease_expires'",
",",
"'lease_count'",
",",
"'tag'",
"]",
"else",
":",
"columns",
"=",
"[",
"'payload'",
",",
"'task_name'",
",",
"'time_enqueued'",
",",
"'lease_expires'",
",",
"'lease_count'",
",",
"'tag'",
"]",
"pg_connection",
"=",
"pg_wrapper",
".",
"get_connection",
"(",
")",
"with",
"pg_connection",
":",
"with",
"pg_connection",
".",
"cursor",
"(",
")",
"as",
"pg_cursor",
":",
"pg_cursor",
".",
"execute",
"(",
"'SELECT {columns} FROM \"{tasks_table}\" '",
"'WHERE task_name = %(task_name)s AND time_deleted IS NULL'",
".",
"format",
"(",
"columns",
"=",
"', '",
".",
"join",
"(",
"columns",
")",
",",
"tasks_table",
"=",
"self",
".",
"tasks_table_name",
")",
",",
"vars",
"=",
"{",
"'task_name'",
":",
"task",
".",
"id",
",",
"}",
")",
"row",
"=",
"pg_cursor",
".",
"fetchone",
"(",
")",
"if",
"not",
"row",
":",
"return",
"None",
"return",
"self",
".",
"_task_from_row",
"(",
"columns",
",",
"row",
",",
"id",
"=",
"task",
".",
"id",
")"
] | https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppTaskQueue/appscale/taskqueue/queue.py#L313-L345 |
|
andresriancho/w3af | cd22e5252243a87aaa6d0ddea47cf58dacfe00a9 | w3af/core/ui/gui/tools/proxywin.py | python | ProxiedRequests.reload_options | (self) | Reload options.
1. Stop proxy
2. Try to start proxy with new params
3. If can't => alert
4. If everything is ok then start proxy
5. Set Trap options
6. Save options | Reload options.
1. Stop proxy
2. Try to start proxy with new params
3. If can't => alert
4. If everything is ok then start proxy
5. Set Trap options
6. Save options | [
"Reload",
"options",
".",
"1",
".",
"Stop",
"proxy",
"2",
".",
"Try",
"to",
"start",
"proxy",
"with",
"new",
"params",
"3",
".",
"If",
"can",
"t",
"=",
">",
"alert",
"4",
".",
"If",
"everything",
"is",
"ok",
"then",
"start",
"proxy",
"5",
".",
"Set",
"Trap",
"options",
"6",
".",
"Save",
"options"
] | def reload_options(self):
"""Reload options.
1. Stop proxy
2. Try to start proxy with new params
3. If can't => alert
4. If everything is ok then start proxy
5. Set Trap options
6. Save options
"""
new_port = self.pref.get_value('proxy', 'ipport')
if new_port != self._prev_ip_port:
self.w3af.mainwin.sb(_("Stopping local proxy"))
if self.proxy:
self.proxy.stop()
try:
self._start_proxy()
except ProxyException:
# Ups, port looks already used..:(
# Let's show alert and focus Options tab
self.w3af.mainwin.sb(_("Failed to start local proxy"))
self.fuzzable = None
self.waiting_requests = False
self.keep_checking = False
# Focus Options tab
self.nb.set_current_page(2)
return
else:
self.fuzzable = None
self.waiting_requests = True
self.keep_checking = True
# Config test
try:
self.proxy.set_what_to_trap(self.pref.get_value('proxy', 'trap'))
self.proxy.set_what_not_to_trap(self.pref.get_value('proxy', 'notrap'))
self.proxy.set_methods_to_trap(self.pref.get_value('proxy', 'methodtrap'))
except BaseFrameworkException, w3:
self.show_alert(_("Invalid configuration!\n" + str(w3)))
self._prev_ip_port = new_port
httpeditor = self.reqresp.request.get_view_by_id('HttpRawView')
httpeditor.set_show_line_numbers(self.pref.get_value('editor',
'display_line_num'))
httpeditor.set_highlight_current_line(self.pref.get_value('editor',
'highlight_current_line'))
httpeditor.set_highlight_syntax(self.pref.get_value('editor',
'highlight_syntax'))
httpeditor.set_wrap(self.pref.get_value('editor', 'wrap'))
self.pref.save()
if self._layout != self.pref.get_value('proxy', 'trap_view'):
self.show_alert(_('Some of options will take effect after you'
' restart proxy tool')) | [
"def",
"reload_options",
"(",
"self",
")",
":",
"new_port",
"=",
"self",
".",
"pref",
".",
"get_value",
"(",
"'proxy'",
",",
"'ipport'",
")",
"if",
"new_port",
"!=",
"self",
".",
"_prev_ip_port",
":",
"self",
".",
"w3af",
".",
"mainwin",
".",
"sb",
"(",
"_",
"(",
"\"Stopping local proxy\"",
")",
")",
"if",
"self",
".",
"proxy",
":",
"self",
".",
"proxy",
".",
"stop",
"(",
")",
"try",
":",
"self",
".",
"_start_proxy",
"(",
")",
"except",
"ProxyException",
":",
"# Ups, port looks already used..:(",
"# Let's show alert and focus Options tab",
"self",
".",
"w3af",
".",
"mainwin",
".",
"sb",
"(",
"_",
"(",
"\"Failed to start local proxy\"",
")",
")",
"self",
".",
"fuzzable",
"=",
"None",
"self",
".",
"waiting_requests",
"=",
"False",
"self",
".",
"keep_checking",
"=",
"False",
"# Focus Options tab",
"self",
".",
"nb",
".",
"set_current_page",
"(",
"2",
")",
"return",
"else",
":",
"self",
".",
"fuzzable",
"=",
"None",
"self",
".",
"waiting_requests",
"=",
"True",
"self",
".",
"keep_checking",
"=",
"True",
"# Config test",
"try",
":",
"self",
".",
"proxy",
".",
"set_what_to_trap",
"(",
"self",
".",
"pref",
".",
"get_value",
"(",
"'proxy'",
",",
"'trap'",
")",
")",
"self",
".",
"proxy",
".",
"set_what_not_to_trap",
"(",
"self",
".",
"pref",
".",
"get_value",
"(",
"'proxy'",
",",
"'notrap'",
")",
")",
"self",
".",
"proxy",
".",
"set_methods_to_trap",
"(",
"self",
".",
"pref",
".",
"get_value",
"(",
"'proxy'",
",",
"'methodtrap'",
")",
")",
"except",
"BaseFrameworkException",
",",
"w3",
":",
"self",
".",
"show_alert",
"(",
"_",
"(",
"\"Invalid configuration!\\n\"",
"+",
"str",
"(",
"w3",
")",
")",
")",
"self",
".",
"_prev_ip_port",
"=",
"new_port",
"httpeditor",
"=",
"self",
".",
"reqresp",
".",
"request",
".",
"get_view_by_id",
"(",
"'HttpRawView'",
")",
"httpeditor",
".",
"set_show_line_numbers",
"(",
"self",
".",
"pref",
".",
"get_value",
"(",
"'editor'",
",",
"'display_line_num'",
")",
")",
"httpeditor",
".",
"set_highlight_current_line",
"(",
"self",
".",
"pref",
".",
"get_value",
"(",
"'editor'",
",",
"'highlight_current_line'",
")",
")",
"httpeditor",
".",
"set_highlight_syntax",
"(",
"self",
".",
"pref",
".",
"get_value",
"(",
"'editor'",
",",
"'highlight_syntax'",
")",
")",
"httpeditor",
".",
"set_wrap",
"(",
"self",
".",
"pref",
".",
"get_value",
"(",
"'editor'",
",",
"'wrap'",
")",
")",
"self",
".",
"pref",
".",
"save",
"(",
")",
"if",
"self",
".",
"_layout",
"!=",
"self",
".",
"pref",
".",
"get_value",
"(",
"'proxy'",
",",
"'trap_view'",
")",
":",
"self",
".",
"show_alert",
"(",
"_",
"(",
"'Some of options will take effect after you'",
"' restart proxy tool'",
")",
")"
] | https://github.com/andresriancho/w3af/blob/cd22e5252243a87aaa6d0ddea47cf58dacfe00a9/w3af/core/ui/gui/tools/proxywin.py#L238-L291 |
||
mathics/Mathics | 318e06dea8f1c70758a50cb2f95c9900150e3a68 | mathics/builtin/structure.py | python | Apply.apply_invalidlevel | (self, f, expr, ls, evaluation, options={}) | Apply[f_, expr_, ls_, OptionsPattern[Apply]] | Apply[f_, expr_, ls_, OptionsPattern[Apply]] | [
"Apply",
"[",
"f_",
"expr_",
"ls_",
"OptionsPattern",
"[",
"Apply",
"]]"
] | def apply_invalidlevel(self, f, expr, ls, evaluation, options={}):
"Apply[f_, expr_, ls_, OptionsPattern[Apply]]"
evaluation.message("Apply", "level", ls) | [
"def",
"apply_invalidlevel",
"(",
"self",
",",
"f",
",",
"expr",
",",
"ls",
",",
"evaluation",
",",
"options",
"=",
"{",
"}",
")",
":",
"evaluation",
".",
"message",
"(",
"\"Apply\"",
",",
"\"level\"",
",",
"ls",
")"
] | https://github.com/mathics/Mathics/blob/318e06dea8f1c70758a50cb2f95c9900150e3a68/mathics/builtin/structure.py#L434-L437 |
||
Trusted-AI/adversarial-robustness-toolbox | 9fabffdbb92947efa1ecc5d825d634d30dfbaf29 | art/attacks/evasion/pe_malware_attack.py | python | MalwareGDTensorFlow.check_valid_size | (
self,
y: np.ndarray,
sample_sizes: np.ndarray,
append_perturbation_size: np.ndarray,
) | return adv_label_vector | Checks that we can append the l0 perturbation to the malware sample and not exceed the
maximum file size. A new label vector with just the valid files indicated is created.
:param y: Labels.
:param sample_sizes: The size of the original file, before it was padded to the input size required by MalConv.
:param append_perturbation_size: Size of the perturbations in L0 terms to put at end of file.
:return adv_label_vector: Labels which indicate which malware samples have enough free features to
accommodate all the adversarial perturbation. | Checks that we can append the l0 perturbation to the malware sample and not exceed the
maximum file size. A new label vector with just the valid files indicated is created. | [
"Checks",
"that",
"we",
"can",
"append",
"the",
"l0",
"perturbation",
"to",
"the",
"malware",
"sample",
"and",
"not",
"exceed",
"the",
"maximum",
"file",
"size",
".",
"A",
"new",
"label",
"vector",
"with",
"just",
"the",
"valid",
"files",
"indicated",
"is",
"created",
"."
] | def check_valid_size(
self,
y: np.ndarray,
sample_sizes: np.ndarray,
append_perturbation_size: np.ndarray,
) -> np.ndarray:
"""
Checks that we can append the l0 perturbation to the malware sample and not exceed the
maximum file size. A new label vector with just the valid files indicated is created.
:param y: Labels.
:param sample_sizes: The size of the original file, before it was padded to the input size required by MalConv.
:param append_perturbation_size: Size of the perturbations in L0 terms to put at end of file.
:return adv_label_vector: Labels which indicate which malware samples have enough free features to
accommodate all the adversarial perturbation.
"""
adv_label_vector = np.zeros_like(y)
for i, label in enumerate(y):
if label == 1:
if sample_sizes[i] + append_perturbation_size[i] <= self.param_dic["maxlen"]:
adv_label_vector[i] = 1
logger.info("size to append on sample %d is %d", i, append_perturbation_size[i])
return adv_label_vector | [
"def",
"check_valid_size",
"(",
"self",
",",
"y",
":",
"np",
".",
"ndarray",
",",
"sample_sizes",
":",
"np",
".",
"ndarray",
",",
"append_perturbation_size",
":",
"np",
".",
"ndarray",
",",
")",
"->",
"np",
".",
"ndarray",
":",
"adv_label_vector",
"=",
"np",
".",
"zeros_like",
"(",
"y",
")",
"for",
"i",
",",
"label",
"in",
"enumerate",
"(",
"y",
")",
":",
"if",
"label",
"==",
"1",
":",
"if",
"sample_sizes",
"[",
"i",
"]",
"+",
"append_perturbation_size",
"[",
"i",
"]",
"<=",
"self",
".",
"param_dic",
"[",
"\"maxlen\"",
"]",
":",
"adv_label_vector",
"[",
"i",
"]",
"=",
"1",
"logger",
".",
"info",
"(",
"\"size to append on sample %d is %d\"",
",",
"i",
",",
"append_perturbation_size",
"[",
"i",
"]",
")",
"return",
"adv_label_vector"
] | https://github.com/Trusted-AI/adversarial-robustness-toolbox/blob/9fabffdbb92947efa1ecc5d825d634d30dfbaf29/art/attacks/evasion/pe_malware_attack.py#L170-L194 |
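The validity check above is plain NumPy bookkeeping, so it can be demonstrated standalone; the maxlen value below is an assumed MalConv input length and the sizes are toy numbers.
import numpy as np
maxlen = 1000                                   # assumed maximum file length
y = np.array([1, 1, 0, 1])                      # 1 marks a malware sample
sample_sizes = np.array([900, 400, 300, 990])   # original file sizes
append_sizes = np.array([50, 700, 10, 50])      # L0 bytes to append
# Keep only malware samples whose file plus perturbation still fits.
valid = np.zeros_like(y)
for i, label in enumerate(y):
    if label == 1 and sample_sizes[i] + append_sizes[i] <= maxlen:
        valid[i] = 1
print(valid)  # [1 0 0 0]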
|
python/cpython | e13cdca0f5224ec4e23bdd04bb3120506964bc8b | Lib/importlib/metadata/__init__.py | python | distributions | (**kwargs) | return Distribution.discover(**kwargs) | Get all ``Distribution`` instances in the current environment.
:return: An iterable of ``Distribution`` instances. | Get all ``Distribution`` instances in the current environment. | [
"Get",
"all",
"Distribution",
"instances",
"in",
"the",
"current",
"environment",
"."
] | def distributions(**kwargs):
"""Get all ``Distribution`` instances in the current environment.
:return: An iterable of ``Distribution`` instances.
"""
return Distribution.discover(**kwargs) | [
"def",
"distributions",
"(",
"*",
"*",
"kwargs",
")",
":",
"return",
"Distribution",
".",
"discover",
"(",
"*",
"*",
"kwargs",
")"
] | https://github.com/python/cpython/blob/e13cdca0f5224ec4e23bdd04bb3120506964bc8b/Lib/importlib/metadata/__init__.py#L956-L961 |
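distributions is part of the public importlib.metadata API (Python 3.8+), so the record above can be exercised directly:
from importlib.metadata import distributions
# Iterate over every installed distribution in the current environment.
for dist in distributions():
    print(dist.metadata['Name'], dist.version)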
|
nitishsrivastava/deepnet | f4e4ff207923e01552c96038a1e2c29eb5d16160 | eigenmat/eigenmat.py | python | EigenMatrix.overwrite | (self, array) | Overwrites self with array.
'array' should have a size smaller than that of the array used to
initialize the EigenMatrix. The method will not throw an Exception just
yet if this is not true. It will throw exceptions or behave in strange
ways later on. | Overwrites self with array.
'array' should have a size smaller than that of the array used to
initialize the EigenMatrix. The method will not throw an Exception just
yet if this is not true. It will throw exceptions or behave in strange
ways later on. | [
"Overwrites",
"self",
"with",
"array",
".",
"array",
"should",
"have",
"a",
"size",
"smaller",
"than",
"that",
"of",
"the",
"array",
"used",
"to",
"initialize",
"the",
"EigenMatrix",
".",
"The",
"method",
"will",
"not",
"throw",
"an",
"Exception",
"just",
"yet",
"if",
"this",
"is",
"not",
"true",
".",
"It",
"will",
"throw",
"exceptions",
"or",
"behave",
"in",
"strange",
"ways",
"later",
"on",
"."
] | def overwrite(self, array):
"""Overwrites self with array.
'array' should have a size smaller than that of the array used to
initialize the EigenMatrix. The method will not throw an Exception just
yet if this is not true. It will throw exceptions or behave in strange
ways later on.
"""
assert type(array) == np.ndarray, 'array must be a np.ndarray.'
array = reformat(array)
self.numpy_array = array
_eigenmat.init_from_array(self.p_mat, array.ctypes.data_as(ct.POINTER(ct.c_float)), ct.c_int(array.shape[0]), ct.c_int(array.shape[1])) | [
"def",
"overwrite",
"(",
"self",
",",
"array",
")",
":",
"assert",
"type",
"(",
"array",
")",
"==",
"np",
".",
"ndarray",
",",
"'array must be a np.ndarray.'",
"array",
"=",
"reformat",
"(",
"array",
")",
"self",
".",
"numpy_array",
"=",
"array",
"_eigenmat",
".",
"init_from_array",
"(",
"self",
".",
"p_mat",
",",
"array",
".",
"ctypes",
".",
"data_as",
"(",
"ct",
".",
"POINTER",
"(",
"ct",
".",
"c_float",
")",
")",
",",
"ct",
".",
"c_int",
"(",
"array",
".",
"shape",
"[",
"0",
"]",
")",
",",
"ct",
".",
"c_int",
"(",
"array",
".",
"shape",
"[",
"1",
"]",
")",
")"
] | https://github.com/nitishsrivastava/deepnet/blob/f4e4ff207923e01552c96038a1e2c29eb5d16160/eigenmat/eigenmat.py#L88-L99 |
||
XUSean0118/DVSNet | 2b67d991ca13de0a1210fbfbab4ad68f8c2f193a | inference.py | python | get_arguments | () | return parser.parse_args() | Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments. | Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments. | [
"Parse",
"all",
"the",
"arguments",
"provided",
"from",
"the",
"CLI",
".",
"Returns",
":",
"A",
"list",
"of",
"parsed",
"arguments",
"."
] | def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="Dynamic Video Segmentation Network")
parser.add_argument("--data_dir", type=str, default=DATA_DIRECTORY,
help="Path to the directory containing the dataset.")
parser.add_argument("--data_list", type=str, default=DATA_LIST_PATH,
help="Path to the file listing the images in the dataset.")
parser.add_argument("--restore_from", type=str, default=RESTORE_FROM,
help="Where restore model parameters from.")
parser.add_argument("--decision_from", type=str, default=RESTORE_FROM,
help="Where restore decision model parameters from.")
parser.add_argument("--save_dir", type=str, default=SAVE_DIR,
help="Where to save segmented output.")
parser.add_argument("--num_steps", type=int, default=NUM_STEPS,
help="Number of images in the video.")
parser.add_argument("--overlap", type=int, default=OVERLAP,
help="Overlapping size.")
parser.add_argument("--target", type=float, default=TARGET,
help="Confidence score threshold.")
parser.add_argument("--dynamic", action="store_true",
help="Whether to dynamically adjust target")
return parser.parse_args() | [
"def",
"get_arguments",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"\"Dynamic Video Segmentation Network\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--data_dir\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"DATA_DIRECTORY",
",",
"help",
"=",
"\"Path to the directory containing the dataset.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--data_list\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"DATA_LIST_PATH",
",",
"help",
"=",
"\"Path to the file listing the images in the dataset.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--restore_from\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"RESTORE_FROM",
",",
"help",
"=",
"\"Where restore model parameters from.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--decision_from\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"RESTORE_FROM",
",",
"help",
"=",
"\"Where restore decision model parameters from.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--save_dir\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"SAVE_DIR",
",",
"help",
"=",
"\"Where to save segmented output.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--num_steps\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"NUM_STEPS",
",",
"help",
"=",
"\"Number of images in the video.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--overlap\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"OVERLAP",
",",
"help",
"=",
"\"Overlapping size.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--target\"",
",",
"type",
"=",
"float",
",",
"default",
"=",
"TARGET",
",",
"help",
"=",
"\"Confidence score threshold.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--dynamic\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Whether to dynamically adjust target\"",
")",
"return",
"parser",
".",
"parse_args",
"(",
")"
] | https://github.com/XUSean0118/DVSNet/blob/2b67d991ca13de0a1210fbfbab4ad68f8c2f193a/inference.py#L29-L54 |
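Because get_arguments reads sys.argv, a typical run supplies flags on the command line and then consumes the parsed namespace; the invocation below is illustrative.
# e.g. python inference.py --data_dir ./cityscapes --num_steps 100 --dynamic
args = get_arguments()
print(args.data_dir, args.num_steps, args.target)
if args.dynamic:
    print('confidence target will be adjusted at runtime')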
|
pallets/werkzeug | 9efe8c00dcb2b6fc086961ba304729db01912652 | src/werkzeug/datastructures.py | python | MultiDict.getlist | (self, key, type=None) | return result | Return the list of items for a given key. If that key is not in the
`MultiDict`, the return value will be an empty list. Just like `get`,
`getlist` accepts a `type` parameter. All items will be converted
with the callable defined there.
:param key: The key to be looked up.
:param type: A callable that is used to cast the value in the
:class:`MultiDict`. If a :exc:`ValueError` is raised
by this callable the value will be removed from the list.
:return: a :class:`list` of all the values for the key. | Return the list of items for a given key. If that key is not in the
`MultiDict`, the return value will be an empty list. Just like `get`,
`getlist` accepts a `type` parameter. All items will be converted
with the callable defined there. | [
"Return",
"the",
"list",
"of",
"items",
"for",
"a",
"given",
"key",
".",
"If",
"that",
"key",
"is",
"not",
"in",
"the",
"MultiDict",
"the",
"return",
"value",
"will",
"be",
"an",
"empty",
"list",
".",
"Just",
"like",
"get",
"getlist",
"accepts",
"a",
"type",
"parameter",
".",
"All",
"items",
"will",
"be",
"converted",
"with",
"the",
"callable",
"defined",
"there",
"."
] | def getlist(self, key, type=None):
"""Return the list of items for a given key. If that key is not in the
`MultiDict`, the return value will be an empty list. Just like `get`,
`getlist` accepts a `type` parameter. All items will be converted
with the callable defined there.
:param key: The key to be looked up.
:param type: A callable that is used to cast the value in the
:class:`MultiDict`. If a :exc:`ValueError` is raised
by this callable the value will be removed from the list.
:return: a :class:`list` of all the values for the key.
"""
try:
rv = dict.__getitem__(self, key)
except KeyError:
return []
if type is None:
return list(rv)
result = []
for item in rv:
try:
result.append(type(item))
except ValueError:
pass
return result | [
"def",
"getlist",
"(",
"self",
",",
"key",
",",
"type",
"=",
"None",
")",
":",
"try",
":",
"rv",
"=",
"dict",
".",
"__getitem__",
"(",
"self",
",",
"key",
")",
"except",
"KeyError",
":",
"return",
"[",
"]",
"if",
"type",
"is",
"None",
":",
"return",
"list",
"(",
"rv",
")",
"result",
"=",
"[",
"]",
"for",
"item",
"in",
"rv",
":",
"try",
":",
"result",
".",
"append",
"(",
"type",
"(",
"item",
")",
")",
"except",
"ValueError",
":",
"pass",
"return",
"result"
] | https://github.com/pallets/werkzeug/blob/9efe8c00dcb2b6fc086961ba304729db01912652/src/werkzeug/datastructures.py#L395-L419 |
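The type conversion (and the silent dropping of values that raise ValueError) is easiest to see on a small MultiDict:
from werkzeug.datastructures import MultiDict
d = MultiDict([('a', '1'), ('a', 'x'), ('b', '2')])
print(d.getlist('a'))            # ['1', 'x']
print(d.getlist('a', type=int))  # [1] -- 'x' fails int() and is removed
print(d.getlist('missing'))      # []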
|
TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 5bb97d7e3ffd913abddb4cfa7d78a1b4c868890e | tensorflow_dl_models/research/lfads/synth_data/synthetic_data_utils.py | python | spikify_data | (data_e, rng, dt=1.0, max_firing_rate=100) | return spikes_e | Apply spikes to a continuous dataset whose values are between 0.0 and 1.0
Args:
data_e: nexamples length list of NxT trials
dt: how often the data are sampled
max_firing_rate: the firing rate that is associated with a value of 1.0
Returns:
spikified_e: a list of length b of the data represented as spikes,
sampled from the underlying poisson process. | Apply spikes to a continuous dataset whose values are between 0.0 and 1.0
Args:
data_e: nexamples length list of NxT trials
dt: how often the data are sampled
max_firing_rate: the firing rate that is associated with a value of 1.0
Returns:
spikified_e: a list of length b of the data represented as spikes,
sampled from the underlying poisson process. | [
"Apply",
"spikes",
"to",
"a",
"continuous",
"dataset",
"whose",
"values",
"are",
"between",
"0",
".",
"0",
"and",
"1",
".",
"0",
"Args",
":",
"data_e",
":",
"nexamples",
"length",
"list",
"of",
"NxT",
"trials",
"dt",
":",
"how",
"often",
"the",
"data",
"are",
"sampled",
"max_firing_rate",
":",
"the",
"firing",
"rate",
"that",
"is",
"associated",
"with",
"a",
"value",
"of",
"1",
".",
"0",
"Returns",
":",
"spikified_e",
":",
"a",
"list",
"of",
"length",
"b",
"of",
"the",
"data",
"represented",
"as",
"spikes",
"sampled",
"from",
"the",
"underlying",
"poisson",
"process",
"."
] | def spikify_data(data_e, rng, dt=1.0, max_firing_rate=100):
""" Apply spikes to a continuous dataset whose values are between 0.0 and 1.0
Args:
data_e: nexamples length list of NxT trials
dt: how often the data are sampled
max_firing_rate: the firing rate that is associated with a value of 1.0
Returns:
spikified_e: a list of length b of the data represented as spikes,
sampled from the underlying poisson process.
"""
E = len(data_e)
spikes_e = []
for e in range(E):
data = data_e[e]
N,T = data.shape
data_s = np.zeros([N,T]).astype(np.int)
for n in range(N):
f = data[n,:]
s = rng.poisson(f*max_firing_rate*dt, size=T)
data_s[n,:] = s
spikes_e.append(data_s)
return spikes_e | [
"def",
"spikify_data",
"(",
"data_e",
",",
"rng",
",",
"dt",
"=",
"1.0",
",",
"max_firing_rate",
"=",
"100",
")",
":",
"E",
"=",
"len",
"(",
"data_e",
")",
"spikes_e",
"=",
"[",
"]",
"for",
"e",
"in",
"range",
"(",
"E",
")",
":",
"data",
"=",
"data_e",
"[",
"e",
"]",
"N",
",",
"T",
"=",
"data",
".",
"shape",
"data_s",
"=",
"np",
".",
"zeros",
"(",
"[",
"N",
",",
"T",
"]",
")",
".",
"astype",
"(",
"np",
".",
"int",
")",
"for",
"n",
"in",
"range",
"(",
"N",
")",
":",
"f",
"=",
"data",
"[",
"n",
",",
":",
"]",
"s",
"=",
"rng",
".",
"poisson",
"(",
"f",
"*",
"max_firing_rate",
"*",
"dt",
",",
"size",
"=",
"T",
")",
"data_s",
"[",
"n",
",",
":",
"]",
"=",
"s",
"spikes_e",
".",
"append",
"(",
"data_s",
")",
"return",
"spikes_e"
] | https://github.com/TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials/blob/5bb97d7e3ffd913abddb4cfa7d78a1b4c868890e/tensorflow_dl_models/research/lfads/synth_data/synthetic_data_utils.py#L128-L151 |
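A toy invocation with shapes taken from the docstring (a list of NxT firing-rate trials with values in [0, 1]); the sizes are arbitrary.
import numpy as np
rng = np.random.RandomState(0)
data_e = [rng.rand(4, 10), rng.rand(4, 10)]  # two trials: 4 neurons x 10 bins
spikes_e = spikify_data(data_e, rng, dt=0.01, max_firing_rate=100)
print(spikes_e[0].shape)  # (4, 10), integer Poisson spike counts per bin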
|
nosmokingbandit/watcher | dadacd21a5790ee609058a98a17fcc8954d24439 | lib/sqlalchemy/orm/strategy_options.py | python | subqueryload | (loadopt, attr) | return loadopt.set_relationship_strategy(attr, {"lazy": "subquery"}) | Indicate that the given attribute should be loaded using
subquery eager loading.
This function is part of the :class:`.Load` interface and supports
both method-chained and standalone operation.
examples::
# subquery-load the "orders" collection on "User"
query(User).options(subqueryload(User.orders))
# subquery-load Order.items and then Item.keywords
query(Order).options(subqueryload(Order.items).subqueryload(Item.keywords))
# lazily load Order.items, but when Items are loaded,
# subquery-load the keywords collection
query(Order).options(lazyload(Order.items).subqueryload(Item.keywords))
.. seealso::
:ref:`loading_toplevel`
:func:`.orm.joinedload`
:func:`.orm.lazyload`
:paramref:`.relationship.lazy` | Indicate that the given attribute should be loaded using
subquery eager loading. | [
"Indicate",
"that",
"the",
"given",
"attribute",
"should",
"be",
"loaded",
"using",
"subquery",
"eager",
"loading",
"."
] | def subqueryload(loadopt, attr):
"""Indicate that the given attribute should be loaded using
subquery eager loading.
This function is part of the :class:`.Load` interface and supports
both method-chained and standalone operation.
examples::
# subquery-load the "orders" collection on "User"
query(User).options(subqueryload(User.orders))
# subquery-load Order.items and then Item.keywords
query(Order).options(subqueryload(Order.items).subqueryload(Item.keywords))
# lazily load Order.items, but when Items are loaded,
# subquery-load the keywords collection
query(Order).options(lazyload(Order.items).subqueryload(Item.keywords))
.. seealso::
:ref:`loading_toplevel`
:func:`.orm.joinedload`
:func:`.orm.lazyload`
:paramref:`.relationship.lazy`
"""
return loadopt.set_relationship_strategy(attr, {"lazy": "subquery"}) | [
"def",
"subqueryload",
"(",
"loadopt",
",",
"attr",
")",
":",
"return",
"loadopt",
".",
"set_relationship_strategy",
"(",
"attr",
",",
"{",
"\"lazy\"",
":",
"\"subquery\"",
"}",
")"
] | https://github.com/nosmokingbandit/watcher/blob/dadacd21a5790ee609058a98a17fcc8954d24439/lib/sqlalchemy/orm/strategy_options.py#L770-L801 |
|
gem/oq-engine | 1bdb88f3914e390abcbd285600bfd39477aae47c | openquake/calculators/base.py | python | create_gmf_data | (dstore, prim_imts, sec_imts=(), data=None) | Create and possibly populate the datasets in the gmf_data group | Create and possibly populate the datasets in the gmf_data group | [
"Create",
"and",
"possibly",
"populate",
"the",
"datasets",
"in",
"the",
"gmf_data",
"group"
] | def create_gmf_data(dstore, prim_imts, sec_imts=(), data=None):
"""
Create and possibly populate the datasets in the gmf_data group
"""
oq = dstore['oqparam']
R = dstore['full_lt'].get_num_rlzs()
M = len(prim_imts)
n = 0 if data is None else len(data['sid'])
items = [('sid', U32 if n == 0 else data['sid']),
('eid', U32 if n == 0 else data['eid'])]
for m in range(M):
col = f'gmv_{m}'
items.append((col, F32 if data is None else data[col]))
for imt in sec_imts:
items.append((str(imt), F32 if n == 0 else data[imt]))
if oq.investigation_time:
eff_time = oq.investigation_time * oq.ses_per_logic_tree_path * R
else:
eff_time = 0
dstore.create_df('gmf_data', items, 'gzip')
dstore.set_attrs('gmf_data', num_events=len(dstore['events']),
imts=' '.join(map(str, prim_imts)),
effective_time=eff_time)
if data is not None:
df = pandas.DataFrame(dict(items))
avg_gmf = numpy.zeros((2, n, M + len(sec_imts)), F32)
for sid, df in df.groupby(df.sid):
df.pop('eid')
df.pop('sid')
avg_gmf[:, sid] = stats.avg_std(df.to_numpy())
dstore['avg_gmf'] = avg_gmf | [
"def",
"create_gmf_data",
"(",
"dstore",
",",
"prim_imts",
",",
"sec_imts",
"=",
"(",
")",
",",
"data",
"=",
"None",
")",
":",
"oq",
"=",
"dstore",
"[",
"'oqparam'",
"]",
"R",
"=",
"dstore",
"[",
"'full_lt'",
"]",
".",
"get_num_rlzs",
"(",
")",
"M",
"=",
"len",
"(",
"prim_imts",
")",
"n",
"=",
"0",
"if",
"data",
"is",
"None",
"else",
"len",
"(",
"data",
"[",
"'sid'",
"]",
")",
"items",
"=",
"[",
"(",
"'sid'",
",",
"U32",
"if",
"n",
"==",
"0",
"else",
"data",
"[",
"'sid'",
"]",
")",
",",
"(",
"'eid'",
",",
"U32",
"if",
"n",
"==",
"0",
"else",
"data",
"[",
"'eid'",
"]",
")",
"]",
"for",
"m",
"in",
"range",
"(",
"M",
")",
":",
"col",
"=",
"f'gmv_{m}'",
"items",
".",
"append",
"(",
"(",
"col",
",",
"F32",
"if",
"data",
"is",
"None",
"else",
"data",
"[",
"col",
"]",
")",
")",
"for",
"imt",
"in",
"sec_imts",
":",
"items",
".",
"append",
"(",
"(",
"str",
"(",
"imt",
")",
",",
"F32",
"if",
"n",
"==",
"0",
"else",
"data",
"[",
"imt",
"]",
")",
")",
"if",
"oq",
".",
"investigation_time",
":",
"eff_time",
"=",
"oq",
".",
"investigation_time",
"*",
"oq",
".",
"ses_per_logic_tree_path",
"*",
"R",
"else",
":",
"eff_time",
"=",
"0",
"dstore",
".",
"create_df",
"(",
"'gmf_data'",
",",
"items",
",",
"'gzip'",
")",
"dstore",
".",
"set_attrs",
"(",
"'gmf_data'",
",",
"num_events",
"=",
"len",
"(",
"dstore",
"[",
"'events'",
"]",
")",
",",
"imts",
"=",
"' '",
".",
"join",
"(",
"map",
"(",
"str",
",",
"prim_imts",
")",
")",
",",
"effective_time",
"=",
"eff_time",
")",
"if",
"data",
"is",
"not",
"None",
":",
"df",
"=",
"pandas",
".",
"DataFrame",
"(",
"dict",
"(",
"items",
")",
")",
"avg_gmf",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"2",
",",
"n",
",",
"M",
"+",
"len",
"(",
"sec_imts",
")",
")",
",",
"F32",
")",
"for",
"sid",
",",
"df",
"in",
"df",
".",
"groupby",
"(",
"df",
".",
"sid",
")",
":",
"df",
".",
"pop",
"(",
"'eid'",
")",
"df",
".",
"pop",
"(",
"'sid'",
")",
"avg_gmf",
"[",
":",
",",
"sid",
"]",
"=",
"stats",
".",
"avg_std",
"(",
"df",
".",
"to_numpy",
"(",
")",
")",
"dstore",
"[",
"'avg_gmf'",
"]",
"=",
"avg_gmf"
] | https://github.com/gem/oq-engine/blob/1bdb88f3914e390abcbd285600bfd39477aae47c/openquake/calculators/base.py#L1141-L1171 |
||
linxid/Machine_Learning_Study_Path | 558e82d13237114bbb8152483977806fc0c222af | Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/site-packages/pip/_vendor/ipaddress.py | python | _BaseNetwork.hostmask | (self) | return x | [] | def hostmask(self):
x = self._cache.get('hostmask')
if x is None:
x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
self._cache['hostmask'] = x
return x | [
"def",
"hostmask",
"(",
"self",
")",
":",
"x",
"=",
"self",
".",
"_cache",
".",
"get",
"(",
"'hostmask'",
")",
"if",
"x",
"is",
"None",
":",
"x",
"=",
"self",
".",
"_address_class",
"(",
"int",
"(",
"self",
".",
"netmask",
")",
"^",
"self",
".",
"_ALL_ONES",
")",
"self",
".",
"_cache",
"[",
"'hostmask'",
"]",
"=",
"x",
"return",
"x"
] | https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/site-packages/pip/_vendor/ipaddress.py#L826-L831 |
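hostmask is the same property exposed by the standard-library ipaddress module that this file vendors, so it can be tried directly:
import ipaddress
net = ipaddress.ip_network('192.0.2.0/24')
print(net.netmask)   # 255.255.255.0
print(net.hostmask)  # 0.0.0.255 -- netmask XOR all-ones, cached after first use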
|||
dropbox/dropbox-sdk-python | 015437429be224732990041164a21a0501235db1 | dropbox/base.py | python | DropboxBase.sharing_create_shared_link | (self,
path,
short_url=False,
pending_upload=None) | return r | Create a shared link. If a shared link already exists for the given
path, that link is returned. Previously, it was technically possible to
break a shared link by moving or renaming the corresponding file or
folder. In the future, this will no longer be the case, so your app
shouldn't rely on this behavior. Instead, if your app needs to revoke a
shared link, use :meth:`sharing_revoke_shared_link`.
:param str path: The path to share.
:type short_url: bool
:param Nullable[:class:`dropbox.sharing.PendingUploadMode`]
pending_upload: If it's okay to share a path that does not yet
exist, set this to either ``PendingUploadMode.file`` or
``PendingUploadMode.folder`` to indicate whether to assume it's a
file or folder.
:rtype: :class:`dropbox.sharing.PathLinkMetadata`
:raises: :class:`.exceptions.ApiError`
If this raises, ApiError will contain:
:class:`dropbox.sharing.CreateSharedLinkError` | Create a shared link. If a shared link already exists for the given
path, that link is returned. Previously, it was technically possible to
break a shared link by moving or renaming the corresponding file or
folder. In the future, this will no longer be the case, so your app
shouldn't rely on this behavior. Instead, if your app needs to revoke a
shared link, use :meth:`sharing_revoke_shared_link`. | [
"Create",
"a",
"shared",
"link",
".",
"If",
"a",
"shared",
"link",
"already",
"exists",
"for",
"the",
"given",
"path",
"that",
"link",
"is",
"returned",
".",
"Previously",
"it",
"was",
"technically",
"possible",
"to",
"break",
"a",
"shared",
"link",
"by",
"moving",
"or",
"renaming",
"the",
"corresponding",
"file",
"or",
"folder",
".",
"In",
"the",
"future",
"this",
"will",
"no",
"longer",
"be",
"the",
"case",
"so",
"your",
"app",
"shouldn",
"t",
"rely",
"on",
"this",
"behavior",
".",
"Instead",
"if",
"your",
"app",
"needs",
"to",
"revoke",
"a",
"shared",
"link",
"use",
":",
"meth",
":",
"sharing_revoke_shared_link",
"."
] | def sharing_create_shared_link(self,
path,
short_url=False,
pending_upload=None):
"""
Create a shared link. If a shared link already exists for the given
path, that link is returned. Previously, it was technically possible to
break a shared link by moving or renaming the corresponding file or
folder. In the future, this will no longer be the case, so your app
shouldn't rely on this behavior. Instead, if your app needs to revoke a
shared link, use :meth:`sharing_revoke_shared_link`.
:param str path: The path to share.
:type short_url: bool
:param Nullable[:class:`dropbox.sharing.PendingUploadMode`]
pending_upload: If it's okay to share a path that does not yet
exist, set this to either ``PendingUploadMode.file`` or
``PendingUploadMode.folder`` to indicate whether to assume it's a
file or folder.
:rtype: :class:`dropbox.sharing.PathLinkMetadata`
:raises: :class:`.exceptions.ApiError`
If this raises, ApiError will contain:
:class:`dropbox.sharing.CreateSharedLinkError`
"""
warnings.warn(
'create_shared_link is deprecated. Use create_shared_link_with_settings.',
DeprecationWarning,
)
arg = sharing.CreateSharedLinkArg(path,
short_url,
pending_upload)
r = self.request(
sharing.create_shared_link,
'sharing',
arg,
None,
)
return r | [
"def",
"sharing_create_shared_link",
"(",
"self",
",",
"path",
",",
"short_url",
"=",
"False",
",",
"pending_upload",
"=",
"None",
")",
":",
"warnings",
".",
"warn",
"(",
"'create_shared_link is deprecated. Use create_shared_link_with_settings.'",
",",
"DeprecationWarning",
",",
")",
"arg",
"=",
"sharing",
".",
"CreateSharedLinkArg",
"(",
"path",
",",
"short_url",
",",
"pending_upload",
")",
"r",
"=",
"self",
".",
"request",
"(",
"sharing",
".",
"create_shared_link",
",",
"'sharing'",
",",
"arg",
",",
"None",
",",
")",
"return",
"r"
] | https://github.com/dropbox/dropbox-sdk-python/blob/015437429be224732990041164a21a0501235db1/dropbox/base.py#L4071-L4109 |
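A hedged usage sketch: the token and path are placeholders, and the method itself emits a DeprecationWarning pointing to sharing_create_shared_link_with_settings as the preferred replacement.
import dropbox
dbx = dropbox.Dropbox('YOUR_ACCESS_TOKEN')  # hypothetical access token
link = dbx.sharing_create_shared_link('/reports/2016.pdf')
print(link.url)  # shared link for the given path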
|
cisco/mindmeld | 809c36112e9ea8019fe29d54d136ca14eb4fd8db | mindmeld/system_entity_recognizer.py | python | SystemEntityRecognizer.load_from_app_path | (app_path) | If the application configuration is empty, we do not use Duckling.
Otherwise, we return the Duckling recognizer with the URL defined in the application's
config, defaulting to the DEFAULT_DUCKLING_URL.
Args:
app_path (str): Application path
Returns:
(SystemEntityRecognizer) | If the application configuration is empty, we do not use Duckling. | [
"If",
"the",
"application",
"configuration",
"is",
"empty",
"we",
"do",
"not",
"use",
"Duckling",
"."
] | def load_from_app_path(app_path):
"""If the application configuration is empty, we do not use Duckling.
Otherwise, we return the Duckling recognizer with the URL defined in the application's
config, defaulting to the DEFAULT_DUCKLING_URL.
Args:
app_path (str): Application path
Returns:
(SystemEntityRecognizer)
"""
if not app_path:
raise SystemEntityError(
"App path must be valid to load entity recognizer config."
)
if is_duckling_configured(app_path):
url = get_system_entity_url_config(app_path=app_path)
return DucklingRecognizer.get_instance(url)
else:
return NoOpSystemEntityRecognizer.get_instance() | [
"def",
"load_from_app_path",
"(",
"app_path",
")",
":",
"if",
"not",
"app_path",
":",
"raise",
"SystemEntityError",
"(",
"\"App path must be valid to load entity recognizer config.\"",
")",
"if",
"is_duckling_configured",
"(",
"app_path",
")",
":",
"url",
"=",
"get_system_entity_url_config",
"(",
"app_path",
"=",
"app_path",
")",
"return",
"DucklingRecognizer",
".",
"get_instance",
"(",
"url",
")",
"else",
":",
"return",
"NoOpSystemEntityRecognizer",
".",
"get_instance",
"(",
")"
] | https://github.com/cisco/mindmeld/blob/809c36112e9ea8019fe29d54d136ca14eb4fd8db/mindmeld/system_entity_recognizer.py#L118-L139 |
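Usage follows directly from the signature; the application path below is a placeholder.
# Returns a DucklingRecognizer when the app config enables Duckling,
# otherwise the no-op recognizer; raises SystemEntityError on an empty path.
recognizer = SystemEntityRecognizer.load_from_app_path('/path/to/my_app')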
||
Delta-ML/delta | 31dfebc8f20b7cb282b62f291ff25a87e403cc86 | delta/utils/solver/utils/callbacks.py | python | ParallelModelCheckpoint.__init__ | (self,
model,
filepath,
monitor='val_loss',
verbose=0,
save_best_only=False,
save_weights_only=False,
mode='auto',
save_freq='epoch',
load_weights_on_restart=False,
period=1) | [] | def __init__(self,
model,
filepath,
monitor='val_loss',
verbose=0,
save_best_only=False,
save_weights_only=False,
mode='auto',
save_freq='epoch',
load_weights_on_restart=False,
period=1):
self.model_to_save = model
super().__init__(
filepath=filepath,
monitor=monitor,
verbose=verbose,
save_best_only=save_best_only,
save_weights_only=save_weights_only,
mode=mode,
save_freq=save_freq,
load_weights_on_restart=load_weights_on_restart,
period=period) | [
"def",
"__init__",
"(",
"self",
",",
"model",
",",
"filepath",
",",
"monitor",
"=",
"'val_loss'",
",",
"verbose",
"=",
"0",
",",
"save_best_only",
"=",
"False",
",",
"save_weights_only",
"=",
"False",
",",
"mode",
"=",
"'auto'",
",",
"save_freq",
"=",
"'epoch'",
",",
"load_weights_on_restart",
"=",
"False",
",",
"period",
"=",
"1",
")",
":",
"self",
".",
"model_to_save",
"=",
"model",
"super",
"(",
")",
".",
"__init__",
"(",
"filepath",
"=",
"filepath",
",",
"monitor",
"=",
"monitor",
",",
"verbose",
"=",
"verbose",
",",
"save_best_only",
"=",
"save_best_only",
",",
"save_weights_only",
"=",
"save_weights_only",
",",
"mode",
"=",
"mode",
",",
"save_freq",
"=",
"save_freq",
",",
"load_weights_on_restart",
"=",
"load_weights_on_restart",
",",
"period",
"=",
"period",
")"
] | https://github.com/Delta-ML/delta/blob/31dfebc8f20b7cb282b62f291ff25a87e403cc86/delta/utils/solver/utils/callbacks.py#L160-L181 |
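The point of the subclass is to checkpoint the single-device template model rather than the multi-GPU wrapper (the stored model_to_save attribute suggests the save path is redirected elsewhere in the class); a hedged Keras-style sketch with hypothetical model names:
# base_model is the template; parallel_model wraps it across GPUs.
ckpt = ParallelModelCheckpoint(
    model=base_model,
    filepath='weights.{epoch:02d}.h5',
    monitor='val_loss',
    save_best_only=True)
parallel_model.fit(x_train, y_train,
                   validation_data=(x_val, y_val),
                   callbacks=[ckpt])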
||||
vmware/vsphere-automation-sdk-python | ba7d4e0742f58a641dfed9538ecbbb1db4f3891e | samples/vmc/draas/site_recovery_activation_ops.py | python | SiteRecoveryActivationOperations.deactivate_srm | (self) | [] | def deactivate_srm(self):
if self.cleanup:
try:
srm_deactivation_task = self.vmc_client.draas.SiteRecovery.delete(self.org_id,
self.sddc_id,
force=True)
except InvalidRequest as e:
# Convert InvalidRequest to ErrorResponse to get error message
error_response = e.data.convert_to(ErrorResponse)
raise Exception(error_response.error_messages)
wait_for_task(task_client=self.vmc_client.draas.Task,
org_id=self.org_id,
task_id=srm_deactivation_task.id,
interval_sec=self.interval_sec) | [
"def",
"deactivate_srm",
"(",
"self",
")",
":",
"if",
"self",
".",
"cleanup",
":",
"try",
":",
"srm_deactivation_task",
"=",
"self",
".",
"vmc_client",
".",
"draas",
".",
"SiteRecovery",
".",
"delete",
"(",
"self",
".",
"org_id",
",",
"self",
".",
"sddc_id",
",",
"force",
"=",
"True",
")",
"except",
"InvalidRequest",
"as",
"e",
":",
"# Convert InvalidRequest to ErrorResponse to get error message",
"error_response",
"=",
"e",
".",
"data",
".",
"convert_to",
"(",
"ErrorResponse",
")",
"raise",
"Exception",
"(",
"error_response",
".",
"error_messages",
")",
"wait_for_task",
"(",
"task_client",
"=",
"self",
".",
"vmc_client",
".",
"draas",
".",
"Task",
",",
"org_id",
"=",
"self",
".",
"org_id",
",",
"task_id",
"=",
"srm_deactivation_task",
".",
"id",
",",
"interval_sec",
"=",
"self",
".",
"interval_sec",
")"
] | https://github.com/vmware/vsphere-automation-sdk-python/blob/ba7d4e0742f58a641dfed9538ecbbb1db4f3891e/samples/vmc/draas/site_recovery_activation_ops.py#L81-L95 |
||||
chainer/chainer-chemistry | efe323aa21f63a815130d673781e7cca1ccb72d2 | chainer_chemistry/dataset/networkx_preprocessors/reddit_coo.py | python | get_reddit_coo_data | (dirpath) | return PaddingGraphData(
x=reddit_data['feature'].astype(numpy.float32),
adj=adj,
y=reddit_data['label'].astype(numpy.int32),
label_num=41
) | Temporary function to obtain reddit coo data for GIN
(because it takes too much time to convert it to networkx)
Returns:
PaddingGraphData: `PaddingGraphData` of reddit | Temporary function to obtain reddit coo data for GIN | [
"Temporary",
"function",
"to",
"obtain",
"reddit",
"coo",
"data",
"for",
"GIN"
] | def get_reddit_coo_data(dirpath):
"""Temporary function to obtain reddit coo data for GIN
(because it takes too much time to convert it to networkx)
Returns:
PaddingGraphData: `PaddingGraphData` of reddit
"""
print("Loading node feature and label")
reddit_data = numpy.load(os.path.join(dirpath, "reddit_data.npz"))
print("Loading edge data")
coo_adj = scipy.sparse.load_npz(os.path.join(dirpath, "reddit_graph.npz"))
row = coo_adj.row.astype(numpy.int32)
col = coo_adj.col.astype(numpy.int32)
data = coo_adj.data.astype(numpy.float32)
# ensure row is sorted
if not numpy.all(row[:-1] <= row[1:]):
order = numpy.argsort(row)
row = row[order]
col = col[order]
assert numpy.all(row[:-1] <= row[1:])
adj = chainer.utils.CooMatrix(
data=data, row=row, col=col,
shape=coo_adj.shape,
order='C')
return PaddingGraphData(
x=reddit_data['feature'].astype(numpy.float32),
adj=adj,
y=reddit_data['label'].astype(numpy.int32),
label_num=41
) | [
"def",
"get_reddit_coo_data",
"(",
"dirpath",
")",
":",
"print",
"(",
"\"Loading node feature and label\"",
")",
"reddit_data",
"=",
"numpy",
".",
"load",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dirpath",
",",
"\"reddit_data.npz\"",
")",
")",
"print",
"(",
"\"Loading edge data\"",
")",
"coo_adj",
"=",
"scipy",
".",
"sparse",
".",
"load_npz",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dirpath",
",",
"\"reddit_graph.npz\"",
")",
")",
"row",
"=",
"coo_adj",
".",
"row",
".",
"astype",
"(",
"numpy",
".",
"int32",
")",
"col",
"=",
"coo_adj",
".",
"col",
".",
"astype",
"(",
"numpy",
".",
"int32",
")",
"data",
"=",
"coo_adj",
".",
"data",
".",
"astype",
"(",
"numpy",
".",
"float32",
")",
"# ensure row is sorted",
"if",
"not",
"numpy",
".",
"all",
"(",
"row",
"[",
":",
"-",
"1",
"]",
"<=",
"row",
"[",
"1",
":",
"]",
")",
":",
"order",
"=",
"numpy",
".",
"argsort",
"(",
"row",
")",
"row",
"=",
"row",
"[",
"order",
"]",
"col",
"=",
"col",
"[",
"order",
"]",
"assert",
"numpy",
".",
"all",
"(",
"row",
"[",
":",
"-",
"1",
"]",
"<=",
"row",
"[",
"1",
":",
"]",
")",
"adj",
"=",
"chainer",
".",
"utils",
".",
"CooMatrix",
"(",
"data",
"=",
"data",
",",
"row",
"=",
"row",
",",
"col",
"=",
"col",
",",
"shape",
"=",
"coo_adj",
".",
"shape",
",",
"order",
"=",
"'C'",
")",
"return",
"PaddingGraphData",
"(",
"x",
"=",
"reddit_data",
"[",
"'feature'",
"]",
".",
"astype",
"(",
"numpy",
".",
"float32",
")",
",",
"adj",
"=",
"adj",
",",
"y",
"=",
"reddit_data",
"[",
"'label'",
"]",
".",
"astype",
"(",
"numpy",
".",
"int32",
")",
",",
"label_num",
"=",
"41",
")"
] | https://github.com/chainer/chainer-chemistry/blob/efe323aa21f63a815130d673781e7cca1ccb72d2/chainer_chemistry/dataset/networkx_preprocessors/reddit_coo.py#L11-L46 |
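A hedged call sketch; the directory is hypothetical but must contain the two files the function loads (reddit_data.npz and reddit_graph.npz).
graph = get_reddit_coo_data('./datasets/reddit')  # hypothetical path
print(graph.x.shape)    # float32 node features
print(graph.label_num)  # 41 classes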
|
Symbo1/wsltools | 0b6e536fc85c707a1c81f0296c4e91ca835396a1 | wsltools/utils/faker/providers/address/fr_FR/__init__.py | python | Provider.street_prefix | (self) | return self.random_element(self.street_prefixes) | :example 'rue' | :example 'rue' | [
":",
"example",
"rue"
] | def street_prefix(self):
"""
:example 'rue'
"""
return self.random_element(self.street_prefixes) | [
"def",
"street_prefix",
"(",
"self",
")",
":",
"return",
"self",
".",
"random_element",
"(",
"self",
".",
"street_prefixes",
")"
] | https://github.com/Symbo1/wsltools/blob/0b6e536fc85c707a1c81f0296c4e91ca835396a1/wsltools/utils/faker/providers/address/fr_FR/__init__.py#L141-L145 |
|
chribsen/simple-machine-learning-examples | dc94e52a4cebdc8bb959ff88b81ff8cfeca25022 | venv/lib/python2.7/site-packages/numpy/ma/core.py | python | asanyarray | (a, dtype=None) | return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True) | Convert the input to a masked array, conserving subclasses.
If `a` is a subclass of `MaskedArray`, its class is conserved.
No copy is performed if the input is already an `ndarray`.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array.
dtype : dtype, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Default is 'C'.
Returns
-------
out : MaskedArray
MaskedArray interpretation of `a`.
See Also
--------
asarray : Similar to `asanyarray`, but does not conserve subclass.
Examples
--------
>>> x = np.arange(10.).reshape(2, 5)
>>> x
array([[ 0., 1., 2., 3., 4.],
[ 5., 6., 7., 8., 9.]])
>>> np.ma.asanyarray(x)
masked_array(data =
[[ 0. 1. 2. 3. 4.]
[ 5. 6. 7. 8. 9.]],
mask =
False,
fill_value = 1e+20)
>>> type(np.ma.asanyarray(x))
<class 'numpy.ma.core.MaskedArray'> | Convert the input to a masked array, conserving subclasses. | [
"Convert",
"the",
"input",
"to",
"a",
"masked",
"array",
"conserving",
"subclasses",
"."
] | def asanyarray(a, dtype=None):
"""
Convert the input to a masked array, conserving subclasses.
If `a` is a subclass of `MaskedArray`, its class is conserved.
No copy is performed if the input is already an `ndarray`.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array.
dtype : dtype, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Default is 'C'.
Returns
-------
out : MaskedArray
MaskedArray interpretation of `a`.
See Also
--------
asarray : Similar to `asanyarray`, but does not conserve subclass.
Examples
--------
>>> x = np.arange(10.).reshape(2, 5)
>>> x
array([[ 0., 1., 2., 3., 4.],
[ 5., 6., 7., 8., 9.]])
>>> np.ma.asanyarray(x)
masked_array(data =
[[ 0. 1. 2. 3. 4.]
[ 5. 6. 7. 8. 9.]],
mask =
False,
fill_value = 1e+20)
>>> type(np.ma.asanyarray(x))
<class 'numpy.ma.core.MaskedArray'>
"""
return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True) | [
"def",
"asanyarray",
"(",
"a",
",",
"dtype",
"=",
"None",
")",
":",
"return",
"masked_array",
"(",
"a",
",",
"dtype",
"=",
"dtype",
",",
"copy",
"=",
"False",
",",
"keep_mask",
"=",
"True",
",",
"subok",
"=",
"True",
")"
] | https://github.com/chribsen/simple-machine-learning-examples/blob/dc94e52a4cebdc8bb959ff88b81ff8cfeca25022/venv/lib/python2.7/site-packages/numpy/ma/core.py#L7566-L7609 |
|
mesalock-linux/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | lib-python/2.7/mailbox.py | python | MH.get_message | (self, key) | return msg | Return a Message representation or raise a KeyError. | Return a Message representation or raise a KeyError. | [
"Return",
"a",
"Message",
"representation",
"or",
"raise",
"a",
"KeyError",
"."
] | def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
try:
if self._locked:
f = open(os.path.join(self._path, str(key)), 'r+')
else:
f = open(os.path.join(self._path, str(key)), 'r')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
try:
if self._locked:
_lock_file(f)
try:
msg = MHMessage(f)
finally:
if self._locked:
_unlock_file(f)
finally:
f.close()
for name, key_list in self.get_sequences().iteritems():
if key in key_list:
msg.add_sequence(name)
return msg | [
"def",
"get_message",
"(",
"self",
",",
"key",
")",
":",
"try",
":",
"if",
"self",
".",
"_locked",
":",
"f",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_path",
",",
"str",
"(",
"key",
")",
")",
",",
"'r+'",
")",
"else",
":",
"f",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_path",
",",
"str",
"(",
"key",
")",
")",
",",
"'r'",
")",
"except",
"IOError",
",",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"ENOENT",
":",
"raise",
"KeyError",
"(",
"'No message with key: %s'",
"%",
"key",
")",
"else",
":",
"raise",
"try",
":",
"if",
"self",
".",
"_locked",
":",
"_lock_file",
"(",
"f",
")",
"try",
":",
"msg",
"=",
"MHMessage",
"(",
"f",
")",
"finally",
":",
"if",
"self",
".",
"_locked",
":",
"_unlock_file",
"(",
"f",
")",
"finally",
":",
"f",
".",
"close",
"(",
")",
"for",
"name",
",",
"key_list",
"in",
"self",
".",
"get_sequences",
"(",
")",
".",
"iteritems",
"(",
")",
":",
"if",
"key",
"in",
"key_list",
":",
"msg",
".",
"add_sequence",
"(",
"name",
")",
"return",
"msg"
] | https://github.com/mesalock-linux/mesapy/blob/ed546d59a21b36feb93e2309d5c6b75aa0ad95c9/lib-python/2.7/mailbox.py#L1004-L1029 |
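MH is the standard-library mailbox class, so the record can be exercised against any MH-format mail directory; the path is a placeholder.
import mailbox
mh = mailbox.MH('~/Mail/inbox', create=False)  # hypothetical mailbox path
for key in mh.keys():
    msg = mh.get_message(key)        # MHMessage with its sequences attached
    print(key, msg.get_sequences())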
|
seppius-xbmc-repo/ru | d0879d56ec8243b2c7af44fda5cf3d1ff77fd2e2 | plugin.video.torrent.gnu/resources/lib/torr2xbmc.py | python | stream | (params) | [] | def stream (params):
torr_link='f4a94963c11a47f213b145697f494b5fc5485b02'
TSplayer=tsengine()
out=TSplayer.load_torrent(torr_link,'INFOHASH',port=aceport)
if out=='Ok':
TSplayer.play_url_ind(0,'stream',None)
TSplayer.end() | [
"def",
"stream",
"(",
"params",
")",
":",
"torr_link",
"=",
"'f4a94963c11a47f213b145697f494b5fc5485b02'",
"TSplayer",
"=",
"tsengine",
"(",
")",
"out",
"=",
"TSplayer",
".",
"load_torrent",
"(",
"torr_link",
",",
"'INFOHASH'",
",",
"port",
"=",
"aceport",
")",
"if",
"out",
"==",
"'Ok'",
":",
"TSplayer",
".",
"play_url_ind",
"(",
"0",
",",
"'stream'",
",",
"None",
")",
"TSplayer",
".",
"end",
"(",
")"
] | https://github.com/seppius-xbmc-repo/ru/blob/d0879d56ec8243b2c7af44fda5cf3d1ff77fd2e2/plugin.video.torrent.gnu/resources/lib/torr2xbmc.py#L112-L118 |
||||
plaid/plaid-python | 8c60fca608e426f3ff30da8857775946d29e122c | plaid/model/payment_initiation_optional_restriction_bacs.py | python | PaymentInitiationOptionalRestrictionBacs.openapi_types | () | return {
'account': (str,), # noqa: E501
'sort_code': (str,), # noqa: E501
} | This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type. | This must be a method because a model may have properties that are
of type self, this must run after the class is loaded | [
"This",
"must",
"be",
"a",
"method",
"because",
"a",
"model",
"may",
"have",
"properties",
"that",
"are",
"of",
"type",
"self",
"this",
"must",
"run",
"after",
"the",
"class",
"is",
"loaded"
] | def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'account': (str,), # noqa: E501
'sort_code': (str,), # noqa: E501
} | [
"def",
"openapi_types",
"(",
")",
":",
"lazy_import",
"(",
")",
"return",
"{",
"'account'",
":",
"(",
"str",
",",
")",
",",
"# noqa: E501",
"'sort_code'",
":",
"(",
"str",
",",
")",
",",
"# noqa: E501",
"}"
] | https://github.com/plaid/plaid-python/blob/8c60fca608e426f3ff30da8857775946d29e122c/plaid/model/payment_initiation_optional_restriction_bacs.py#L82-L95 |
|
zhl2008/awd-platform | 0416b31abea29743387b10b3914581fbe8e7da5e | web_hxb2/lib/python3.5/site-packages/django/utils/feedgenerator.py | python | SyndicationFeed.root_attributes | (self) | return {} | Return extra attributes to place on the root (i.e. feed/channel) element.
Called from write(). | Return extra attributes to place on the root (i.e. feed/channel) element.
Called from write(). | [
"Return",
"extra",
"attributes",
"to",
"place",
"on",
"the",
"root",
"(",
"i",
".",
"e",
".",
"feed",
"/",
"channel",
")",
"element",
".",
"Called",
"from",
"write",
"()",
"."
] | def root_attributes(self):
"""
Return extra attributes to place on the root (i.e. feed/channel) element.
Called from write().
"""
return {} | [
"def",
"root_attributes",
"(",
"self",
")",
":",
"return",
"{",
"}"
] | https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/django/utils/feedgenerator.py#L170-L175 |
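Subclasses override this hook to put extra attributes (typically XML namespaces) on the feed root; a minimal sketch, with the iTunes namespace as an illustrative choice:
from django.utils import feedgenerator
class ItunesFeed(feedgenerator.Rss201rev2Feed):
    def root_attributes(self):
        attrs = super(ItunesFeed, self).root_attributes()
        attrs['xmlns:itunes'] = 'http://www.itunes.com/dtds/podcast-1.0.dtd'
        return attrs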