id_within_dataset (int64: 46 – 2.71M) | snippet (string: lengths 63 – 481k) | tokens (sequence: lengths 20 – 15.6k) | language (string: 2 classes) | nl (string: lengths 1 – 32.4k) | is_duplicated (bool: 2 classes) |
---|---|---|---|---|---|
2,492,669 | def has_wrong_break(real_seg, pred_seg):
"""
Parameters
----------
real_seg : list of integers
The segmentation as it should be.
pred_seg : list of integers
The predicted segmentation.
Returns
-------
bool :
True, if strokes of one symbol were segmented to be in different
symbols.
"""
for symbol_real in real_seg:
for symbol_pred in pred_seg:
if symbol_real[0] in symbol_pred:
for stroke in symbol_real:
if stroke not in symbol_pred:
return True
return False | [
"def",
"has_wrong_break",
"(",
"real_seg",
",",
"pred_seg",
")",
":",
"for",
"symbol_real",
"in",
"real_seg",
":",
"for",
"symbol_pred",
"in",
"pred_seg",
":",
"if",
"symbol_real",
"[",
"0",
"]",
"in",
"symbol_pred",
":",
"for",
"stroke",
"in",
"symbol_real",
":",
"if",
"stroke",
"not",
"in",
"symbol_pred",
":",
"return",
"True",
"return",
"False"
] | python | Parameters
----------
real_seg : list of integers
The segmentation as it should be.
pred_seg : list of integers
The predicted segmentation.
Returns
-------
bool :
True, if strokes of one symbol were segmented to be in different
symbols. | false |
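For context, a minimal usage sketch of the `has_wrong_break` snippet in the row above. The sample segmentations are hypothetical, and the function is assumed to be defined exactly as shown in the row; each segmentation is a list of symbols, where each symbol is a list of stroke indices.

```python
# Assumes has_wrong_break from the row above is defined in scope.
# A segmentation is a list of symbols; each symbol is a list of stroke indices.
real_seg = [[0, 1], [2, 3]]   # strokes 0 and 1 belong to the same symbol
pred_seg = [[0], [1, 2, 3]]   # stroke 1 was split away from stroke 0

print(has_wrong_break(real_seg, pred_seg))   # True: one symbol's strokes were broken apart
print(has_wrong_break(real_seg, real_seg))   # False: prediction matches the reference
```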
2,692,958 | def resolve_freezer(freezer):
"""
Locate the appropriate freezer given FREEZER or string input from the programmer.
:param freezer: FREEZER constant or string for the freezer that is requested. (None = FREEZER.DEFAULT)
:return:
"""
# Set default freezer if there was none
if not freezer:
return _Default()
# Allow character based lookups as well
if isinstance(freezer, six.string_types):
cls = _freezer_lookup(freezer)
return cls()
# Allow plain class definition lookups (we instantiate the class)
if freezer.__class__ == type.__class__:
return freezer()
# Warn when a custom freezer implementation is used.
if freezer not in FREEZER.ALL:
warn(u"Using custom freezer implelmentation: {0}".format(freezer))
return freezer | [
"def",
"resolve_freezer",
"(",
"freezer",
")",
":",
"if",
"not",
"freezer",
":",
"return",
"_Default",
"(",
")",
"if",
"isinstance",
"(",
"freezer",
",",
"six",
".",
"string_types",
")",
":",
"cls",
"=",
"_freezer_lookup",
"(",
"freezer",
")",
"return",
"cls",
"(",
")",
"if",
"freezer",
".",
"__class__",
"==",
"type",
".",
"__class__",
":",
"return",
"freezer",
"(",
")",
"if",
"freezer",
"not",
"in",
"FREEZER",
".",
"ALL",
":",
"warn",
"(",
"u\"Using custom freezer implelmentation: {0}\"",
".",
"format",
"(",
"freezer",
")",
")",
"return",
"freezer"
] | python | Locate the appropriate freezer given FREEZER or string input from the programmer.
:param freezer: FREEZER constant or string for the freezer that is requested. (None = FREEZER.DEFAULT)
:return: | false |
2,030,942 | def is_handleable(self, device):
# TODO: handle pathes in first argument
"""
Check whether this device should be handled by udiskie.
:param device: device object, block device path or mount path
:returns: handleability
Currently this just means that the device is removable and holds a
filesystem or the device is a LUKS encrypted volume.
"""
ignored = self._ignore_device(device)
# propagate handleability of parent devices:
if ignored is None and device is not None:
return self.is_handleable(_get_parent(device))
return not ignored | [
"def",
"is_handleable",
"(",
"self",
",",
"device",
")",
":",
"ignored",
"=",
"self",
".",
"_ignore_device",
"(",
"device",
")",
"if",
"ignored",
"is",
"None",
"and",
"device",
"is",
"not",
"None",
":",
"return",
"self",
".",
"is_handleable",
"(",
"_get_parent",
"(",
"device",
")",
")",
"return",
"not",
"ignored"
] | python | Check whether this device should be handled by udiskie.
:param device: device object, block device path or mount path
:returns: handleability
Currently this just means that the device is removable and holds a
filesystem or the device is a LUKS encrypted volume. | false |
1,855,992 | def ParseFileObject(self, parser_mediator, file_object):
"""Parses an Opera typed history file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
data = file_object.read(self._HEADER_READ_SIZE)
if not data.startswith(b'<?xml'):
raise errors.UnableToParseFile(
'Not an Opera typed history file [not a XML]')
_, _, data = data.partition(b'\n')
if not data.startswith(b'<typed_history'):
raise errors.UnableToParseFile(
'Not an Opera typed history file [wrong XML root key]')
# For ElementTree to work we need to work on a file object seeked
# to the beginning.
file_object.seek(0, os.SEEK_SET)
xml = ElementTree.parse(file_object)
for history_item in xml.iterfind('typed_history_item'):
event_data = OperaTypedHistoryEventData()
event_data.entry_type = history_item.get('type', None)
event_data.url = history_item.get('content', None)
if event_data.entry_type == 'selected':
event_data.entry_selection = 'Filled from autocomplete.'
elif event_data.entry_type == 'text':
event_data.entry_selection = 'Manually typed.'
last_typed_time = history_item.get('last_typed', None)
if last_typed_time is None:
parser_mediator.ProduceExtractionWarning('missing last typed time.')
continue
date_time = dfdatetime_time_elements.TimeElements()
try:
date_time.CopyFromStringISO8601(last_typed_time)
except ValueError as exception:
parser_mediator.ProduceExtractionWarning(
'unsupported last typed time: {0:s} with error: {1!s}.'.format(
last_typed_time, exception))
continue
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data) | [
"def",
"ParseFileObject",
"(",
"self",
",",
"parser_mediator",
",",
"file_object",
")",
":",
"data",
"=",
"file_object",
".",
"read",
"(",
"self",
".",
"_HEADER_READ_SIZE",
")",
"if",
"not",
"data",
".",
"startswith",
"(",
"b'<?xml'",
")",
":",
"raise",
"errors",
".",
"UnableToParseFile",
"(",
"'Not an Opera typed history file [not a XML]'",
")",
"_",
",",
"_",
",",
"data",
"=",
"data",
".",
"partition",
"(",
"b'\\n'",
")",
"if",
"not",
"data",
".",
"startswith",
"(",
"b'<typed_history'",
")",
":",
"raise",
"errors",
".",
"UnableToParseFile",
"(",
"'Not an Opera typed history file [wrong XML root key]'",
")",
"file_object",
".",
"seek",
"(",
"0",
",",
"os",
".",
"SEEK_SET",
")",
"xml",
"=",
"ElementTree",
".",
"parse",
"(",
"file_object",
")",
"for",
"history_item",
"in",
"xml",
".",
"iterfind",
"(",
"'typed_history_item'",
")",
":",
"event_data",
"=",
"OperaTypedHistoryEventData",
"(",
")",
"event_data",
".",
"entry_type",
"=",
"history_item",
".",
"get",
"(",
"'type'",
",",
"None",
")",
"event_data",
".",
"url",
"=",
"history_item",
".",
"get",
"(",
"'content'",
",",
"None",
")",
"if",
"event_data",
".",
"entry_type",
"==",
"'selected'",
":",
"event_data",
".",
"entry_selection",
"=",
"'Filled from autocomplete.'",
"elif",
"event_data",
".",
"entry_type",
"==",
"'text'",
":",
"event_data",
".",
"entry_selection",
"=",
"'Manually typed.'",
"last_typed_time",
"=",
"history_item",
".",
"get",
"(",
"'last_typed'",
",",
"None",
")",
"if",
"last_typed_time",
"is",
"None",
":",
"parser_mediator",
".",
"ProduceExtractionWarning",
"(",
"'missing last typed time.'",
")",
"continue",
"date_time",
"=",
"dfdatetime_time_elements",
".",
"TimeElements",
"(",
")",
"try",
":",
"date_time",
".",
"CopyFromStringISO8601",
"(",
"last_typed_time",
")",
"except",
"ValueError",
"as",
"exception",
":",
"parser_mediator",
".",
"ProduceExtractionWarning",
"(",
"'unsupported last typed time: {0:s} with error: {1!s}.'",
".",
"format",
"(",
"last_typed_time",
",",
"exception",
")",
")",
"continue",
"event",
"=",
"time_events",
".",
"DateTimeValuesEvent",
"(",
"date_time",
",",
"definitions",
".",
"TIME_DESCRIPTION_LAST_VISITED",
")",
"parser_mediator",
".",
"ProduceEventWithEventData",
"(",
"event",
",",
"event_data",
")"
] | python | Parses an Opera typed history file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed. | false |
2,326,655 | def from_json(cls, stream, json_data):
"""Create a new DataPoint object from device cloud JSON data
:param DataStream stream: The :class:`~DataStream` out of which this data is coming
:param dict json_data: Deserialized JSON data from Device Cloud about this device
:raises ValueError: if the data is malformed
:return: (:class:`~DataPoint`) newly created :class:`~DataPoint`
"""
type_converter = _get_decoder_method(stream.get_data_type())
data = type_converter(json_data.get("data"))
return cls(
# these are actually properties of the stream, not the data point
stream_id=stream.get_stream_id(),
data_type=stream.get_data_type(),
units=stream.get_units(),
# and these are part of the data point itself
data=data,
description=json_data.get("description"),
timestamp=json_data.get("timestampISO"),
server_timestamp=json_data.get("serverTimestampISO"),
quality=json_data.get("quality"),
location=json_data.get("location"),
dp_id=json_data.get("id"),
) | [
"def",
"from_json",
"(",
"cls",
",",
"stream",
",",
"json_data",
")",
":",
"type_converter",
"=",
"_get_decoder_method",
"(",
"stream",
".",
"get_data_type",
"(",
")",
")",
"data",
"=",
"type_converter",
"(",
"json_data",
".",
"get",
"(",
"\"data\"",
")",
")",
"return",
"cls",
"(",
"stream_id",
"=",
"stream",
".",
"get_stream_id",
"(",
")",
",",
"data_type",
"=",
"stream",
".",
"get_data_type",
"(",
")",
",",
"units",
"=",
"stream",
".",
"get_units",
"(",
")",
",",
"data",
"=",
"data",
",",
"description",
"=",
"json_data",
".",
"get",
"(",
"\"description\"",
")",
",",
"timestamp",
"=",
"json_data",
".",
"get",
"(",
"\"timestampISO\"",
")",
",",
"server_timestamp",
"=",
"json_data",
".",
"get",
"(",
"\"serverTimestampISO\"",
")",
",",
"quality",
"=",
"json_data",
".",
"get",
"(",
"\"quality\"",
")",
",",
"location",
"=",
"json_data",
".",
"get",
"(",
"\"location\"",
")",
",",
"dp_id",
"=",
"json_data",
".",
"get",
"(",
"\"id\"",
")",
",",
")"
] | python | Create a new DataPoint object from device cloud JSON data
:param DataStream stream: The :class:`~DataStream` out of which this data is coming
:param dict json_data: Deserialized JSON data from Device Cloud about this device
:raises ValueError: if the data is malformed
:return: (:class:`~DataPoint`) newly created :class:`~DataPoint` | false |
1,677,200 | def imresize(x, size=None, interp='bicubic', mode=None):
"""Resize an image by given output size and method.
Warning, this function will rescale the value to [0, 255].
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
size : list of 2 int or None
For height and width.
interp : str
Interpolation method for re-sizing (`nearest`, `lanczos`, `bilinear`, `bicubic` (default) or `cubic`).
mode : str
The PIL image mode (`P`, `L`, etc.) to convert image before resizing.
Returns
-------
numpy.array
A processed image.
References
------------
- `scipy.misc.imresize <https://docs.scipy.org/doc/scipy/reference/generated/scipy.misc.imresize.html>`__
"""
if size is None:
size = [100, 100]
if x.shape[-1] == 1:
# greyscale
x = scipy.misc.imresize(x[:, :, 0], size, interp=interp, mode=mode)
return x[:, :, np.newaxis]
else:
# rgb, bgr, rgba
return scipy.misc.imresize(x, size, interp=interp, mode=mode) | [
"def",
"imresize",
"(",
"x",
",",
"size",
"=",
"None",
",",
"interp",
"=",
"'bicubic'",
",",
"mode",
"=",
"None",
")",
":",
"if",
"size",
"is",
"None",
":",
"size",
"=",
"[",
"100",
",",
"100",
"]",
"if",
"x",
".",
"shape",
"[",
"-",
"1",
"]",
"==",
"1",
":",
"x",
"=",
"scipy",
".",
"misc",
".",
"imresize",
"(",
"x",
"[",
":",
",",
":",
",",
"0",
"]",
",",
"size",
",",
"interp",
"=",
"interp",
",",
"mode",
"=",
"mode",
")",
"return",
"x",
"[",
":",
",",
":",
",",
"np",
".",
"newaxis",
"]",
"else",
":",
"return",
"scipy",
".",
"misc",
".",
"imresize",
"(",
"x",
",",
"size",
",",
"interp",
"=",
"interp",
",",
"mode",
"=",
"mode",
")"
] | python | Resize an image by given output size and method.
Warning, this function will rescale the value to [0, 255].
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
size : list of 2 int or None
For height and width.
interp : str
Interpolation method for re-sizing (`nearest`, `lanczos`, `bilinear`, `bicubic` (default) or `cubic`).
mode : str
The PIL image mode (`P`, `L`, etc.) to convert image before resizing.
Returns
-------
numpy.array
A processed image.
References
------------
- `scipy.misc.imresize <https://docs.scipy.org/doc/scipy/reference/generated/scipy.misc.imresize.html>`__ | false |
1,999,816 | def ecef2geodetic_old(x: float, y: float, z: float,
ell: Ellipsoid = None, deg: bool = True) -> Tuple[float, float, float]:
"""
convert ECEF (meters) to geodetic coordinates
input
-----
x,y,z [meters] target ECEF location [0,Infinity)
ell reference ellipsoid
deg degrees input/output (False: radians in/out)
output
------
lat,lon (degrees/radians)
alt (meters)
Algorithm is based on
http://www.astro.uni.torun.pl/~kb/Papers/geod/Geod-BG.htm
This algorithm provides a converging solution to the latitude equation
in terms of the parametric or reduced latitude form (v)
This algorithm provides a uniform solution over all latitudes as it does
not involve division by cos(phi) or sin(phi)
"""
if ell is None:
ell = Ellipsoid()
ea = ell.a
eb = ell.b
rad = hypot(x, y)
# Constant required for Latitude equation
rho = arctan2(eb * z, ea * rad)
# Constant required for latitude equation
c = (ea**2 - eb**2) / hypot(ea * rad, eb * z)
# Starter for the Newtons Iteration Method
vnew = arctan2(ea * z, eb * rad)
# Initializing the parametric latitude
v = 0
for _ in range(5):
v = deepcopy(vnew)
# %% Newtons Method for computing iterations
vnew = v - ((2 * sin(v - rho) - c * sin(2 * v)) /
(2 * (cos(v - rho) - c * cos(2 * v))))
if allclose(v, vnew):
break
# %% Computing latitude from the root of the latitude equation
lat = arctan2(ea * tan(vnew), eb)
# by inspection
lon = arctan2(y, x)
alt = (((rad - ea * cos(vnew)) * cos(lat)) +
((z - eb * sin(vnew)) * sin(lat)))
with np.errstate(invalid='ignore'):
# NOTE: need np.any() to handle scalar and array cases
if np.any((lat < -pi / 2) | (lat > pi / 2)):
raise ValueError('-90 <= lat <= 90')
if np.any((lon < -pi) | (lon > 2 * pi)):
raise ValueError('-180 <= lat <= 360')
if deg:
return degrees(lat), degrees(lon), alt
else:
return lat, lon, alt | [
"def",
"ecef2geodetic_old",
"(",
"x",
":",
"float",
",",
"y",
":",
"float",
",",
"z",
":",
"float",
",",
"ell",
":",
"Ellipsoid",
"=",
"None",
",",
"deg",
":",
"bool",
"=",
"True",
")",
"->",
"Tuple",
"[",
"float",
",",
"float",
",",
"float",
"]",
":",
"if",
"ell",
"is",
"None",
":",
"ell",
"=",
"Ellipsoid",
"(",
")",
"ea",
"=",
"ell",
".",
"a",
"eb",
"=",
"ell",
".",
"b",
"rad",
"=",
"hypot",
"(",
"x",
",",
"y",
")",
"rho",
"=",
"arctan2",
"(",
"eb",
"*",
"z",
",",
"ea",
"*",
"rad",
")",
"c",
"=",
"(",
"ea",
"**",
"2",
"-",
"eb",
"**",
"2",
")",
"/",
"hypot",
"(",
"ea",
"*",
"rad",
",",
"eb",
"*",
"z",
")",
"vnew",
"=",
"arctan2",
"(",
"ea",
"*",
"z",
",",
"eb",
"*",
"rad",
")",
"v",
"=",
"0",
"for",
"_",
"in",
"range",
"(",
"5",
")",
":",
"v",
"=",
"deepcopy",
"(",
"vnew",
")",
"vnew",
"=",
"v",
"-",
"(",
"(",
"2",
"*",
"sin",
"(",
"v",
"-",
"rho",
")",
"-",
"c",
"*",
"sin",
"(",
"2",
"*",
"v",
")",
")",
"/",
"(",
"2",
"*",
"(",
"cos",
"(",
"v",
"-",
"rho",
")",
"-",
"c",
"*",
"cos",
"(",
"2",
"*",
"v",
")",
")",
")",
")",
"if",
"allclose",
"(",
"v",
",",
"vnew",
")",
":",
"break",
"lat",
"=",
"arctan2",
"(",
"ea",
"*",
"tan",
"(",
"vnew",
")",
",",
"eb",
")",
"lon",
"=",
"arctan2",
"(",
"y",
",",
"x",
")",
"alt",
"=",
"(",
"(",
"(",
"rad",
"-",
"ea",
"*",
"cos",
"(",
"vnew",
")",
")",
"*",
"cos",
"(",
"lat",
")",
")",
"+",
"(",
"(",
"z",
"-",
"eb",
"*",
"sin",
"(",
"vnew",
")",
")",
"*",
"sin",
"(",
"lat",
")",
")",
")",
"with",
"np",
".",
"errstate",
"(",
"invalid",
"=",
"'ignore'",
")",
":",
"if",
"np",
".",
"any",
"(",
"(",
"lat",
"<",
"-",
"pi",
"/",
"2",
")",
"|",
"(",
"lat",
">",
"pi",
"/",
"2",
")",
")",
":",
"raise",
"ValueError",
"(",
"'-90 <= lat <= 90'",
")",
"if",
"np",
".",
"any",
"(",
"(",
"lon",
"<",
"-",
"pi",
")",
"|",
"(",
"lon",
">",
"2",
"*",
"pi",
")",
")",
":",
"raise",
"ValueError",
"(",
"'-180 <= lat <= 360'",
")",
"if",
"deg",
":",
"return",
"degrees",
"(",
"lat",
")",
",",
"degrees",
"(",
"lon",
")",
",",
"alt",
"else",
":",
"return",
"lat",
",",
"lon",
",",
"alt"
] | python | convert ECEF (meters) to geodetic coordinates
input
-----
x,y,z [meters] target ECEF location [0,Infinity)
ell reference ellipsoid
deg degrees input/output (False: radians in/out)
output
------
lat,lon (degrees/radians)
alt (meters)
Algorithm is based on
http://www.astro.uni.torun.pl/~kb/Papers/geod/Geod-BG.htm
This algorithm provides a converging solution to the latitude equation
in terms of the parametric or reduced latitude form (v)
This algorithm provides a uniform solution over all latitudes as it does
not involve division by cos(phi) or sin(phi) | false |
2,030,576 | def nvmlUnitGetPsuInfo(unit):
r"""
/**
* Retrieves the PSU stats for the unit.
*
* For S-class products.
*
* See \ref nvmlPSUInfo_t for details on available PSU info.
*
* @param unit The identifier of the target unit
* @param psu Reference in which to return the PSU information
*
* @return
* - \ref NVML_SUCCESS if \a psu has been populated
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a psu is NULL
* - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlUnitGetPsuInfo
"""
"""
/**
* Retrieves the PSU stats for the unit.
*
* For S-class products.
*
* See \ref nvmlPSUInfo_t for details on available PSU info.
*
* @param unit The identifier of the target unit
* @param psu Reference in which to return the PSU information
*
* @return
* - \ref NVML_SUCCESS if \a psu has been populated
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a psu is NULL
* - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
"""
c_info = c_nvmlPSUInfo_t()
fn = _nvmlGetFunctionPointer("nvmlUnitGetPsuInfo")
ret = fn(unit, byref(c_info))
_nvmlCheckReturn(ret)
return bytes_to_str(c_info) | [
"def",
"nvmlUnitGetPsuInfo",
"(",
"unit",
")",
":",
"c_info",
"=",
"c_nvmlPSUInfo_t",
"(",
")",
"fn",
"=",
"_nvmlGetFunctionPointer",
"(",
"\"nvmlUnitGetPsuInfo\"",
")",
"ret",
"=",
"fn",
"(",
"unit",
",",
"byref",
"(",
"c_info",
")",
")",
"_nvmlCheckReturn",
"(",
"ret",
")",
"return",
"bytes_to_str",
"(",
"c_info",
")"
] | python | r"""
/**
* Retrieves the PSU stats for the unit.
*
* For S-class products.
*
* See \ref nvmlPSUInfo_t for details on available PSU info.
*
* @param unit The identifier of the target unit
* @param psu Reference in which to return the PSU information
*
* @return
* - \ref NVML_SUCCESS if \a psu has been populated
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a psu is NULL
* - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlUnitGetPsuInfo | false |
1,757,802 | def predict(self, h=5, intervals=False, oos_data=None, **kwargs):
""" Makes forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
intervals : boolean (default: False)
Whether to return prediction intervals
oos_data : pd.DataFrame
Data for the variables to be used out of sample (ys can be NaNs)
Returns
----------
- pd.DataFrame with predictions
"""
nsims = kwargs.get('nsims', 200)
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
_, X_oos = dmatrices(self.formula, oos_data)
X_oos = np.array([X_oos])[0]
full_X = self.X.copy()
full_X = np.append(full_X,X_oos,axis=0)
Z = full_X
date_index = self.shift_dates(h)
# Retrieve data, dates and (transformed) latent variables
if self.latent_variables.estimation_method in ['M-H']:
lower_1_final = 0
upper_99_final = 0
lower_5_final = 0
upper_95_final = 0
forecasted_values_final = 0
for i in range(nsims):
t_params = self.draw_latent_variables(nsims=1).T[0]
a, P = self._forecast_model(t_params, Z, h)
smoothed_series = np.zeros(h)
series_variance = np.zeros(h)
for t in range(h):
smoothed_series[t] = np.dot(Z[self.y.shape[0]+t],a[:,self.y.shape[0]+t])
series_variance[t] = np.dot(np.dot(Z[self.y.shape[0]+t],P[:,:,self.y.shape[0]+t]),Z[self.y.shape[0]+t].T)
forecasted_values = smoothed_series
lower_5 = smoothed_series - 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]),0.5)
upper_95 = smoothed_series + 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]),0.5)
lower_5_final += lower_5
upper_95_final += upper_95
lower_1 = smoothed_series - 2.575*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]),0.5)
upper_99 = smoothed_series + 2.575*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]),0.5)
lower_1_final += lower_1
upper_99_final += upper_99
forecasted_values_final += forecasted_values
forecasted_values_final = forecasted_values_final / nsims
lower_1_final = lower_1_final / nsims
lower_5_final = lower_5_final / nsims
upper_95_final = upper_95_final / nsims
upper_99_final = upper_99_final / nsims
if intervals is False:
result = pd.DataFrame(forecasted_values_final)
result.rename(columns={0:self.data_name}, inplace=True)
else:
prediction_05 = lower_5_final
prediction_95 = upper_95_final
prediction_01 = lower_1_final
prediction_99 = upper_99_final
result = pd.DataFrame([forecasted_values_final, prediction_01, prediction_05,
prediction_95, prediction_99]).T
result.rename(columns={0:self.data_name, 1: "1% Prediction Interval",
2: "5% Prediction Interval", 3: "95% Prediction Interval", 4: "99% Prediction Interval"},
inplace=True)
result.index = date_index[-h:]
return result
else:
t_params = self.latent_variables.get_z_values()
a, P = self._forecast_model(t_params, Z, h)
smoothed_series = np.zeros(h)
for t in range(h):
smoothed_series[t] = np.dot(Z[self.y.shape[0]+t],a[:,self.y.shape[0]+t])
# Retrieve data, dates and (transformed) latent variables
forecasted_values = smoothed_series
if intervals is False:
result = pd.DataFrame(forecasted_values)
result.rename(columns={0:self.data_name}, inplace=True)
else:
series_variance = np.zeros(h)
for t in range(h):
series_variance[t] = np.dot(np.dot(Z[self.y.shape[0]+t],P[:,:,self.y.shape[0]+t]),Z[self.y.shape[0]+t].T)
prediction_05 = forecasted_values - 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0]),0.5)
prediction_95 = forecasted_values + 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0]),0.5)
prediction_01 = forecasted_values - 2.575*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0]),0.5)
prediction_99 = forecasted_values + 2.575*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0]),0.5)
result = pd.DataFrame([forecasted_values, prediction_01, prediction_05,
prediction_95, prediction_99]).T
result.rename(columns={0:self.data_name, 1: "1% Prediction Interval",
2: "5% Prediction Interval", 3: "95% Prediction Interval", 4: "99% Prediction Interval"},
inplace=True)
result.index = date_index[-h:]
return result | [
"def",
"predict",
"(",
"self",
",",
"h",
"=",
"5",
",",
"intervals",
"=",
"False",
",",
"oos_data",
"=",
"None",
",",
"**",
"kwargs",
")",
":",
"nsims",
"=",
"kwargs",
".",
"get",
"(",
"'nsims'",
",",
"200",
")",
"if",
"self",
".",
"latent_variables",
".",
"estimated",
"is",
"False",
":",
"raise",
"Exception",
"(",
"\"No latent variables estimated!\"",
")",
"else",
":",
"_",
",",
"X_oos",
"=",
"dmatrices",
"(",
"self",
".",
"formula",
",",
"oos_data",
")",
"X_oos",
"=",
"np",
".",
"array",
"(",
"[",
"X_oos",
"]",
")",
"[",
"0",
"]",
"full_X",
"=",
"self",
".",
"X",
".",
"copy",
"(",
")",
"full_X",
"=",
"np",
".",
"append",
"(",
"full_X",
",",
"X_oos",
",",
"axis",
"=",
"0",
")",
"Z",
"=",
"full_X",
"date_index",
"=",
"self",
".",
"shift_dates",
"(",
"h",
")",
"if",
"self",
".",
"latent_variables",
".",
"estimation_method",
"in",
"[",
"'M-H'",
"]",
":",
"lower_1_final",
"=",
"0",
"upper_99_final",
"=",
"0",
"lower_5_final",
"=",
"0",
"upper_95_final",
"=",
"0",
"forecasted_values_final",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"nsims",
")",
":",
"t_params",
"=",
"self",
".",
"draw_latent_variables",
"(",
"nsims",
"=",
"1",
")",
".",
"T",
"[",
"0",
"]",
"a",
",",
"P",
"=",
"self",
".",
"_forecast_model",
"(",
"t_params",
",",
"Z",
",",
"h",
")",
"smoothed_series",
"=",
"np",
".",
"zeros",
"(",
"h",
")",
"series_variance",
"=",
"np",
".",
"zeros",
"(",
"h",
")",
"for",
"t",
"in",
"range",
"(",
"h",
")",
":",
"smoothed_series",
"[",
"t",
"]",
"=",
"np",
".",
"dot",
"(",
"Z",
"[",
"self",
".",
"y",
".",
"shape",
"[",
"0",
"]",
"+",
"t",
"]",
",",
"a",
"[",
":",
",",
"self",
".",
"y",
".",
"shape",
"[",
"0",
"]",
"+",
"t",
"]",
")",
"series_variance",
"[",
"t",
"]",
"=",
"np",
".",
"dot",
"(",
"np",
".",
"dot",
"(",
"Z",
"[",
"self",
".",
"y",
".",
"shape",
"[",
"0",
"]",
"+",
"t",
"]",
",",
"P",
"[",
":",
",",
":",
",",
"self",
".",
"y",
".",
"shape",
"[",
"0",
"]",
"+",
"t",
"]",
")",
",",
"Z",
"[",
"self",
".",
"y",
".",
"shape",
"[",
"0",
"]",
"+",
"t",
"]",
".",
"T",
")",
"forecasted_values",
"=",
"smoothed_series",
"lower_5",
"=",
"smoothed_series",
"-",
"1.96",
"*",
"np",
".",
"power",
"(",
"P",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"-",
"h",
":",
"]",
"+",
"self",
".",
"latent_variables",
".",
"z_list",
"[",
"0",
"]",
".",
"prior",
".",
"transform",
"(",
"t_params",
"[",
"0",
"]",
")",
",",
"0.5",
")",
"upper_95",
"=",
"smoothed_series",
"+",
"1.96",
"*",
"np",
".",
"power",
"(",
"P",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"-",
"h",
":",
"]",
"+",
"self",
".",
"latent_variables",
".",
"z_list",
"[",
"0",
"]",
".",
"prior",
".",
"transform",
"(",
"t_params",
"[",
"0",
"]",
")",
",",
"0.5",
")",
"lower_5_final",
"+=",
"lower_5",
"upper_95_final",
"+=",
"upper_95",
"lower_1",
"=",
"smoothed_series",
"-",
"2.575",
"*",
"np",
".",
"power",
"(",
"P",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"-",
"h",
":",
"]",
"+",
"self",
".",
"latent_variables",
".",
"z_list",
"[",
"0",
"]",
".",
"prior",
".",
"transform",
"(",
"t_params",
"[",
"0",
"]",
")",
",",
"0.5",
")",
"upper_99",
"=",
"smoothed_series",
"+",
"2.575",
"*",
"np",
".",
"power",
"(",
"P",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"-",
"h",
":",
"]",
"+",
"self",
".",
"latent_variables",
".",
"z_list",
"[",
"0",
"]",
".",
"prior",
".",
"transform",
"(",
"t_params",
"[",
"0",
"]",
")",
",",
"0.5",
")",
"lower_1_final",
"+=",
"lower_1",
"upper_99_final",
"+=",
"upper_99",
"forecasted_values_final",
"+=",
"forecasted_values",
"forecasted_values_final",
"=",
"forecasted_values_final",
"/",
"nsims",
"lower_1_final",
"=",
"lower_1_final",
"/",
"nsims",
"lower_5_final",
"=",
"lower_5_final",
"/",
"nsims",
"upper_95_final",
"=",
"upper_95_final",
"/",
"nsims",
"upper_99_final",
"=",
"upper_99_final",
"/",
"nsims",
"if",
"intervals",
"is",
"False",
":",
"result",
"=",
"pd",
".",
"DataFrame",
"(",
"forecasted_values_final",
")",
"result",
".",
"rename",
"(",
"columns",
"=",
"{",
"0",
":",
"self",
".",
"data_name",
"}",
",",
"inplace",
"=",
"True",
")",
"else",
":",
"prediction_05",
"=",
"lower_5_final",
"prediction_95",
"=",
"upper_95_final",
"prediction_01",
"=",
"lower_1_final",
"prediction_99",
"=",
"upper_99_final",
"result",
"=",
"pd",
".",
"DataFrame",
"(",
"[",
"forecasted_values_final",
",",
"prediction_01",
",",
"prediction_05",
",",
"prediction_95",
",",
"prediction_99",
"]",
")",
".",
"T",
"result",
".",
"rename",
"(",
"columns",
"=",
"{",
"0",
":",
"self",
".",
"data_name",
",",
"1",
":",
"\"1% Prediction Interval\"",
",",
"2",
":",
"\"5% Prediction Interval\"",
",",
"3",
":",
"\"95% Prediction Interval\"",
",",
"4",
":",
"\"99% Prediction Interval\"",
"}",
",",
"inplace",
"=",
"True",
")",
"result",
".",
"index",
"=",
"date_index",
"[",
"-",
"h",
":",
"]",
"return",
"result",
"else",
":",
"t_params",
"=",
"self",
".",
"latent_variables",
".",
"get_z_values",
"(",
")",
"a",
",",
"P",
"=",
"self",
".",
"_forecast_model",
"(",
"t_params",
",",
"Z",
",",
"h",
")",
"smoothed_series",
"=",
"np",
".",
"zeros",
"(",
"h",
")",
"for",
"t",
"in",
"range",
"(",
"h",
")",
":",
"smoothed_series",
"[",
"t",
"]",
"=",
"np",
".",
"dot",
"(",
"Z",
"[",
"self",
".",
"y",
".",
"shape",
"[",
"0",
"]",
"+",
"t",
"]",
",",
"a",
"[",
":",
",",
"self",
".",
"y",
".",
"shape",
"[",
"0",
"]",
"+",
"t",
"]",
")",
"forecasted_values",
"=",
"smoothed_series",
"if",
"intervals",
"is",
"False",
":",
"result",
"=",
"pd",
".",
"DataFrame",
"(",
"forecasted_values",
")",
"result",
".",
"rename",
"(",
"columns",
"=",
"{",
"0",
":",
"self",
".",
"data_name",
"}",
",",
"inplace",
"=",
"True",
")",
"else",
":",
"series_variance",
"=",
"np",
".",
"zeros",
"(",
"h",
")",
"for",
"t",
"in",
"range",
"(",
"h",
")",
":",
"series_variance",
"[",
"t",
"]",
"=",
"np",
".",
"dot",
"(",
"np",
".",
"dot",
"(",
"Z",
"[",
"self",
".",
"y",
".",
"shape",
"[",
"0",
"]",
"+",
"t",
"]",
",",
"P",
"[",
":",
",",
":",
",",
"self",
".",
"y",
".",
"shape",
"[",
"0",
"]",
"+",
"t",
"]",
")",
",",
"Z",
"[",
"self",
".",
"y",
".",
"shape",
"[",
"0",
"]",
"+",
"t",
"]",
".",
"T",
")",
"prediction_05",
"=",
"forecasted_values",
"-",
"1.96",
"*",
"np",
".",
"power",
"(",
"P",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"-",
"h",
":",
"]",
"+",
"self",
".",
"latent_variables",
".",
"z_list",
"[",
"0",
"]",
".",
"prior",
".",
"transform",
"(",
"self",
".",
"latent_variables",
".",
"get_z_values",
"(",
")",
"[",
"0",
"]",
")",
",",
"0.5",
")",
"prediction_95",
"=",
"forecasted_values",
"+",
"1.96",
"*",
"np",
".",
"power",
"(",
"P",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"-",
"h",
":",
"]",
"+",
"self",
".",
"latent_variables",
".",
"z_list",
"[",
"0",
"]",
".",
"prior",
".",
"transform",
"(",
"self",
".",
"latent_variables",
".",
"get_z_values",
"(",
")",
"[",
"0",
"]",
")",
",",
"0.5",
")",
"prediction_01",
"=",
"forecasted_values",
"-",
"2.575",
"*",
"np",
".",
"power",
"(",
"P",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"-",
"h",
":",
"]",
"+",
"self",
".",
"latent_variables",
".",
"z_list",
"[",
"0",
"]",
".",
"prior",
".",
"transform",
"(",
"self",
".",
"latent_variables",
".",
"get_z_values",
"(",
")",
"[",
"0",
"]",
")",
",",
"0.5",
")",
"prediction_99",
"=",
"forecasted_values",
"+",
"2.575",
"*",
"np",
".",
"power",
"(",
"P",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"-",
"h",
":",
"]",
"+",
"self",
".",
"latent_variables",
".",
"z_list",
"[",
"0",
"]",
".",
"prior",
".",
"transform",
"(",
"self",
".",
"latent_variables",
".",
"get_z_values",
"(",
")",
"[",
"0",
"]",
")",
",",
"0.5",
")",
"result",
"=",
"pd",
".",
"DataFrame",
"(",
"[",
"forecasted_values",
",",
"prediction_01",
",",
"prediction_05",
",",
"prediction_95",
",",
"prediction_99",
"]",
")",
".",
"T",
"result",
".",
"rename",
"(",
"columns",
"=",
"{",
"0",
":",
"self",
".",
"data_name",
",",
"1",
":",
"\"1% Prediction Interval\"",
",",
"2",
":",
"\"5% Prediction Interval\"",
",",
"3",
":",
"\"95% Prediction Interval\"",
",",
"4",
":",
"\"99% Prediction Interval\"",
"}",
",",
"inplace",
"=",
"True",
")",
"result",
".",
"index",
"=",
"date_index",
"[",
"-",
"h",
":",
"]",
"return",
"result"
] | python | Makes forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
intervals : boolean (default: False)
Whether to return prediction intervals
oos_data : pd.DataFrame
Data for the variables to be used out of sample (ys can be NaNs)
Returns
----------
- pd.DataFrame with predictions | false |
1,647,689 | def init(self):
"""Init the connection to the InfluxDB server."""
if not self.export_enable:
return None
try:
db = InfluxDBClient(host=self.host,
port=self.port,
username=self.user,
password=self.password,
database=self.db)
get_all_db = [i['name'] for i in db.get_list_database()]
except InfluxDBClientError as e:
logger.critical("Cannot connect to InfluxDB database '%s' (%s)" % (self.db, e))
sys.exit(2)
if self.db in get_all_db:
logger.info(
"Stats will be exported to InfluxDB server: {}".format(db._baseurl))
else:
logger.critical("InfluxDB database '%s' did not exist. Please create it" % self.db)
sys.exit(2)
return db | [
"def",
"init",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"export_enable",
":",
"return",
"None",
"try",
":",
"db",
"=",
"InfluxDBClient",
"(",
"host",
"=",
"self",
".",
"host",
",",
"port",
"=",
"self",
".",
"port",
",",
"username",
"=",
"self",
".",
"user",
",",
"password",
"=",
"self",
".",
"password",
",",
"database",
"=",
"self",
".",
"db",
")",
"get_all_db",
"=",
"[",
"i",
"[",
"'name'",
"]",
"for",
"i",
"in",
"db",
".",
"get_list_database",
"(",
")",
"]",
"except",
"InfluxDBClientError",
"as",
"e",
":",
"logger",
".",
"critical",
"(",
"\"Cannot connect to InfluxDB database '%s' (%s)\"",
"%",
"(",
"self",
".",
"db",
",",
"e",
")",
")",
"sys",
".",
"exit",
"(",
"2",
")",
"if",
"self",
".",
"db",
"in",
"get_all_db",
":",
"logger",
".",
"info",
"(",
"\"Stats will be exported to InfluxDB server: {}\"",
".",
"format",
"(",
"db",
".",
"_baseurl",
")",
")",
"else",
":",
"logger",
".",
"critical",
"(",
"\"InfluxDB database '%s' did not exist. Please create it\"",
"%",
"self",
".",
"db",
")",
"sys",
".",
"exit",
"(",
"2",
")",
"return",
"db"
] | python | Init the connection to the InfluxDB server. | false |
1,819,302 | def OnDeleteTabs(self, event):
"""Deletes tables"""
with undo.group(_("Delete table")):
self.grid.actions.delete_tabs(self.grid.current_table, 1)
self.grid.GetTable().ResetView()
self.grid.actions.zoom()
event.Skip() | [
"def",
"OnDeleteTabs",
"(",
"self",
",",
"event",
")",
":",
"with",
"undo",
".",
"group",
"(",
"_",
"(",
"\"Delete table\"",
")",
")",
":",
"self",
".",
"grid",
".",
"actions",
".",
"delete_tabs",
"(",
"self",
".",
"grid",
".",
"current_table",
",",
"1",
")",
"self",
".",
"grid",
".",
"GetTable",
"(",
")",
".",
"ResetView",
"(",
")",
"self",
".",
"grid",
".",
"actions",
".",
"zoom",
"(",
")",
"event",
".",
"Skip",
"(",
")"
] | python | Deletes tables | false |
2,062,960 | def _starttls(self):
"""
Exchange a STARTTLS message with Riak to initiate secure communications
return True is Riak responds with a STARTTLS response, False otherwise
"""
resp_code, _ = self._non_connect_send_recv(
riak.pb.messages.MSG_CODE_START_TLS)
if resp_code == riak.pb.messages.MSG_CODE_START_TLS:
return True
else:
return False | [
"def",
"_starttls",
"(",
"self",
")",
":",
"resp_code",
",",
"_",
"=",
"self",
".",
"_non_connect_send_recv",
"(",
"riak",
".",
"pb",
".",
"messages",
".",
"MSG_CODE_START_TLS",
")",
"if",
"resp_code",
"==",
"riak",
".",
"pb",
".",
"messages",
".",
"MSG_CODE_START_TLS",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | python | Exchange a STARTTLS message with Riak to initiate secure communications
return True is Riak responds with a STARTTLS response, False otherwise | false |
1,623,261 | def create_new_client(self, give_focus=True, filename='', is_cython=False,
is_pylab=False, is_sympy=False, given_name=None):
"""Create a new client"""
self.master_clients += 1
client_id = dict(int_id=to_text_string(self.master_clients),
str_id='A')
cf = self._new_connection_file()
show_elapsed_time = self.get_option('show_elapsed_time')
reset_warning = self.get_option('show_reset_namespace_warning')
ask_before_restart = self.get_option('ask_before_restart')
client = ClientWidget(self, id_=client_id,
history_filename=get_conf_path('history.py'),
config_options=self.config_options(),
additional_options=self.additional_options(
is_pylab=is_pylab,
is_sympy=is_sympy),
interpreter_versions=self.interpreter_versions(),
connection_file=cf,
menu_actions=self.menu_actions,
options_button=self.options_button,
show_elapsed_time=show_elapsed_time,
reset_warning=reset_warning,
given_name=given_name,
ask_before_restart=ask_before_restart,
css_path=self.css_path)
# Change stderr_dir if requested
if self.test_dir is not None:
client.stderr_dir = self.test_dir
self.add_tab(client, name=client.get_name(), filename=filename)
if cf is None:
error_msg = self.permission_error_msg.format(jupyter_runtime_dir())
client.show_kernel_error(error_msg)
return
# Check if ipykernel is present in the external interpreter.
# Else we won't be able to create a client
if not CONF.get('main_interpreter', 'default'):
pyexec = CONF.get('main_interpreter', 'executable')
has_spyder_kernels = programs.is_module_installed(
'spyder_kernels',
interpreter=pyexec,
version='>=1.0.0')
if not has_spyder_kernels:
client.show_kernel_error(
_("Your Python environment or installation doesn't "
"have the <tt>spyder-kernels</tt> module or the "
"right version of it installed. "
"Without this module is not possible for "
"Spyder to create a console for you.<br><br>"
"You can install it by running in a system terminal:"
"<br><br>"
"<tt>conda install spyder-kernels</tt>"
"<br><br>or<br><br>"
"<tt>pip install spyder-kernels</tt>"))
return
self.connect_client_to_kernel(client, is_cython=is_cython,
is_pylab=is_pylab, is_sympy=is_sympy)
if client.shellwidget.kernel_manager is None:
return
self.register_client(client) | [
"def",
"create_new_client",
"(",
"self",
",",
"give_focus",
"=",
"True",
",",
"filename",
"=",
"''",
",",
"is_cython",
"=",
"False",
",",
"is_pylab",
"=",
"False",
",",
"is_sympy",
"=",
"False",
",",
"given_name",
"=",
"None",
")",
":",
"self",
".",
"master_clients",
"+=",
"1",
"client_id",
"=",
"dict",
"(",
"int_id",
"=",
"to_text_string",
"(",
"self",
".",
"master_clients",
")",
",",
"str_id",
"=",
"'A'",
")",
"cf",
"=",
"self",
".",
"_new_connection_file",
"(",
")",
"show_elapsed_time",
"=",
"self",
".",
"get_option",
"(",
"'show_elapsed_time'",
")",
"reset_warning",
"=",
"self",
".",
"get_option",
"(",
"'show_reset_namespace_warning'",
")",
"ask_before_restart",
"=",
"self",
".",
"get_option",
"(",
"'ask_before_restart'",
")",
"client",
"=",
"ClientWidget",
"(",
"self",
",",
"id_",
"=",
"client_id",
",",
"history_filename",
"=",
"get_conf_path",
"(",
"'history.py'",
")",
",",
"config_options",
"=",
"self",
".",
"config_options",
"(",
")",
",",
"additional_options",
"=",
"self",
".",
"additional_options",
"(",
"is_pylab",
"=",
"is_pylab",
",",
"is_sympy",
"=",
"is_sympy",
")",
",",
"interpreter_versions",
"=",
"self",
".",
"interpreter_versions",
"(",
")",
",",
"connection_file",
"=",
"cf",
",",
"menu_actions",
"=",
"self",
".",
"menu_actions",
",",
"options_button",
"=",
"self",
".",
"options_button",
",",
"show_elapsed_time",
"=",
"show_elapsed_time",
",",
"reset_warning",
"=",
"reset_warning",
",",
"given_name",
"=",
"given_name",
",",
"ask_before_restart",
"=",
"ask_before_restart",
",",
"css_path",
"=",
"self",
".",
"css_path",
")",
"if",
"self",
".",
"test_dir",
"is",
"not",
"None",
":",
"client",
".",
"stderr_dir",
"=",
"self",
".",
"test_dir",
"self",
".",
"add_tab",
"(",
"client",
",",
"name",
"=",
"client",
".",
"get_name",
"(",
")",
",",
"filename",
"=",
"filename",
")",
"if",
"cf",
"is",
"None",
":",
"error_msg",
"=",
"self",
".",
"permission_error_msg",
".",
"format",
"(",
"jupyter_runtime_dir",
"(",
")",
")",
"client",
".",
"show_kernel_error",
"(",
"error_msg",
")",
"return",
"if",
"not",
"CONF",
".",
"get",
"(",
"'main_interpreter'",
",",
"'default'",
")",
":",
"pyexec",
"=",
"CONF",
".",
"get",
"(",
"'main_interpreter'",
",",
"'executable'",
")",
"has_spyder_kernels",
"=",
"programs",
".",
"is_module_installed",
"(",
"'spyder_kernels'",
",",
"interpreter",
"=",
"pyexec",
",",
"version",
"=",
"'>=1.0.0'",
")",
"if",
"not",
"has_spyder_kernels",
":",
"client",
".",
"show_kernel_error",
"(",
"_",
"(",
"\"Your Python environment or installation doesn't \"",
"\"have the <tt>spyder-kernels</tt> module or the \"",
"\"right version of it installed. \"",
"\"Without this module is not possible for \"",
"\"Spyder to create a console for you.<br><br>\"",
"\"You can install it by running in a system terminal:\"",
"\"<br><br>\"",
"\"<tt>conda install spyder-kernels</tt>\"",
"\"<br><br>or<br><br>\"",
"\"<tt>pip install spyder-kernels</tt>\"",
")",
")",
"return",
"self",
".",
"connect_client_to_kernel",
"(",
"client",
",",
"is_cython",
"=",
"is_cython",
",",
"is_pylab",
"=",
"is_pylab",
",",
"is_sympy",
"=",
"is_sympy",
")",
"if",
"client",
".",
"shellwidget",
".",
"kernel_manager",
"is",
"None",
":",
"return",
"self",
".",
"register_client",
"(",
"client",
")"
] | python | Create a new client | false |
2,015,099 | def get(self, key, value):
"""Get a single record by id
Supports resource cache
.. versionchanged:: 2.17.0
Added option to retrieve record by tracking_id
Keyword Args:
id (str): Full record ID
tracking_id (str): Record Tracking ID
Returns:
Record: Matching Record instance returned from API
Raises:
TypeError: No id argument provided
"""
if key == 'id':
response = self._swimlane.request('get', "app/{0}/record/{1}".format(self._app.id, value))
return Record(self._app, response.json())
if key == 'tracking_id':
response = self._swimlane.request('get', "app/{0}/record/tracking/{1}".format(self._app.id, value))
return Record(self._app, response.json()) | [
"def",
"get",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"if",
"key",
"==",
"'id'",
":",
"response",
"=",
"self",
".",
"_swimlane",
".",
"request",
"(",
"'get'",
",",
"\"app/{0}/record/{1}\"",
".",
"format",
"(",
"self",
".",
"_app",
".",
"id",
",",
"value",
")",
")",
"return",
"Record",
"(",
"self",
".",
"_app",
",",
"response",
".",
"json",
"(",
")",
")",
"if",
"key",
"==",
"'tracking_id'",
":",
"response",
"=",
"self",
".",
"_swimlane",
".",
"request",
"(",
"'get'",
",",
"\"app/{0}/record/tracking/{1}\"",
".",
"format",
"(",
"self",
".",
"_app",
".",
"id",
",",
"value",
")",
")",
"return",
"Record",
"(",
"self",
".",
"_app",
",",
"response",
".",
"json",
"(",
")",
")"
] | python | Get a single record by id
Supports resource cache
.. versionchanged:: 2.17.0
Added option to retrieve record by tracking_id
Keyword Args:
id (str): Full record ID
tracking_id (str): Record Tracking ID
Returns:
Record: Matching Record instance returned from API
Raises:
TypeError: No id argument provided | false |
2,334,394 | def get_current_activities(self, login=None, **kwargs):
"""Get the current activities of user.
Either use the `login` param, or the client's login if unset.
:return: JSON
"""
_login = kwargs.get(
'login',
login or self._login
)
_activity_url = ACTIVITY_URL.format(login=_login)
return self._request_api(url=_activity_url).json() | [
"def",
"get_current_activities",
"(",
"self",
",",
"login",
"=",
"None",
",",
"**",
"kwargs",
")",
":",
"_login",
"=",
"kwargs",
".",
"get",
"(",
"'login'",
",",
"login",
"or",
"self",
".",
"_login",
")",
"_activity_url",
"=",
"ACTIVITY_URL",
".",
"format",
"(",
"login",
"=",
"_login",
")",
"return",
"self",
".",
"_request_api",
"(",
"url",
"=",
"_activity_url",
")",
".",
"json",
"(",
")"
] | python | Get the current activities of user.
Either use the `login` param, or the client's login if unset.
:return: JSON | false |
1,674,617 | def timedelta_to_string(timedelta):
"""
Utility that converts a pandas.Timedelta to a string representation
compatible with pandas.Timedelta constructor format
Parameters
----------
timedelta: pd.Timedelta
Returns
-------
string
string representation of 'timedelta'
"""
c = timedelta.components
format = ''
if c.days != 0:
format += '%dD' % c.days
if c.hours > 0:
format += '%dh' % c.hours
if c.minutes > 0:
format += '%dm' % c.minutes
if c.seconds > 0:
format += '%ds' % c.seconds
if c.milliseconds > 0:
format += '%dms' % c.milliseconds
if c.microseconds > 0:
format += '%dus' % c.microseconds
if c.nanoseconds > 0:
format += '%dns' % c.nanoseconds
return format | [
"def",
"timedelta_to_string",
"(",
"timedelta",
")",
":",
"c",
"=",
"timedelta",
".",
"components",
"format",
"=",
"''",
"if",
"c",
".",
"days",
"!=",
"0",
":",
"format",
"+=",
"'%dD'",
"%",
"c",
".",
"days",
"if",
"c",
".",
"hours",
">",
"0",
":",
"format",
"+=",
"'%dh'",
"%",
"c",
".",
"hours",
"if",
"c",
".",
"minutes",
">",
"0",
":",
"format",
"+=",
"'%dm'",
"%",
"c",
".",
"minutes",
"if",
"c",
".",
"seconds",
">",
"0",
":",
"format",
"+=",
"'%ds'",
"%",
"c",
".",
"seconds",
"if",
"c",
".",
"milliseconds",
">",
"0",
":",
"format",
"+=",
"'%dms'",
"%",
"c",
".",
"milliseconds",
"if",
"c",
".",
"microseconds",
">",
"0",
":",
"format",
"+=",
"'%dus'",
"%",
"c",
".",
"microseconds",
"if",
"c",
".",
"nanoseconds",
">",
"0",
":",
"format",
"+=",
"'%dns'",
"%",
"c",
".",
"nanoseconds",
"return",
"format"
] | python | Utility that converts a pandas.Timedelta to a string representation
compatible with pandas.Timedelta constructor format
Parameters
----------
timedelta: pd.Timedelta
Returns
-------
string
string representation of 'timedelta' | false |
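A short, hypothetical usage sketch of the `timedelta_to_string` snippet in the row above; it assumes the function is defined as shown and that pandas is installed. Per the snippet's docstring, the returned string is meant to be accepted again by the `pandas.Timedelta` constructor.

```python
import pandas as pd

# Assumes timedelta_to_string from the row above is defined in scope.
td = pd.Timedelta(days=1, hours=2, minutes=30)
print(timedelta_to_string(td))   # '1D2h30m'
```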
2,038,886 | def sg_queue_context(sess=None):
r"""Context helper for queue routines.
Args:
sess: A session to open queues. If not specified, a new session is created.
Returns:
None
"""
# default session
sess = tf.get_default_session() if sess is None else sess
# thread coordinator
coord = tf.train.Coordinator()
try:
# start queue thread
threads = tf.train.start_queue_runners(sess, coord)
yield
finally:
# stop queue thread
coord.request_stop()
# wait thread to exit.
coord.join(threads) | [
"def",
"sg_queue_context",
"(",
"sess",
"=",
"None",
")",
":",
"sess",
"=",
"tf",
".",
"get_default_session",
"(",
")",
"if",
"sess",
"is",
"None",
"else",
"sess",
"coord",
"=",
"tf",
".",
"train",
".",
"Coordinator",
"(",
")",
"try",
":",
"threads",
"=",
"tf",
".",
"train",
".",
"start_queue_runners",
"(",
"sess",
",",
"coord",
")",
"yield",
"finally",
":",
"coord",
".",
"request_stop",
"(",
")",
"coord",
".",
"join",
"(",
"threads",
")"
] | python | r"""Context helper for queue routines.
Args:
sess: A session to open queues. If not specified, a new session is created.
Returns:
None | false |
2,004,797 | def getParser():
"Creates and returns the argparse parser object."
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=__description__)
parser.add_argument('input', help='the input image')
parser.add_argument('output', help='the output image')
parser.add_argument('shape', type=argparseu.sequenceOfIntegersGt, help='the desired shape in colon-separated values, e.g. 255,255,32')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='verbose output')
parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.')
parser.add_argument('-f', '--force', dest='force', action='store_true', help='overwrite existing files')
return parser | [
"def",
"getParser",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"formatter_class",
"=",
"argparse",
".",
"RawDescriptionHelpFormatter",
",",
"description",
"=",
"__description__",
")",
"parser",
".",
"add_argument",
"(",
"'input'",
",",
"help",
"=",
"'the input image'",
")",
"parser",
".",
"add_argument",
"(",
"'output'",
",",
"help",
"=",
"'the output image'",
")",
"parser",
".",
"add_argument",
"(",
"'shape'",
",",
"type",
"=",
"argparseu",
".",
"sequenceOfIntegersGt",
",",
"help",
"=",
"'the desired shape in colon-separated values, e.g. 255,255,32'",
")",
"parser",
".",
"add_argument",
"(",
"'-v'",
",",
"'--verbose'",
",",
"dest",
"=",
"'verbose'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'verbose output'",
")",
"parser",
".",
"add_argument",
"(",
"'-d'",
",",
"dest",
"=",
"'debug'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Display debug information.'",
")",
"parser",
".",
"add_argument",
"(",
"'-f'",
",",
"'--force'",
",",
"dest",
"=",
"'force'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'overwrite existing files'",
")",
"return",
"parser"
] | python | Creates and returns the argparse parser object. | false |
2,073,641 | def get_class_that_defined_method(meth):
"""Determines the class owning the given method.
"""
if is_classmethod(meth):
return meth.__self__
if hasattr(meth, 'im_class'):
return meth.im_class
elif hasattr(meth, '__qualname__'):
# Python 3
try:
cls_names = meth.__qualname__.split('.<locals>', 1)[0].rsplit('.', 1)[0].split('.')
cls = inspect.getmodule(meth)
for cls_name in cls_names:
cls = getattr(cls, cls_name)
if isinstance(cls, type):
return cls
except AttributeError:
# If this was called from a decorator and meth is not a method, this
# can result in AttributeError, because at decorator-time meth has not
# yet been added to module. If it's really a method, its class would be
# already in, so no problem in that case.
pass
raise ValueError(str(meth)+' is not a method.') | [
"def",
"get_class_that_defined_method",
"(",
"meth",
")",
":",
"if",
"is_classmethod",
"(",
"meth",
")",
":",
"return",
"meth",
".",
"__self__",
"if",
"hasattr",
"(",
"meth",
",",
"'im_class'",
")",
":",
"return",
"meth",
".",
"im_class",
"elif",
"hasattr",
"(",
"meth",
",",
"'__qualname__'",
")",
":",
"try",
":",
"cls_names",
"=",
"meth",
".",
"__qualname__",
".",
"split",
"(",
"'.<locals>'",
",",
"1",
")",
"[",
"0",
"]",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'.'",
")",
"cls",
"=",
"inspect",
".",
"getmodule",
"(",
"meth",
")",
"for",
"cls_name",
"in",
"cls_names",
":",
"cls",
"=",
"getattr",
"(",
"cls",
",",
"cls_name",
")",
"if",
"isinstance",
"(",
"cls",
",",
"type",
")",
":",
"return",
"cls",
"except",
"AttributeError",
":",
"pass",
"raise",
"ValueError",
"(",
"str",
"(",
"meth",
")",
"+",
"' is not a method.'",
")"
] | python | Determines the class owning the given method. | false |
2,629,959 | def update_metadata_image(sdc_url, token, vdc, product, metadata_image):
"""It updates the product metadada for image filtered
:param glance_url: the sdc url
:param token: the valid token
:param metadata_image: image name
:param product: image name
"""
print 'update metadata'
print product
url = sdc_url+ "/catalog/product/"+product
print url
headers = {'X-Auth-Token': token, 'Tenant-Id': vdc,
'Accept': "application/json",
'Content-Type': 'application/json'}
print headers
response = http.get(url, headers)
print url
if response.status != 200:
print 'error to get the product ' + str(response.status)
return
else:
payload = '{"key":"image","value":"' + metadata_image + '"}'
print payload
response = http.put(url + "/metadatas/image", headers, payload)
print response
if response.status != 200:
print 'error to update the product ' + product \
+ ' ' + str(response.status) | [
"def",
"update_metadata_image",
"(",
"sdc_url",
",",
"token",
",",
"vdc",
",",
"product",
",",
"metadata_image",
")",
":",
"print",
"'update metadata'",
"print",
"product",
"url",
"=",
"sdc_url",
"+",
"\"/catalog/product/\"",
"+",
"product",
"print",
"url",
"headers",
"=",
"{",
"'X-Auth-Token'",
":",
"token",
",",
"'Tenant-Id'",
":",
"vdc",
",",
"'Accept'",
":",
"\"application/json\"",
",",
"'Content-Type'",
":",
"'application/json'",
"}",
"print",
"headers",
"response",
"=",
"http",
".",
"get",
"(",
"url",
",",
"headers",
")",
"print",
"url",
"if",
"response",
".",
"status",
"!=",
"200",
":",
"print",
"'error to get the product '",
"+",
"str",
"(",
"response",
".",
"status",
")",
"return",
"else",
":",
"payload",
"=",
"'{\"key\":\"image\",\"value\":\"'",
"+",
"metadata_image",
"+",
"'\"}'",
"print",
"payload",
"response",
"=",
"http",
".",
"put",
"(",
"url",
"+",
"\"/metadatas/image\"",
",",
"headers",
",",
"payload",
")",
"print",
"response",
"if",
"response",
".",
"status",
"!=",
"200",
":",
"print",
"'error to update the product '",
"+",
"product",
"+",
"' '",
"+",
"str",
"(",
"response",
".",
"status",
")"
] | python | It updates the product metadada for image filtered
:param glance_url: the sdc url
:param token: the valid token
:param metadata_image: image name
:param product: image name | false |
1,593,735 | def __init__(self, augseq, processes=None, maxtasksperchild=None, seed=None):
"""
Initialize augmentation pool.
Parameters
----------
augseq : Augmenter
The augmentation sequence to apply to batches.
processes : None or int, optional
The number of background workers, similar to the same parameter in multiprocessing.Pool.
If ``None``, the number of the machine's CPU cores will be used (this counts hyperthreads as CPU cores).
If this is set to a negative value ``p``, then ``P - abs(p)`` will be used, where ``P`` is the number
of CPU cores. E.g. ``-1`` would use all cores except one (this is useful to e.g. reserve one core to
feed batches to the GPU).
maxtasksperchild : None or int, optional
The number of tasks done per worker process before the process is killed and restarted, similar to the
same parameter in multiprocessing.Pool. If ``None``, worker processes will not be automatically restarted.
seed : None or int, optional
The seed to use for child processes. If ``None``, a random seed will be used.
"""
# make sure that don't call pool again in a child process
assert Pool._WORKER_AUGSEQ is None, "_WORKER_AUGSEQ was already set when calling " \
"Pool.__init__(). Did you try to instantiate a Pool within a Pool?"
assert processes is None or processes != 0
self.augseq = augseq
self.processes = processes
self.maxtasksperchild = maxtasksperchild
self.seed = seed
if self.seed is not None:
assert ia.SEED_MIN_VALUE <= self.seed <= ia.SEED_MAX_VALUE
# multiprocessing.Pool instance
self._pool = None
# Running counter of the number of augmented batches. This will be used to send indexes for each batch to
# the workers so that they can augment using SEED_BASE+SEED_BATCH and ensure consistency of applied
# augmentation order between script runs.
self._batch_idx = 0 | [
"def",
"__init__",
"(",
"self",
",",
"augseq",
",",
"processes",
"=",
"None",
",",
"maxtasksperchild",
"=",
"None",
",",
"seed",
"=",
"None",
")",
":",
"assert",
"Pool",
".",
"_WORKER_AUGSEQ",
"is",
"None",
",",
"\"_WORKER_AUGSEQ was already set when calling \"",
"\"Pool.__init__(). Did you try to instantiate a Pool within a Pool?\"",
"assert",
"processes",
"is",
"None",
"or",
"processes",
"!=",
"0",
"self",
".",
"augseq",
"=",
"augseq",
"self",
".",
"processes",
"=",
"processes",
"self",
".",
"maxtasksperchild",
"=",
"maxtasksperchild",
"self",
".",
"seed",
"=",
"seed",
"if",
"self",
".",
"seed",
"is",
"not",
"None",
":",
"assert",
"ia",
".",
"SEED_MIN_VALUE",
"<=",
"self",
".",
"seed",
"<=",
"ia",
".",
"SEED_MAX_VALUE",
"self",
".",
"_pool",
"=",
"None",
"self",
".",
"_batch_idx",
"=",
"0"
] | python | Initialize augmentation pool.
Parameters
----------
augseq : Augmenter
The augmentation sequence to apply to batches.
processes : None or int, optional
The number of background workers, similar to the same parameter in multiprocessing.Pool.
If ``None``, the number of the machine's CPU cores will be used (this counts hyperthreads as CPU cores).
If this is set to a negative value ``p``, then ``P - abs(p)`` will be used, where ``P`` is the number
of CPU cores. E.g. ``-1`` would use all cores except one (this is useful to e.g. reserve one core to
feed batches to the GPU).
maxtasksperchild : None or int, optional
The number of tasks done per worker process before the process is killed and restarted, similar to the
same parameter in multiprocessing.Pool. If ``None``, worker processes will not be automatically restarted.
seed : None or int, optional
The seed to use for child processes. If ``None``, a random seed will be used. | false |
2,412,151 | def remove_empty_cols(records):
"""Remove all-gap columns from aligned SeqRecords."""
# In case it's a generator, turn it into a list
records = list(records)
seqstrs = [str(rec.seq) for rec in records]
clean_cols = [col
for col in zip(*seqstrs)
if not all(c == '-' for c in col)]
clean_seqs = [''.join(row)
for row in zip(*clean_cols)]
for rec, clean_seq in zip(records, clean_seqs):
yield SeqRecord(Seq(clean_seq, rec.seq.alphabet), id=rec.id,
name=rec.name, description=rec.description,
dbxrefs=rec.dbxrefs, features=rec.features,
annotations=rec.annotations,
letter_annotations=rec.letter_annotations) | [
"def",
"remove_empty_cols",
"(",
"records",
")",
":",
"records",
"=",
"list",
"(",
"records",
")",
"seqstrs",
"=",
"[",
"str",
"(",
"rec",
".",
"seq",
")",
"for",
"rec",
"in",
"records",
"]",
"clean_cols",
"=",
"[",
"col",
"for",
"col",
"in",
"zip",
"(",
"*",
"seqstrs",
")",
"if",
"not",
"all",
"(",
"c",
"==",
"'-'",
"for",
"c",
"in",
"col",
")",
"]",
"clean_seqs",
"=",
"[",
"''",
".",
"join",
"(",
"row",
")",
"for",
"row",
"in",
"zip",
"(",
"*",
"clean_cols",
")",
"]",
"for",
"rec",
",",
"clean_seq",
"in",
"zip",
"(",
"records",
",",
"clean_seqs",
")",
":",
"yield",
"SeqRecord",
"(",
"Seq",
"(",
"clean_seq",
",",
"rec",
".",
"seq",
".",
"alphabet",
")",
",",
"id",
"=",
"rec",
".",
"id",
",",
"name",
"=",
"rec",
".",
"name",
",",
"description",
"=",
"rec",
".",
"description",
",",
"dbxrefs",
"=",
"rec",
".",
"dbxrefs",
",",
"features",
"=",
"rec",
".",
"features",
",",
"annotations",
"=",
"rec",
".",
"annotations",
",",
"letter_annotations",
"=",
"rec",
".",
"letter_annotations",
")"
] | python | Remove all-gap columns from aligned SeqRecords. | false |
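A small hedged example of the behaviour described above, using pre-1.78 Biopython objects (the alphabet import matches the older Seq API implied by rec.seq.alphabet in the snippet):

from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import generic_dna  # older Biopython API, as implied by rec.seq.alphabet

recs = [SeqRecord(Seq("A-C-", generic_dna), id="r1"),
        SeqRecord(Seq("A-G-", generic_dna), id="r2")]
cleaned = list(remove_empty_cols(recs))
# columns 2 and 4 are all gaps, so they are dropped
assert [str(r.seq) for r in cleaned] == ["AC", "AG"]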
1,928,294 | def trim(self):
'''Remove items that are expired or exceed the max size.'''
now_time = time.time()
while self._seq and self._seq[0].expire_time < now_time:
item = self._seq.popleft()
del self._map[item.key]
if self._max_items:
while self._seq and len(self._seq) > self._max_items:
item = self._seq.popleft()
del self._map[item.key] | [
"def",
"trim",
"(",
"self",
")",
":",
"now_time",
"=",
"time",
".",
"time",
"(",
")",
"while",
"self",
".",
"_seq",
"and",
"self",
".",
"_seq",
"[",
"0",
"]",
".",
"expire_time",
"<",
"now_time",
":",
"item",
"=",
"self",
".",
"_seq",
".",
"popleft",
"(",
")",
"del",
"self",
".",
"_map",
"[",
"item",
".",
"key",
"]",
"if",
"self",
".",
"_max_items",
":",
"while",
"self",
".",
"_seq",
"and",
"len",
"(",
"self",
".",
"_seq",
")",
">",
"self",
".",
"_max_items",
":",
"item",
"=",
"self",
".",
"_seq",
".",
"popleft",
"(",
")",
"del",
"self",
".",
"_map",
"[",
"item",
".",
"key",
"]"
] | python | Remove items that are expired or exceed the max size. | false |
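A minimal self-contained sketch of the same eviction pattern, assuming each queued item carries a key and an expire_time (class and method names here are illustrative, not the original library's API):

import collections
import time

Item = collections.namedtuple("Item", "key expire_time")

class TinyCache:
    def __init__(self, max_items=None):
        self._seq = collections.deque()
        self._map = {}
        self._max_items = max_items

    def put(self, key, value, ttl):
        self._seq.append(Item(key, time.time() + ttl))
        self._map[key] = value

    def trim(self):
        now_time = time.time()
        # drop expired entries first, then enforce the size cap
        while self._seq and self._seq[0].expire_time < now_time:
            self._map.pop(self._seq.popleft().key, None)
        if self._max_items:
            while self._seq and len(self._seq) > self._max_items:
                self._map.pop(self._seq.popleft().key, None)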
2,324,698 | def append(self, cpe):
"""
Adds a CPE Name to the set if not already.
:param CPE cpe: CPE Name to store in set
:returns: None
:exception: ValueError - invalid version of CPE Name
TEST:
>>> from .cpeset2_2 import CPESet2_2
>>> from .cpe2_2 import CPE2_2
>>> uri1 = 'cpe:/h:hp'
>>> c1 = CPE2_2(uri1)
>>> s = CPESet2_2()
>>> s.append(c1)
"""
if cpe.VERSION != CPE.VERSION_2_2:
errmsg = "CPE Name version {0} not valid, version 2.2 expected".format(
cpe.VERSION)
raise ValueError(errmsg)
for k in self.K:
if cpe.cpe_str == k.cpe_str:
return None
self.K.append(cpe) | [
"def",
"append",
"(",
"self",
",",
"cpe",
")",
":",
"if",
"cpe",
".",
"VERSION",
"!=",
"CPE",
".",
"VERSION_2_2",
":",
"errmsg",
"=",
"\"CPE Name version {0} not valid, version 2.2 expected\"",
".",
"format",
"(",
"cpe",
".",
"VERSION",
")",
"raise",
"ValueError",
"(",
"errmsg",
")",
"for",
"k",
"in",
"self",
".",
"K",
":",
"if",
"cpe",
".",
"cpe_str",
"==",
"k",
".",
"cpe_str",
":",
"return",
"None",
"self",
".",
"K",
".",
"append",
"(",
"cpe",
")"
] | python | Adds a CPE Name to the set if not already.
:param CPE cpe: CPE Name to store in set
:returns: None
:exception: ValueError - invalid version of CPE Name
TEST:
>>> from .cpeset2_2 import CPESet2_2
>>> from .cpe2_2 import CPE2_2
>>> uri1 = 'cpe:/h:hp'
>>> c1 = CPE2_2(uri1)
>>> s = CPESet2_2()
>>> s.append(c1) | false |
2,453,288 | def discard_defaults(self, *args):
'''
node.discard_defaults(a, b...) yields a new calculation node identical to the given node
except that the default values for the given afferent parameters named by the arguments a,
b, etc. have been removed. In the new node that is returned, these parameters will be
required.
'''
rms = set(arg for aa in args for arg in ([aa] if isinstance(aa, six.string_types) else aa))
new_defaults = ps.pmap({k:v for (k,v) in six.iteritems(args) if k not in rms})
new_cnode = copy.copy(self)
object.__setattr__(new_cnode, 'defaults', new_defaults)
return new_cnode | [
"def",
"discard_defaults",
"(",
"self",
",",
"*",
"args",
")",
":",
"rms",
"=",
"set",
"(",
"arg",
"for",
"aa",
"in",
"args",
"for",
"arg",
"in",
"(",
"[",
"aa",
"]",
"if",
"isinstance",
"(",
"aa",
",",
"six",
".",
"string_types",
")",
"else",
"aa",
")",
")",
"new_defaults",
"=",
"ps",
".",
"pmap",
"(",
"{",
"k",
":",
"v",
"for",
"(",
"k",
",",
"v",
")",
"in",
"six",
".",
"iteritems",
"(",
"args",
")",
"if",
"k",
"not",
"in",
"rms",
"}",
")",
"new_cnode",
"=",
"copy",
".",
"copy",
"(",
"self",
")",
"object",
".",
"__setattr__",
"(",
"new_cnode",
",",
"'defaults'",
",",
"new_defaults",
")",
"return",
"new_cnode"
] | python | node.discard_defaults(a, b...) yields a new calculation node identical to the given node
except that the default values for the given afferent parameters named by the arguments a,
b, etc. have been removed. In the new node that is returned, these parameters will be
required. | false |
2,582,700 | def is_equivalent(self, callback, details_filter=None):
"""Check if the callback provided is the same as the internal one.
:param callback: callback used for comparison
:param details_filter: callback used for comparison
:returns: false if not the same callback, otherwise true
:rtype: boolean
"""
cb = self.callback
if cb is None and callback is not None:
return False
if cb is not None and callback is None:
return False
if cb is not None and callback is not None \
and not reflection.is_same_callback(cb, callback):
return False
if details_filter is not None:
if self._details_filter is None:
return False
else:
return reflection.is_same_callback(self._details_filter,
details_filter)
else:
return self._details_filter is None | [
"def",
"is_equivalent",
"(",
"self",
",",
"callback",
",",
"details_filter",
"=",
"None",
")",
":",
"cb",
"=",
"self",
".",
"callback",
"if",
"cb",
"is",
"None",
"and",
"callback",
"is",
"not",
"None",
":",
"return",
"False",
"if",
"cb",
"is",
"not",
"None",
"and",
"callback",
"is",
"None",
":",
"return",
"False",
"if",
"cb",
"is",
"not",
"None",
"and",
"callback",
"is",
"not",
"None",
"and",
"not",
"reflection",
".",
"is_same_callback",
"(",
"cb",
",",
"callback",
")",
":",
"return",
"False",
"if",
"details_filter",
"is",
"not",
"None",
":",
"if",
"self",
".",
"_details_filter",
"is",
"None",
":",
"return",
"False",
"else",
":",
"return",
"reflection",
".",
"is_same_callback",
"(",
"self",
".",
"_details_filter",
",",
"details_filter",
")",
"else",
":",
"return",
"self",
".",
"_details_filter",
"is",
"None"
] | python | Check if the callback provided is the same as the internal one.
:param callback: callback used for comparison
:param details_filter: callback used for comparison
:returns: false if not the same callback, otherwise true
:rtype: boolean | false |
1,661,117 | def get_member_groups(
self, object_id, security_enabled_only, additional_properties=None, custom_headers=None, raw=False, **operation_config):
"""Gets a collection that contains the object IDs of the groups of which
the user is a member.
:param object_id: The object ID of the user for which to get group
membership.
:type object_id: str
:param security_enabled_only: If true, only membership in
security-enabled groups should be checked. Otherwise, membership in
all groups should be checked.
:type security_enabled_only: bool
:param additional_properties: Unmatched properties from the message
are deserialized this collection
:type additional_properties: dict[str, object]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of str
:rtype: ~azure.graphrbac.models.StrPaged[str]
:raises:
:class:`GraphErrorException<azure.graphrbac.models.GraphErrorException>`
"""
parameters = models.UserGetMemberGroupsParameters(additional_properties=additional_properties, security_enabled_only=security_enabled_only)
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.get_member_groups.metadata['url']
path_format_arguments = {
'objectId': self._serialize.url("object_id", object_id, 'str'),
'tenantID': self._serialize.url("self.config.tenant_id", self.config.tenant_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'UserGetMemberGroupsParameters')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.GraphErrorException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.StrPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.StrPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized | [
"def",
"get_member_groups",
"(",
"self",
",",
"object_id",
",",
"security_enabled_only",
",",
"additional_properties",
"=",
"None",
",",
"custom_headers",
"=",
"None",
",",
"raw",
"=",
"False",
",",
"**",
"operation_config",
")",
":",
"parameters",
"=",
"models",
".",
"UserGetMemberGroupsParameters",
"(",
"additional_properties",
"=",
"additional_properties",
",",
"security_enabled_only",
"=",
"security_enabled_only",
")",
"def",
"internal_paging",
"(",
"next_link",
"=",
"None",
",",
"raw",
"=",
"False",
")",
":",
"if",
"not",
"next_link",
":",
"url",
"=",
"self",
".",
"get_member_groups",
".",
"metadata",
"[",
"'url'",
"]",
"path_format_arguments",
"=",
"{",
"'objectId'",
":",
"self",
".",
"_serialize",
".",
"url",
"(",
"\"object_id\"",
",",
"object_id",
",",
"'str'",
")",
",",
"'tenantID'",
":",
"self",
".",
"_serialize",
".",
"url",
"(",
"\"self.config.tenant_id\"",
",",
"self",
".",
"config",
".",
"tenant_id",
",",
"'str'",
")",
"}",
"url",
"=",
"self",
".",
"_client",
".",
"format_url",
"(",
"url",
",",
"**",
"path_format_arguments",
")",
"query_parameters",
"=",
"{",
"}",
"query_parameters",
"[",
"'api-version'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"\"self.api_version\"",
",",
"self",
".",
"api_version",
",",
"'str'",
")",
"else",
":",
"url",
"=",
"next_link",
"query_parameters",
"=",
"{",
"}",
"header_parameters",
"=",
"{",
"}",
"header_parameters",
"[",
"'Accept'",
"]",
"=",
"'application/json'",
"header_parameters",
"[",
"'Content-Type'",
"]",
"=",
"'application/json; charset=utf-8'",
"if",
"self",
".",
"config",
".",
"generate_client_request_id",
":",
"header_parameters",
"[",
"'x-ms-client-request-id'",
"]",
"=",
"str",
"(",
"uuid",
".",
"uuid1",
"(",
")",
")",
"if",
"custom_headers",
":",
"header_parameters",
".",
"update",
"(",
"custom_headers",
")",
"if",
"self",
".",
"config",
".",
"accept_language",
"is",
"not",
"None",
":",
"header_parameters",
"[",
"'accept-language'",
"]",
"=",
"self",
".",
"_serialize",
".",
"header",
"(",
"\"self.config.accept_language\"",
",",
"self",
".",
"config",
".",
"accept_language",
",",
"'str'",
")",
"body_content",
"=",
"self",
".",
"_serialize",
".",
"body",
"(",
"parameters",
",",
"'UserGetMemberGroupsParameters'",
")",
"request",
"=",
"self",
".",
"_client",
".",
"post",
"(",
"url",
",",
"query_parameters",
",",
"header_parameters",
",",
"body_content",
")",
"response",
"=",
"self",
".",
"_client",
".",
"send",
"(",
"request",
",",
"stream",
"=",
"False",
",",
"**",
"operation_config",
")",
"if",
"response",
".",
"status_code",
"not",
"in",
"[",
"200",
"]",
":",
"raise",
"models",
".",
"GraphErrorException",
"(",
"self",
".",
"_deserialize",
",",
"response",
")",
"return",
"response",
"deserialized",
"=",
"models",
".",
"StrPaged",
"(",
"internal_paging",
",",
"self",
".",
"_deserialize",
".",
"dependencies",
")",
"if",
"raw",
":",
"header_dict",
"=",
"{",
"}",
"client_raw_response",
"=",
"models",
".",
"StrPaged",
"(",
"internal_paging",
",",
"self",
".",
"_deserialize",
".",
"dependencies",
",",
"header_dict",
")",
"return",
"client_raw_response",
"return",
"deserialized"
] | python | Gets a collection that contains the object IDs of the groups of which
the user is a member.
:param object_id: The object ID of the user for which to get group
membership.
:type object_id: str
:param security_enabled_only: If true, only membership in
security-enabled groups should be checked. Otherwise, membership in
all groups should be checked.
:type security_enabled_only: bool
:param additional_properties: Unmatched properties from the message
are deserialized this collection
:type additional_properties: dict[str, object]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of str
:rtype: ~azure.graphrbac.models.StrPaged[str]
:raises:
:class:`GraphErrorException<azure.graphrbac.models.GraphErrorException>` | false |
1,877,149 | def send_message(
self,
title=None,
body=None,
icon=None,
data=None,
sound=None,
badge=None,
api_key=None,
**kwargs):
"""
Send notification for all active devices in queryset and deactivate if
DELETE_INACTIVE_DEVICES setting is set to True.
"""
if self:
from .fcm import fcm_send_bulk_message
registration_ids = list(self.filter(active=True).values_list(
'registration_id',
flat=True
))
if len(registration_ids) == 0:
return [{'failure': len(self), 'success': 0}]
result = fcm_send_bulk_message(
registration_ids=registration_ids,
title=title,
body=body,
icon=icon,
data=data,
sound=sound,
badge=badge,
api_key=api_key,
**kwargs
)
self._deactivate_devices_with_error_results(
registration_ids,
result['results']
)
return result | [
"def",
"send_message",
"(",
"self",
",",
"title",
"=",
"None",
",",
"body",
"=",
"None",
",",
"icon",
"=",
"None",
",",
"data",
"=",
"None",
",",
"sound",
"=",
"None",
",",
"badge",
"=",
"None",
",",
"api_key",
"=",
"None",
",",
"**",
"kwargs",
")",
":",
"if",
"self",
":",
"from",
".",
"fcm",
"import",
"fcm_send_bulk_message",
"registration_ids",
"=",
"list",
"(",
"self",
".",
"filter",
"(",
"active",
"=",
"True",
")",
".",
"values_list",
"(",
"'registration_id'",
",",
"flat",
"=",
"True",
")",
")",
"if",
"len",
"(",
"registration_ids",
")",
"==",
"0",
":",
"return",
"[",
"{",
"'failure'",
":",
"len",
"(",
"self",
")",
",",
"'success'",
":",
"0",
"}",
"]",
"result",
"=",
"fcm_send_bulk_message",
"(",
"registration_ids",
"=",
"registration_ids",
",",
"title",
"=",
"title",
",",
"body",
"=",
"body",
",",
"icon",
"=",
"icon",
",",
"data",
"=",
"data",
",",
"sound",
"=",
"sound",
",",
"badge",
"=",
"badge",
",",
"api_key",
"=",
"api_key",
",",
"**",
"kwargs",
")",
"self",
".",
"_deactivate_devices_with_error_results",
"(",
"registration_ids",
",",
"result",
"[",
"'results'",
"]",
")",
"return",
"result"
] | python | Send notification for all active devices in queryset and deactivate if
DELETE_INACTIVE_DEVICES setting is set to True. | false |
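A hedged usage sketch, assuming this queryset method is exposed through a Django device model's default manager (as in fcm-django's FCMDevice); the title, body and data values are placeholders:

from fcm_django.models import FCMDevice

devices = FCMDevice.objects.all()
result = devices.send_message(
    title="Build finished",
    body="All tests passed",
    data={"build_id": "1234"},
)
# result mirrors FCM's bulk response; devices whose registration ids errored are deactivated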
2,466,762 | def add_from_db(self, database, files):
"""Adds images and bounding boxes for the given files of a database that follows the :py:ref:`bob.bio.base.database.BioDatabase <bob.bio.base>` interface.
**Parameters:**
``database`` : a derivative of :py:class:`bob.bio.base.database.BioDatabase`
The database interface, which provides file names and annotations for the given ``files``
``files`` : :py:class:`bob.bio.base.database.BioFile` or compatible
The files (as returned by :py:meth:`bob.bio.base.database.BioDatabase.objects`) which should be added to the training list
"""
for f in files:
annotation = database.annotations(f)
image_path = database.original_file_name(f)
self.add_image(image_path, [annotation]) | [
"def",
"add_from_db",
"(",
"self",
",",
"database",
",",
"files",
")",
":",
"for",
"f",
"in",
"files",
":",
"annotation",
"=",
"database",
".",
"annotations",
"(",
"f",
")",
"image_path",
"=",
"database",
".",
"original_file_name",
"(",
"f",
")",
"self",
".",
"add_image",
"(",
"image_path",
",",
"[",
"annotation",
"]",
")"
] | python | Adds images and bounding boxes for the given files of a database that follows the :py:ref:`bob.bio.base.database.BioDatabase <bob.bio.base>` interface.
**Parameters:**
``database`` : a derivative of :py:class:`bob.bio.base.database.BioDatabase`
The database interface, which provides file names and annotations for the given ``files``
``files`` : :py:class:`bob.bio.base.database.BioFile` or compatible
The files (as returned by :py:meth:`bob.bio.base.database.BioDatabase.objects`) which should be added to the training list | false |
1,723,037 | def get_unique_ids_for_schema_and_table(self, schema, table):
"""
Given a schema and table, find matching models, and return
their unique_ids. A schema and table may have more than one
match if the relation matches both a source and a seed, for instance.
"""
def predicate(model):
return self._model_matches_schema_and_table(schema, table, model)
matching = list(self._filter_subgraph(self.nodes, predicate))
return [match.get('unique_id') for match in matching] | [
"def",
"get_unique_ids_for_schema_and_table",
"(",
"self",
",",
"schema",
",",
"table",
")",
":",
"def",
"predicate",
"(",
"model",
")",
":",
"return",
"self",
".",
"_model_matches_schema_and_table",
"(",
"schema",
",",
"table",
",",
"model",
")",
"matching",
"=",
"list",
"(",
"self",
".",
"_filter_subgraph",
"(",
"self",
".",
"nodes",
",",
"predicate",
")",
")",
"return",
"[",
"match",
".",
"get",
"(",
"'unique_id'",
")",
"for",
"match",
"in",
"matching",
"]"
] | python | Given a schema and table, find matching models, and return
their unique_ids. A schema and table may have more than one
match if the relation matches both a source and a seed, for instance. | false |
2,552,826 | def get_flash_region(self, offset, length):
"""
Retrieves the contents of a region of flash from the watch. This only works on watches running
non-release firmware.
Raises :exc:`.GetBytesError` on failure.
:return: The retrieved data
:rtype: bytes
"""
return self._get(GetBytesFlashRequest(offset=offset, length=length)) | [
"def",
"get_flash_region",
"(",
"self",
",",
"offset",
",",
"length",
")",
":",
"return",
"self",
".",
"_get",
"(",
"GetBytesFlashRequest",
"(",
"offset",
"=",
"offset",
",",
"length",
"=",
"length",
")",
")"
] | python | Retrieves the contents of a region of flash from the watch. This only works on watches running
non-release firmware.
Raises :exc:`.GetBytesError` on failure.
:return: The retrieved data
:rtype: bytes | false |
1,624,371 | def close_file_from_name(self, filename):
"""Close file from its name"""
filename = osp.abspath(to_text_string(filename))
index = self.editorstacks[0].has_filename(filename)
if index is not None:
self.editorstacks[0].close_file(index) | [
"def",
"close_file_from_name",
"(",
"self",
",",
"filename",
")",
":",
"filename",
"=",
"osp",
".",
"abspath",
"(",
"to_text_string",
"(",
"filename",
")",
")",
"index",
"=",
"self",
".",
"editorstacks",
"[",
"0",
"]",
".",
"has_filename",
"(",
"filename",
")",
"if",
"index",
"is",
"not",
"None",
":",
"self",
".",
"editorstacks",
"[",
"0",
"]",
".",
"close_file",
"(",
"index",
")"
] | python | Close file from its name | false |
1,973,519 | def set_rgb_dim_level(self, channelIndex: int, rgb: RGBColorState, dimLevel: float):
""" sets the color and dimlevel of the lamp
Args:
channelIndex(int): the channelIndex of the lamp. Use self.topLightChannelIndex or self.bottomLightChannelIndex
rgb(RGBColorState): the color of the lamp
dimLevel(float): the dimLevel of the lamp. 0.0 = off, 1.0 = MAX
Returns:
the result of the _restCall
"""
data = {
"channelIndex": channelIndex,
"deviceId": self.id,
"simpleRGBColorState": rgb,
"dimLevel": dimLevel,
}
return self._restCall(
"device/control/setSimpleRGBColorDimLevel", body=json.dumps(data)
) | [
"def",
"set_rgb_dim_level",
"(",
"self",
",",
"channelIndex",
":",
"int",
",",
"rgb",
":",
"RGBColorState",
",",
"dimLevel",
":",
"float",
")",
":",
"data",
"=",
"{",
"\"channelIndex\"",
":",
"channelIndex",
",",
"\"deviceId\"",
":",
"self",
".",
"id",
",",
"\"simpleRGBColorState\"",
":",
"rgb",
",",
"\"dimLevel\"",
":",
"dimLevel",
",",
"}",
"return",
"self",
".",
"_restCall",
"(",
"\"device/control/setSimpleRGBColorDimLevel\"",
",",
"body",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
")"
] | python | sets the color and dimlevel of the lamp
Args:
channelIndex(int): the channelIndex of the lamp. Use self.topLightChannelIndex or self.bottomLightChannelIndex
rgb(RGBColorState): the color of the lamp
dimLevel(float): the dimLevel of the lamp. 0.0 = off, 1.0 = MAX
Returns:
the result of the _restCall | false |
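An illustrative call, assuming an already-initialised homematicip lamp object of this class and that the RGBColorState enum exposes a RED member at the import path shown (both are assumptions):

from homematicip.base.enums import RGBColorState

# `device` stands in for a connected notification-light device of this class
device.set_rgb_dim_level(
    channelIndex=device.topLightChannelIndex,
    rgb=RGBColorState.RED,
    dimLevel=0.5,   # half brightness; 0.0 switches the channel off
)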
2,205,601 | def add_ring(self, ring):
"""Adds a ring to _rings if not already existing"""
if ring not in self._rings and isinstance(ring, RingDing0):
self._rings.append(ring) | [
"def",
"add_ring",
"(",
"self",
",",
"ring",
")",
":",
"if",
"ring",
"not",
"in",
"self",
".",
"_rings",
"and",
"isinstance",
"(",
"ring",
",",
"RingDing0",
")",
":",
"self",
".",
"_rings",
".",
"append",
"(",
"ring",
")"
] | python | Adds a ring to _rings if not already existing | false |
1,896,404 | def cli(ctx, packages, all, list, platform):
"""Uninstall packages."""
if packages:
_uninstall(packages, platform)
elif all: # pragma: no cover
packages = Resources(platform).packages
_uninstall(packages, platform)
elif list:
Resources(platform).list_packages(installed=True, notinstalled=False)
else:
click.secho(ctx.get_help()) | [
"def",
"cli",
"(",
"ctx",
",",
"packages",
",",
"all",
",",
"list",
",",
"platform",
")",
":",
"if",
"packages",
":",
"_uninstall",
"(",
"packages",
",",
"platform",
")",
"elif",
"all",
":",
"packages",
"=",
"Resources",
"(",
"platform",
")",
".",
"packages",
"_uninstall",
"(",
"packages",
",",
"platform",
")",
"elif",
"list",
":",
"Resources",
"(",
"platform",
")",
".",
"list_packages",
"(",
"installed",
"=",
"True",
",",
"notinstalled",
"=",
"False",
")",
"else",
":",
"click",
".",
"secho",
"(",
"ctx",
".",
"get_help",
"(",
")",
")"
] | python | Uninstall packages. | false |
1,876,013 | def __contains__(self, key):
""" Does service section specify this option? """
return self.config_parser.has_option(
self.service_target, self._get_key(key)) | [
"def",
"__contains__",
"(",
"self",
",",
"key",
")",
":",
"return",
"self",
".",
"config_parser",
".",
"has_option",
"(",
"self",
".",
"service_target",
",",
"self",
".",
"_get_key",
"(",
"key",
")",
")"
] | python | Does service section specify this option? | false |
2,188,933 | def multivariate_ess(samples, batch_size_generator=None):
r"""Estimate the multivariate Effective Sample Size for the samples of every problem.
This essentially applies :func:`estimate_multivariate_ess` to every problem.
Args:
samples (ndarray, dict or generator): either a matrix of shape (d, p, n) with d problems, p parameters and
n samples, or a dictionary with for every parameter a matrix with shape (d, n) or, finally,
a generator function that yields sample arrays of shape (p, n).
batch_size_generator (MultiVariateESSBatchSizeGenerator): the batch size generator, tells us how many
batches and of which size we use in estimating the minimum ESS.
Returns:
ndarray: the multivariate ESS per problem
"""
samples_generator = _get_sample_generator(samples)
return np.array(multiprocess_mapping(_MultivariateESSMultiProcessing(batch_size_generator), samples_generator())) | [
"def",
"multivariate_ess",
"(",
"samples",
",",
"batch_size_generator",
"=",
"None",
")",
":",
"samples_generator",
"=",
"_get_sample_generator",
"(",
"samples",
")",
"return",
"np",
".",
"array",
"(",
"multiprocess_mapping",
"(",
"_MultivariateESSMultiProcessing",
"(",
"batch_size_generator",
")",
",",
"samples_generator",
"(",
")",
")",
")"
] | python | r"""Estimate the multivariate Effective Sample Size for the samples of every problem.
This essentially applies :func:`estimate_multivariate_ess` to every problem.
Args:
samples (ndarray, dict or generator): either a matrix of shape (d, p, n) with d problems, p parameters and
n samples, or a dictionary with for every parameter a matrix with shape (d, n) or, finally,
a generator function that yields sample arrays of shape (p, n).
batch_size_generator (MultiVariateESSBatchSizeGenerator): the batch size generator, tells us how many
batches and of which size we use in estimating the minimum ESS.
Returns:
ndarray: the multivariate ESS per problem | false |
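A quick example with synthetic samples in the (d, p, n) layout described by the docstring (it assumes the surrounding MDT utilities this function relies on are importable):

import numpy as np

# 2 problems, 3 parameters, 5000 roughly independent samples each
samples = np.random.randn(2, 3, 5000)
ess_per_problem = multivariate_ess(samples)   # ndarray of length 2, one ESS value per problem
print(ess_per_problem)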
2,347,094 | def format_table(table, column_names=None, column_specs=None, max_col_width=32, auto_col_width=False):
"""
Table pretty printer. Expects tables to be given as arrays of arrays::
print(format_table([[1, "2"], [3, "456"]], column_names=['A', 'B']))
"""
orig_col_args = dict(column_names=column_names, column_specs=column_specs)
if len(table) > 0:
col_widths = [0] * len(table[0])
elif column_specs is not None:
col_widths = [0] * (len(column_specs) + 1)
elif column_names is not None:
col_widths = [0] * len(column_names)
my_col_names, id_column = [], None
if column_specs is not None:
column_names = ["Row"]
column_names.extend([col["name"] for col in column_specs])
column_specs = [{"name": "Row", "type": "float"}] + column_specs
if column_names is not None:
for i in range(len(column_names)):
if column_names[i].lower() == "id":
id_column = i
my_col = ansi_truncate(str(column_names[i]), max_col_width if i not in {0, id_column} else 99)
my_col_names.append(my_col)
col_widths[i] = max(col_widths[i], len(strip_ansi_codes(my_col)))
trunc_table = []
for row in table:
my_row = []
for i in range(len(row)):
my_item = ansi_truncate(str(row[i]), max_col_width if i not in {0, id_column} else 99)
my_row.append(my_item)
col_widths[i] = max(col_widths[i], len(strip_ansi_codes(my_item)))
trunc_table.append(my_row)
type_colormap = {"boolean": BLUE(),
"integer": YELLOW(),
"float": WHITE(),
"string": GREEN()}
for i in "uint8", "int16", "uint16", "int32", "uint32", "int64":
type_colormap[i] = type_colormap["integer"]
type_colormap["double"] = type_colormap["float"]
def col_head(i):
if column_specs is not None:
return BOLD() + type_colormap[column_specs[i]["type"]] + column_names[i] + ENDC()
else:
return BOLD() + WHITE() + column_names[i] + ENDC()
formatted_table = [border("┌") + border("┬").join(border("─") * i for i in col_widths) + border("┐")]
if len(my_col_names) > 0:
padded_column_names = [col_head(i) + " " * (col_widths[i] - len(my_col_names[i]))
for i in range(len(my_col_names))]
formatted_table.append(border("│") + border("│").join(padded_column_names) + border("│"))
formatted_table.append(border("├") + border("┼").join(border("─") * i for i in col_widths) + border("┤"))
for row in trunc_table:
padded_row = [row[i] + " " * (col_widths[i] - len(strip_ansi_codes(row[i]))) for i in range(len(row))]
formatted_table.append(border("│") + border("│").join(padded_row) + border("│"))
formatted_table.append(border("└") + border("┴").join(border("─") * i for i in col_widths) + border("┘"))
if auto_col_width:
if not sys.stdout.isatty():
raise AegeaException("Cannot auto-format table, output is not a terminal")
table_width = len(strip_ansi_codes(formatted_table[0]))
tty_cols, tty_rows = get_terminal_size()
if table_width > max(tty_cols, 80):
return format_table(table, max_col_width=max_col_width - 1, auto_col_width=True, **orig_col_args)
return "\n".join(formatted_table) | [
"def",
"format_table",
"(",
"table",
",",
"column_names",
"=",
"None",
",",
"column_specs",
"=",
"None",
",",
"max_col_width",
"=",
"32",
",",
"auto_col_width",
"=",
"False",
")",
":",
"orig_col_args",
"=",
"dict",
"(",
"column_names",
"=",
"column_names",
",",
"column_specs",
"=",
"column_specs",
")",
"if",
"len",
"(",
"table",
")",
">",
"0",
":",
"col_widths",
"=",
"[",
"0",
"]",
"*",
"len",
"(",
"table",
"[",
"0",
"]",
")",
"elif",
"column_specs",
"is",
"not",
"None",
":",
"col_widths",
"=",
"[",
"0",
"]",
"*",
"(",
"len",
"(",
"column_specs",
")",
"+",
"1",
")",
"elif",
"column_names",
"is",
"not",
"None",
":",
"col_widths",
"=",
"[",
"0",
"]",
"*",
"len",
"(",
"column_names",
")",
"my_col_names",
",",
"id_column",
"=",
"[",
"]",
",",
"None",
"if",
"column_specs",
"is",
"not",
"None",
":",
"column_names",
"=",
"[",
"\"Row\"",
"]",
"column_names",
".",
"extend",
"(",
"[",
"col",
"[",
"\"name\"",
"]",
"for",
"col",
"in",
"column_specs",
"]",
")",
"column_specs",
"=",
"[",
"{",
"\"name\"",
":",
"\"Row\"",
",",
"\"type\"",
":",
"\"float\"",
"}",
"]",
"+",
"column_specs",
"if",
"column_names",
"is",
"not",
"None",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"column_names",
")",
")",
":",
"if",
"column_names",
"[",
"i",
"]",
".",
"lower",
"(",
")",
"==",
"\"id\"",
":",
"id_column",
"=",
"i",
"my_col",
"=",
"ansi_truncate",
"(",
"str",
"(",
"column_names",
"[",
"i",
"]",
")",
",",
"max_col_width",
"if",
"i",
"not",
"in",
"{",
"0",
",",
"id_column",
"}",
"else",
"99",
")",
"my_col_names",
".",
"append",
"(",
"my_col",
")",
"col_widths",
"[",
"i",
"]",
"=",
"max",
"(",
"col_widths",
"[",
"i",
"]",
",",
"len",
"(",
"strip_ansi_codes",
"(",
"my_col",
")",
")",
")",
"trunc_table",
"=",
"[",
"]",
"for",
"row",
"in",
"table",
":",
"my_row",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"row",
")",
")",
":",
"my_item",
"=",
"ansi_truncate",
"(",
"str",
"(",
"row",
"[",
"i",
"]",
")",
",",
"max_col_width",
"if",
"i",
"not",
"in",
"{",
"0",
",",
"id_column",
"}",
"else",
"99",
")",
"my_row",
".",
"append",
"(",
"my_item",
")",
"col_widths",
"[",
"i",
"]",
"=",
"max",
"(",
"col_widths",
"[",
"i",
"]",
",",
"len",
"(",
"strip_ansi_codes",
"(",
"my_item",
")",
")",
")",
"trunc_table",
".",
"append",
"(",
"my_row",
")",
"type_colormap",
"=",
"{",
"\"boolean\"",
":",
"BLUE",
"(",
")",
",",
"\"integer\"",
":",
"YELLOW",
"(",
")",
",",
"\"float\"",
":",
"WHITE",
"(",
")",
",",
"\"string\"",
":",
"GREEN",
"(",
")",
"}",
"for",
"i",
"in",
"\"uint8\"",
",",
"\"int16\"",
",",
"\"uint16\"",
",",
"\"int32\"",
",",
"\"uint32\"",
",",
"\"int64\"",
":",
"type_colormap",
"[",
"i",
"]",
"=",
"type_colormap",
"[",
"\"integer\"",
"]",
"type_colormap",
"[",
"\"double\"",
"]",
"=",
"type_colormap",
"[",
"\"float\"",
"]",
"def",
"col_head",
"(",
"i",
")",
":",
"if",
"column_specs",
"is",
"not",
"None",
":",
"return",
"BOLD",
"(",
")",
"+",
"type_colormap",
"[",
"column_specs",
"[",
"i",
"]",
"[",
"\"type\"",
"]",
"]",
"+",
"column_names",
"[",
"i",
"]",
"+",
"ENDC",
"(",
")",
"else",
":",
"return",
"BOLD",
"(",
")",
"+",
"WHITE",
"(",
")",
"+",
"column_names",
"[",
"i",
"]",
"+",
"ENDC",
"(",
")",
"formatted_table",
"=",
"[",
"border",
"(",
"\"┌\"",
")",
"+",
"border",
"(",
"\"┬\"",
")",
".",
"join",
"(",
"border",
"(",
"\"─\"",
")",
"*",
"i",
"for",
"i",
"in",
"col_widths",
")",
"+",
"border",
"(",
"\"┐\"",
")",
"]",
"if",
"len",
"(",
"my_col_names",
")",
">",
"0",
":",
"padded_column_names",
"=",
"[",
"col_head",
"(",
"i",
")",
"+",
"\" \"",
"*",
"(",
"col_widths",
"[",
"i",
"]",
"-",
"len",
"(",
"my_col_names",
"[",
"i",
"]",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"my_col_names",
")",
")",
"]",
"formatted_table",
".",
"append",
"(",
"border",
"(",
"\"│\"",
")",
"+",
"border",
"(",
"\"│\"",
")",
".",
"join",
"(",
"padded_column_names",
")",
"+",
"border",
"(",
"\"│\"",
")",
")",
"formatted_table",
".",
"append",
"(",
"border",
"(",
"\"├\"",
")",
"+",
"border",
"(",
"\"┼\"",
")",
".",
"join",
"(",
"border",
"(",
"\"─\"",
")",
"*",
"i",
"for",
"i",
"in",
"col_widths",
")",
"+",
"border",
"(",
"\"┤\"",
")",
")",
"for",
"row",
"in",
"trunc_table",
":",
"padded_row",
"=",
"[",
"row",
"[",
"i",
"]",
"+",
"\" \"",
"*",
"(",
"col_widths",
"[",
"i",
"]",
"-",
"len",
"(",
"strip_ansi_codes",
"(",
"row",
"[",
"i",
"]",
")",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"row",
")",
")",
"]",
"formatted_table",
".",
"append",
"(",
"border",
"(",
"\"│\"",
")",
"+",
"border",
"(",
"\"│\"",
")",
".",
"join",
"(",
"padded_row",
")",
"+",
"border",
"(",
"\"│\"",
")",
")",
"formatted_table",
".",
"append",
"(",
"border",
"(",
"\"└\"",
")",
"+",
"border",
"(",
"\"┴\"",
")",
".",
"join",
"(",
"border",
"(",
"\"─\"",
")",
"*",
"i",
"for",
"i",
"in",
"col_widths",
")",
"+",
"border",
"(",
"\"┘\"",
")",
")",
"if",
"auto_col_width",
":",
"if",
"not",
"sys",
".",
"stdout",
".",
"isatty",
"(",
")",
":",
"raise",
"AegeaException",
"(",
"\"Cannot auto-format table, output is not a terminal\"",
")",
"table_width",
"=",
"len",
"(",
"strip_ansi_codes",
"(",
"formatted_table",
"[",
"0",
"]",
")",
")",
"tty_cols",
",",
"tty_rows",
"=",
"get_terminal_size",
"(",
")",
"if",
"table_width",
">",
"max",
"(",
"tty_cols",
",",
"80",
")",
":",
"return",
"format_table",
"(",
"table",
",",
"max_col_width",
"=",
"max_col_width",
"-",
"1",
",",
"auto_col_width",
"=",
"True",
",",
"**",
"orig_col_args",
")",
"return",
"\"\\n\"",
".",
"join",
"(",
"formatted_table",
")"
] | python | Table pretty printer. Expects tables to be given as arrays of arrays::
print(format_table([[1, "2"], [3, "456"]], column_names=['A', 'B'])) | false |
2,380,747 | def __init__(self, width, poly, reflect_in, xor_in, reflect_out, xor_out, table_idx_width = None):
"""The Crc constructor.
The parameters are as follows:
width
poly
reflect_in
xor_in
reflect_out
xor_out
"""
self.Width = width
self.Poly = poly
self.ReflectIn = reflect_in
self.XorIn = xor_in
self.ReflectOut = reflect_out
self.XorOut = xor_out
self.TableIdxWidth = table_idx_width
self.MSB_Mask = 0x1 << (self.Width - 1)
self.Mask = ((self.MSB_Mask - 1) << 1) | 1
if self.TableIdxWidth != None:
self.TableWidth = 1 << self.TableIdxWidth
else:
self.TableIdxWidth = 8
self.TableWidth = 1 << self.TableIdxWidth
self.DirectInit = self.XorIn
self.NonDirectInit = self.__get_nondirect_init(self.XorIn)
if self.Width < 8:
self.CrcShift = 8 - self.Width
else:
self.CrcShift = 0 | [
"def",
"__init__",
"(",
"self",
",",
"width",
",",
"poly",
",",
"reflect_in",
",",
"xor_in",
",",
"reflect_out",
",",
"xor_out",
",",
"table_idx_width",
"=",
"None",
")",
":",
"self",
".",
"Width",
"=",
"width",
"self",
".",
"Poly",
"=",
"poly",
"self",
".",
"ReflectIn",
"=",
"reflect_in",
"self",
".",
"XorIn",
"=",
"xor_in",
"self",
".",
"ReflectOut",
"=",
"reflect_out",
"self",
".",
"XorOut",
"=",
"xor_out",
"self",
".",
"TableIdxWidth",
"=",
"table_idx_width",
"self",
".",
"MSB_Mask",
"=",
"0x1",
"<<",
"(",
"self",
".",
"Width",
"-",
"1",
")",
"self",
".",
"Mask",
"=",
"(",
"(",
"self",
".",
"MSB_Mask",
"-",
"1",
")",
"<<",
"1",
")",
"|",
"1",
"if",
"self",
".",
"TableIdxWidth",
"!=",
"None",
":",
"self",
".",
"TableWidth",
"=",
"1",
"<<",
"self",
".",
"TableIdxWidth",
"else",
":",
"self",
".",
"TableIdxWidth",
"=",
"8",
"self",
".",
"TableWidth",
"=",
"1",
"<<",
"self",
".",
"TableIdxWidth",
"self",
".",
"DirectInit",
"=",
"self",
".",
"XorIn",
"self",
".",
"NonDirectInit",
"=",
"self",
".",
"__get_nondirect_init",
"(",
"self",
".",
"XorIn",
")",
"if",
"self",
".",
"Width",
"<",
"8",
":",
"self",
".",
"CrcShift",
"=",
"8",
"-",
"self",
".",
"Width",
"else",
":",
"self",
".",
"CrcShift",
"=",
"0"
] | python | The Crc constructor.
The parameters are as follows:
width
poly
reflect_in
xor_in
reflect_out
xor_out | false |
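For illustration, the standard CRC-32 parameter set expressed through this constructor (the polynomial and XOR constants are the usual CRC-32 values; the derived masks follow from the shift logic above):

crc32_model = Crc(
    width=32,
    poly=0x04c11db7,
    reflect_in=True,
    xor_in=0xffffffff,
    reflect_out=True,
    xor_out=0xffffffff,
)
assert crc32_model.MSB_Mask == 0x80000000   # 1 << (32 - 1)
assert crc32_model.Mask == 0xffffffff       # ((MSB_Mask - 1) << 1) | 1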
2,417,523 | def set_prev_sonorus(self):
"""
Sets the voicing (voiced/voiceless) parameters for the preceding consonants.
"""
prev = self.get_prev_letter()
if not prev:
return
if not (self.is_consonant() and prev.is_consonant()):
return
if self.is_sonorus() and self.is_paired_consonant():
if self._get_sound(False) != 'в':
prev.set_sonorus(True)
return
if self.is_deaf():
prev.set_sonorus(False)
return | [
"def",
"set_prev_sonorus",
"(",
"self",
")",
":",
"prev",
"=",
"self",
".",
"get_prev_letter",
"(",
")",
"if",
"not",
"prev",
":",
"return",
"if",
"not",
"(",
"self",
".",
"is_consonant",
"(",
")",
"and",
"prev",
".",
"is_consonant",
"(",
")",
")",
":",
"return",
"if",
"self",
".",
"is_sonorus",
"(",
")",
"and",
"self",
".",
"is_paired_consonant",
"(",
")",
":",
"if",
"self",
".",
"_get_sound",
"(",
"False",
")",
"!=",
"'в'",
":",
"prev",
".",
"set_sonorus",
"(",
"True",
")",
"return",
"if",
"self",
".",
"is_deaf",
"(",
")",
":",
"prev",
".",
"set_sonorus",
"(",
"False",
")",
"return"
] | python | Sets the voicing (voiced/voiceless) parameters for the preceding consonants. | false |
2,653,811 | def __init__(self, mapper=None):
"""
Initialize a Controller subclass. The `mapper` argument is
used by the Application class to specify the Routes mapper
being constructed.
"""
# Build up our mapping of action to method
self.wsgi_actions = dict((k, getattr(self, k))
for k in self._wsgi_actions)
self.wsgi_extensions = dict((k, [getattr(self, k)])
for k in self._wsgi_extensions)
# Storage place for method descriptors
self.wsgi_descriptors = {}
# Save the mapper
self.wsgi_mapper = mapper
# Set up our routes
if mapper:
for action, route in self.wsgi_actions.items():
self._route(action, route) | [
"def",
"__init__",
"(",
"self",
",",
"mapper",
"=",
"None",
")",
":",
"self",
".",
"wsgi_actions",
"=",
"dict",
"(",
"(",
"k",
",",
"getattr",
"(",
"self",
",",
"k",
")",
")",
"for",
"k",
"in",
"self",
".",
"_wsgi_actions",
")",
"self",
".",
"wsgi_extensions",
"=",
"dict",
"(",
"(",
"k",
",",
"[",
"getattr",
"(",
"self",
",",
"k",
")",
"]",
")",
"for",
"k",
"in",
"self",
".",
"_wsgi_extensions",
")",
"self",
".",
"wsgi_descriptors",
"=",
"{",
"}",
"self",
".",
"wsgi_mapper",
"=",
"mapper",
"if",
"mapper",
":",
"for",
"action",
",",
"route",
"in",
"self",
".",
"wsgi_actions",
".",
"items",
"(",
")",
":",
"self",
".",
"_route",
"(",
"action",
",",
"route",
")"
] | python | Initialize a Controller subclass. The `mapper` argument is
used by the Application class to specify the Routes mapper
being constructed. | false |
2,057,524 | def move_right_down(self, action=None, channel=0,
vertical_speed=1, horizontal_speed=1):
"""
Params:
action - start or stop
channel - channel number
vertical_speed - range is 1-8
horizontal_speed - range is 1-8
"""
ret = self.command(
'ptz.cgi?action={0}&channel={1}&code=RightDown&arg1=0'
'&arg2={2}&arg3=0'.format(action, channel, vertical_speed)
)
return ret.content.decode('utf-8') | [
"def",
"move_right_down",
"(",
"self",
",",
"action",
"=",
"None",
",",
"channel",
"=",
"0",
",",
"vertical_speed",
"=",
"1",
",",
"horizontal_speed",
"=",
"1",
")",
":",
"ret",
"=",
"self",
".",
"command",
"(",
"'ptz.cgi?action={0}&channel={1}&code=RightDown&arg1=0'",
"'&arg2={2}&arg3=0'",
".",
"format",
"(",
"action",
",",
"channel",
",",
"vertical_speed",
")",
")",
"return",
"ret",
".",
"content",
".",
"decode",
"(",
"'utf-8'",
")"
] | python | Params:
action - start or stop
channel - channel number
vertical_speed - range is 1-8
horizontal_speed - range is 1-8 | false |
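A hedged usage sketch, assuming `camera` is an authenticated client object exposing this PTZ method:

import time

camera.move_right_down("start", channel=0, vertical_speed=4)
time.sleep(1)   # let the camera pan/tilt diagonally for about a second
camera.move_right_down("stop", channel=0, vertical_speed=4)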
1,923,194 | def __init__(self, Tb=None, Tc=None, Pc=None, omega=None, CASRN='',
eos=None):
self.CASRN = CASRN
self.Tb = Tb
self.Tc = Tc
self.Pc = Pc
self.omega = omega
self.eos = eos
self.Tmin = None
'''Minimum temperature at which no method can calculate vapor pressure
under.'''
self.Tmax = None
'''Maximum temperature at which no method can calculate vapor pressure
above; by definition the critical point.'''
self.method = None
'''The method was which was last used successfully to calculate a property;
set only after the first property calculation.'''
self.tabular_data = {}
'''tabular_data, dict: Stored (Ts, properties) for any
tabular data; indexed by provided or autogenerated name.'''
self.tabular_data_interpolators = {}
'''tabular_data_interpolators, dict: Stored (extrapolator,
spline) tuples which are interp1d instances for each set of tabular
data; indexed by tuple of (name, interpolation_T,
interpolation_property, interpolation_property_inv) to ensure that
if an interpolation transform is altered, the old interpolator which
had been created is no longer used.'''
self.sorted_valid_methods = []
'''sorted_valid_methods, list: Stored methods which were found valid
at a specific temperature; set by `T_dependent_property`.'''
self.user_methods = []
'''user_methods, list: Stored methods which were specified by the user
in a ranked order of preference; set by `T_dependent_property`.'''
self.all_methods = set()
'''Set of all methods available for a given CASRN and properties;
filled by :obj:`load_all_methods`.'''
self.load_all_methods() | [
"def",
"__init__",
"(",
"self",
",",
"Tb",
"=",
"None",
",",
"Tc",
"=",
"None",
",",
"Pc",
"=",
"None",
",",
"omega",
"=",
"None",
",",
"CASRN",
"=",
"''",
",",
"eos",
"=",
"None",
")",
":",
"self",
".",
"CASRN",
"=",
"CASRN",
"self",
".",
"Tb",
"=",
"Tb",
"self",
".",
"Tc",
"=",
"Tc",
"self",
".",
"Pc",
"=",
"Pc",
"self",
".",
"omega",
"=",
"omega",
"self",
".",
"eos",
"=",
"eos",
"self",
".",
"Tmin",
"=",
"None",
"self",
".",
"Tmax",
"=",
"None",
"self",
".",
"method",
"=",
"None",
"self",
".",
"tabular_data",
"=",
"{",
"}",
"self",
".",
"tabular_data_interpolators",
"=",
"{",
"}",
"self",
".",
"sorted_valid_methods",
"=",
"[",
"]",
"self",
".",
"user_methods",
"=",
"[",
"]",
"self",
".",
"all_methods",
"=",
"set",
"(",
")",
"self",
".",
"load_all_methods",
"(",
")"
] | python | Minimum temperature at which no method can calculate vapor pressure
under. | false |
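An illustrative instantiation with rounded literature constants for ethanol (CAS 64-17-5); the numbers are placeholders rather than authoritative data:

vp = VaporPressure(Tb=351.4, Tc=514.0, Pc=6.137e6, omega=0.635, CASRN='64-17-5')
# load_all_methods() populated vp.all_methods during construction; temperature-dependent
# lookups then go through the inherited T_dependent_property machinery mentioned above.
print(sorted(vp.all_methods))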
2,707,472 | def item_related_name(self):
"""
The ManyToMany field on the item class pointing to this class.
If there is more than one field, this value will be None.
"""
if not hasattr(self, '_item_related_name'):
many_to_many_rels = \
get_section_many_to_many_relations(self.__class__)
if len(many_to_many_rels) != 1:
self._item_related_name = None
else:
self._item_related_name = many_to_many_rels[0].field.name
return self._item_related_name | [
"def",
"item_related_name",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_item_related_name'",
")",
":",
"many_to_many_rels",
"=",
"get_section_many_to_many_relations",
"(",
"self",
".",
"__class__",
")",
"if",
"len",
"(",
"many_to_many_rels",
")",
"!=",
"1",
":",
"self",
".",
"_item_related_name",
"=",
"None",
"else",
":",
"self",
".",
"_item_related_name",
"=",
"many_to_many_rels",
"[",
"0",
"]",
".",
"field",
".",
"name",
"return",
"self",
".",
"_item_related_name"
] | python | The ManyToMany field on the item class pointing to this class.
If there is more than one field, this value will be None. | false |
1,734,678 | def annotate(self, sent):
"""Annotate a squence of words with entity tags.
Args:
sent: sequence of strings/words.
"""
preds = []
words = []
for word, fv in self.sent2examples(sent):
probs = self.predictor(fv)
tags = probs.argsort()
tag = self.ID_TAG[tags[-1]]
words.append(word)
preds.append(tag)
# fix_chunks(preds)
annotations = zip(words, preds)
return annotations | [
"def",
"annotate",
"(",
"self",
",",
"sent",
")",
":",
"preds",
"=",
"[",
"]",
"words",
"=",
"[",
"]",
"for",
"word",
",",
"fv",
"in",
"self",
".",
"sent2examples",
"(",
"sent",
")",
":",
"probs",
"=",
"self",
".",
"predictor",
"(",
"fv",
")",
"tags",
"=",
"probs",
".",
"argsort",
"(",
")",
"tag",
"=",
"self",
".",
"ID_TAG",
"[",
"tags",
"[",
"-",
"1",
"]",
"]",
"words",
".",
"append",
"(",
"word",
")",
"preds",
".",
"append",
"(",
"tag",
")",
"annotations",
"=",
"zip",
"(",
"words",
",",
"preds",
")",
"return",
"annotations"
] | python | Annotate a sequence of words with entity tags.
Args:
sent: sequence of strings/words. | false |
2,065,623 | def toHdlConversion(self, top, topName: str, saveTo: str) -> List[str]:
"""
:param top: object which is a representation of the design
:param topName: name which should be used for ipcore
:param saveTo: path of directory where generated files should be stored
:return: list of file names in correct compile order
"""
return toRtl(top,
saveTo=saveTo,
name=topName,
serializer=self.serializer,
targetPlatform=self.targetPlatform) | [
"def",
"toHdlConversion",
"(",
"self",
",",
"top",
",",
"topName",
":",
"str",
",",
"saveTo",
":",
"str",
")",
"->",
"List",
"[",
"str",
"]",
":",
"return",
"toRtl",
"(",
"top",
",",
"saveTo",
"=",
"saveTo",
",",
"name",
"=",
"topName",
",",
"serializer",
"=",
"self",
".",
"serializer",
",",
"targetPlatform",
"=",
"self",
".",
"targetPlatform",
")"
] | python | :param top: object which is represenation of design
:param topName: name which should be used for ipcore
:param saveTo: path of directory where generated files should be stored
:return: list of file namens in correct compile order | false |
1,834,817 | def kl_apply(par_file, basis_file,par_to_file_dict,arr_shape):
""" Applies a KL parameterization transform from basis factors to model
input arrays. Companion function to kl_setup()
Parameters
----------
par_file : str
the csv file to get factor values from. Must contain
the following columns: name, new_val, org_val
basis_file : str
the binary file that contains the reduced basis
par_to_file_dict : dict
a mapping from KL parameter prefixes to array file names.
Note
----
This is the companion function to kl_setup.
This function should be called during the forward run
Example
-------
``>>>import pyemu``
``>>>pyemu.helpers.kl_apply("kl.dat","basis.dat",{"hk":"hk_layer_1.dat"},(100,100))``
"""
df = pd.read_csv(par_file)
assert "name" in df.columns
assert "org_val" in df.columns
assert "new_val" in df.columns
df.loc[:,"prefix"] = df.name.apply(lambda x: x[:-4])
for prefix in df.prefix.unique():
assert prefix in par_to_file_dict.keys(),"missing prefix:{0}".\
format(prefix)
basis = pyemu.Matrix.from_binary(basis_file)
assert basis.shape[1] == arr_shape[0] * arr_shape[1]
arr_min = 1.0e-10 # a temp hack
#means = df.loc[df.name.apply(lambda x: x.endswith("mean")),:]
#print(means)
df = df.loc[df.name.apply(lambda x: not x.endswith("mean")),:]
for prefix,filename in par_to_file_dict.items():
factors = pyemu.Matrix.from_dataframe(df.loc[df.prefix==prefix,["new_val"]])
factors.autoalign = False
basis_prefix = basis[:factors.shape[0],:]
arr = (factors.T * basis_prefix).x.reshape(arr_shape)
#arr += means.loc[means.prefix==prefix,"new_val"].values
arr[arr<arr_min] = arr_min
np.savetxt(filename,arr,fmt="%20.8E") | [
"def",
"kl_apply",
"(",
"par_file",
",",
"basis_file",
",",
"par_to_file_dict",
",",
"arr_shape",
")",
":",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"par_file",
")",
"assert",
"\"name\"",
"in",
"df",
".",
"columns",
"assert",
"\"org_val\"",
"in",
"df",
".",
"columns",
"assert",
"\"new_val\"",
"in",
"df",
".",
"columns",
"df",
".",
"loc",
"[",
":",
",",
"\"prefix\"",
"]",
"=",
"df",
".",
"name",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
"[",
":",
"-",
"4",
"]",
")",
"for",
"prefix",
"in",
"df",
".",
"prefix",
".",
"unique",
"(",
")",
":",
"assert",
"prefix",
"in",
"par_to_file_dict",
".",
"keys",
"(",
")",
",",
"\"missing prefix:{0}\"",
".",
"format",
"(",
"prefix",
")",
"basis",
"=",
"pyemu",
".",
"Matrix",
".",
"from_binary",
"(",
"basis_file",
")",
"assert",
"basis",
".",
"shape",
"[",
"1",
"]",
"==",
"arr_shape",
"[",
"0",
"]",
"*",
"arr_shape",
"[",
"1",
"]",
"arr_min",
"=",
"1.0e-10",
"df",
"=",
"df",
".",
"loc",
"[",
"df",
".",
"name",
".",
"apply",
"(",
"lambda",
"x",
":",
"not",
"x",
".",
"endswith",
"(",
"\"mean\"",
")",
")",
",",
":",
"]",
"for",
"prefix",
",",
"filename",
"in",
"par_to_file_dict",
".",
"items",
"(",
")",
":",
"factors",
"=",
"pyemu",
".",
"Matrix",
".",
"from_dataframe",
"(",
"df",
".",
"loc",
"[",
"df",
".",
"prefix",
"==",
"prefix",
",",
"[",
"\"new_val\"",
"]",
"]",
")",
"factors",
".",
"autoalign",
"=",
"False",
"basis_prefix",
"=",
"basis",
"[",
":",
"factors",
".",
"shape",
"[",
"0",
"]",
",",
":",
"]",
"arr",
"=",
"(",
"factors",
".",
"T",
"*",
"basis_prefix",
")",
".",
"x",
".",
"reshape",
"(",
"arr_shape",
")",
"arr",
"[",
"arr",
"<",
"arr_min",
"]",
"=",
"arr_min",
"np",
".",
"savetxt",
"(",
"filename",
",",
"arr",
",",
"fmt",
"=",
"\"%20.8E\"",
")"
] | python | Applies a KL parameterization transform from basis factors to model
input arrays. Companion function to kl_setup()
Parameters
----------
par_file : str
the csv file to get factor values from. Must contain
the following columns: name, new_val, org_val
basis_file : str
the binary file that contains the reduced basis
par_to_file_dict : dict
a mapping from KL parameter prefixes to array file names.
Note
----
This is the companion function to kl_setup.
This function should be called during the forward run
Example
-------
``>>>import pyemu``
``>>>pyemu.helpers.kl_apply("kl.dat","basis.dat",{"hk":"hk_layer_1.dat"},(100,100))`` | false |
2,629,156 | def get_reports(self):
"""
Retrieve all reports submitted for this Sample.
:return: A list of :class:`.Report`
"""
url = '{}reports/'.format(self.url)
return Report._get_list_from_url(url, append_base_url=False) | [
"def",
"get_reports",
"(",
"self",
")",
":",
"url",
"=",
"'{}reports/'",
".",
"format",
"(",
"self",
".",
"url",
")",
"return",
"Report",
".",
"_get_list_from_url",
"(",
"url",
",",
"append_base_url",
"=",
"False",
")"
] | python | Retrieve all reports submitted for this Sample.
:return: A list of :class:`.Report` | false |
2,518,869 | def run_tfba(self, reaction):
"""Run FBA and tFBA on model."""
solver = self._get_solver(integer=True)
p = fluxanalysis.FluxBalanceProblem(self._mm, solver)
start_time = time.time()
p.add_thermodynamic()
try:
p.maximize(reaction)
except fluxanalysis.FluxBalanceError as e:
self.report_flux_balance_error(e)
logger.info('Solving took {:.2f} seconds'.format(
time.time() - start_time))
for reaction_id in self._mm.reactions:
yield reaction_id, p.get_flux(reaction_id) | [
"def",
"run_tfba",
"(",
"self",
",",
"reaction",
")",
":",
"solver",
"=",
"self",
".",
"_get_solver",
"(",
"integer",
"=",
"True",
")",
"p",
"=",
"fluxanalysis",
".",
"FluxBalanceProblem",
"(",
"self",
".",
"_mm",
",",
"solver",
")",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"p",
".",
"add_thermodynamic",
"(",
")",
"try",
":",
"p",
".",
"maximize",
"(",
"reaction",
")",
"except",
"fluxanalysis",
".",
"FluxBalanceError",
"as",
"e",
":",
"self",
".",
"report_flux_balance_error",
"(",
"e",
")",
"logger",
".",
"info",
"(",
"'Solving took {:.2f} seconds'",
".",
"format",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
")",
")",
"for",
"reaction_id",
"in",
"self",
".",
"_mm",
".",
"reactions",
":",
"yield",
"reaction_id",
",",
"p",
".",
"get_flux",
"(",
"reaction_id",
")"
] | python | Run FBA and tFBA on model. | false |
1,751,446 | def echo_worker(self):
""" The `echo_worker` works through the `self.received_transfers` queue and spawns
`self.on_transfer` greenlets for all not-yet-seen transfers. """
log.debug('echo worker', qsize=self.received_transfers.qsize())
while self.stop_signal is None:
if self.received_transfers.qsize() > 0:
transfer = self.received_transfers.get()
if transfer in self.seen_transfers:
log.debug(
'duplicate transfer ignored',
initiator=pex(transfer.initiator),
amount=transfer.amount,
identifier=transfer.identifier,
)
else:
self.seen_transfers.append(transfer)
self.greenlets.add(gevent.spawn(self.on_transfer, transfer))
else:
gevent.sleep(.5) | [
"def",
"echo_worker",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"'echo worker'",
",",
"qsize",
"=",
"self",
".",
"received_transfers",
".",
"qsize",
"(",
")",
")",
"while",
"self",
".",
"stop_signal",
"is",
"None",
":",
"if",
"self",
".",
"received_transfers",
".",
"qsize",
"(",
")",
">",
"0",
":",
"transfer",
"=",
"self",
".",
"received_transfers",
".",
"get",
"(",
")",
"if",
"transfer",
"in",
"self",
".",
"seen_transfers",
":",
"log",
".",
"debug",
"(",
"'duplicate transfer ignored'",
",",
"initiator",
"=",
"pex",
"(",
"transfer",
".",
"initiator",
")",
",",
"amount",
"=",
"transfer",
".",
"amount",
",",
"identifier",
"=",
"transfer",
".",
"identifier",
",",
")",
"else",
":",
"self",
".",
"seen_transfers",
".",
"append",
"(",
"transfer",
")",
"self",
".",
"greenlets",
".",
"add",
"(",
"gevent",
".",
"spawn",
"(",
"self",
".",
"on_transfer",
",",
"transfer",
")",
")",
"else",
":",
"gevent",
".",
"sleep",
"(",
".5",
")"
] | python | The `echo_worker` works through the `self.received_transfers` queue and spawns
`self.on_transfer` greenlets for all not-yet-seen transfers. | false |
2,591,076 | def set_aad_metadata(uri, resource, client):
"""Set AAD metadata."""
set_config_value('authority_uri', uri)
set_config_value('aad_resource', resource)
set_config_value('aad_client', client) | [
"def",
"set_aad_metadata",
"(",
"uri",
",",
"resource",
",",
"client",
")",
":",
"set_config_value",
"(",
"'authority_uri'",
",",
"uri",
")",
"set_config_value",
"(",
"'aad_resource'",
",",
"resource",
")",
"set_config_value",
"(",
"'aad_client'",
",",
"client",
")"
] | python | Set AAD metadata. | false |
2,150,094 | def checker_for_type(t):
"""
Return "checker" function for the given type `t`.
This checker function will accept a single argument (of any type), and
return True if the argument matches type `t`, or False otherwise. For
example:
chkr = checker_for_type(int)
assert chkr.check(123) is True
assert chkr.check("5") is False
"""
try:
if t is True:
return true_checker
if t is False:
return false_checker
checker = memoized_type_checkers.get(t)
if checker is not None:
return checker
hashable = True
except TypeError:
# Exception may be raised if `t` is not hashable (e.g. a dict)
hashable = False
# The type checker needs to be created
checker = _create_checker_for_type(t)
if hashable:
memoized_type_checkers[t] = checker
return checker | [
"def",
"checker_for_type",
"(",
"t",
")",
":",
"try",
":",
"if",
"t",
"is",
"True",
":",
"return",
"true_checker",
"if",
"t",
"is",
"False",
":",
"return",
"false_checker",
"checker",
"=",
"memoized_type_checkers",
".",
"get",
"(",
"t",
")",
"if",
"checker",
"is",
"not",
"None",
":",
"return",
"checker",
"hashable",
"=",
"True",
"except",
"TypeError",
":",
"hashable",
"=",
"False",
"checker",
"=",
"_create_checker_for_type",
"(",
"t",
")",
"if",
"hashable",
":",
"memoized_type_checkers",
"[",
"t",
"]",
"=",
"checker",
"return",
"checker"
] | python | Return "checker" function for the given type `t`.
This checker function will accept a single argument (of any type), and
return True if the argument matches type `t`, or False otherwise. For
example:
chkr = checker_for_type(int)
assert chkr.check(123) is True
assert chkr.check("5") is False | false |
1,981,528 | def functions_to_table(mod, colwidth=[27, 48]):
r"""
Given a module of functions, returns a ReST formatted text string that
outputs a table when printed.
Parameters
----------
mod : module
The module containing the functions to be included in the table, such
as 'porespy.filters'.
colwidth : list of ints
The width of the first and second columns. Note that because of the
vertical lines separating columns and defining the edges of the table,
the total table width will be 3 characters wider than the total sum
of the specified column widths.
"""
temp = mod.__dir__()
funcs = [i for i in temp if not i[0].startswith('_')]
funcs.sort()
row = '+' + '-'*colwidth[0] + '+' + '-'*colwidth[1] + '+'
fmt = '{0:1s} {1:' + str(colwidth[0]-2) + 's} {2:1s} {3:' \
+ str(colwidth[1]-2) + 's} {4:1s}'
lines = []
lines.append(row)
lines.append(fmt.format('|', 'Method', '|', 'Description', '|'))
lines.append(row.replace('-', '='))
for i, item in enumerate(funcs):
try:
s = getattr(mod, item).__doc__.strip()
end = s.find('\n')
if end > colwidth[1] - 2:
s = s[:colwidth[1] - 5] + '...'
lines.append(fmt.format('|', item, '|', s[:end], '|'))
lines.append(row)
except AttributeError:
pass
s = '\n'.join(lines)
return s | [
"def",
"functions_to_table",
"(",
"mod",
",",
"colwidth",
"=",
"[",
"27",
",",
"48",
"]",
")",
":",
"temp",
"=",
"mod",
".",
"__dir__",
"(",
")",
"funcs",
"=",
"[",
"i",
"for",
"i",
"in",
"temp",
"if",
"not",
"i",
"[",
"0",
"]",
".",
"startswith",
"(",
"'_'",
")",
"]",
"funcs",
".",
"sort",
"(",
")",
"row",
"=",
"'+'",
"+",
"'-'",
"*",
"colwidth",
"[",
"0",
"]",
"+",
"'+'",
"+",
"'-'",
"*",
"colwidth",
"[",
"1",
"]",
"+",
"'+'",
"fmt",
"=",
"'{0:1s} {1:'",
"+",
"str",
"(",
"colwidth",
"[",
"0",
"]",
"-",
"2",
")",
"+",
"'s} {2:1s} {3:'",
"+",
"str",
"(",
"colwidth",
"[",
"1",
"]",
"-",
"2",
")",
"+",
"'s} {4:1s}'",
"lines",
"=",
"[",
"]",
"lines",
".",
"append",
"(",
"row",
")",
"lines",
".",
"append",
"(",
"fmt",
".",
"format",
"(",
"'|'",
",",
"'Method'",
",",
"'|'",
",",
"'Description'",
",",
"'|'",
")",
")",
"lines",
".",
"append",
"(",
"row",
".",
"replace",
"(",
"'-'",
",",
"'='",
")",
")",
"for",
"i",
",",
"item",
"in",
"enumerate",
"(",
"funcs",
")",
":",
"try",
":",
"s",
"=",
"getattr",
"(",
"mod",
",",
"item",
")",
".",
"__doc__",
".",
"strip",
"(",
")",
"end",
"=",
"s",
".",
"find",
"(",
"'\\n'",
")",
"if",
"end",
">",
"colwidth",
"[",
"1",
"]",
"-",
"2",
":",
"s",
"=",
"s",
"[",
":",
"colwidth",
"[",
"1",
"]",
"-",
"5",
"]",
"+",
"'...'",
"lines",
".",
"append",
"(",
"fmt",
".",
"format",
"(",
"'|'",
",",
"item",
",",
"'|'",
",",
"s",
"[",
":",
"end",
"]",
",",
"'|'",
")",
")",
"lines",
".",
"append",
"(",
"row",
")",
"except",
"AttributeError",
":",
"pass",
"s",
"=",
"'\\n'",
".",
"join",
"(",
"lines",
")",
"return",
"s"
] | python | r"""
Given a module of functions, returns a ReST formatted text string that
outputs a table when printed.
Parameters
----------
mod : module
The module containing the functions to be included in the table, such
as 'porespy.filters'.
colwidth : list of ints
The width of the first and second columns. Note that because of the
vertical lines that separate the columns and define the edges of the table,
the total table width will be 3 characters wider than the total sum
of the specified column widths. | false |
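A hedged usage sketch: the module passed in is arbitrary (any module whose public callables carry docstrings works; 'porespy.filters' is only the docstring's example), and printing the result yields a ReST grid table.

import string as demo_module   # illustrative choice of module

# Build and print the two-column ReST table of public names and first docstring lines.
table = functions_to_table(demo_module, colwidth=[27, 48])
print(table)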
2,050,331 | def change_attributes(self, bounds, radii, colors):
"""Reinitialize the buffers, to accomodate the new
attributes. This is used to change the number of cylinders to
be displayed.
"""
self.n_cylinders = len(bounds)
self.is_empty = True if self.n_cylinders == 0 else False
if self.is_empty:
self.bounds = bounds
self.radii = radii
self.colors = colors
return # Do nothing
# We pass the starting position 8 times, and each of these has
# a mapping to the bounding box corner.
self.bounds = np.array(bounds, dtype='float32')
vertices, directions = self._gen_bounds(self.bounds)
self.radii = np.array(radii, dtype='float32')
prim_radii = self._gen_radii(self.radii)
self.colors = np.array(colors, dtype='uint8')
prim_colors = self._gen_colors(self.colors)
local = np.array([
# First face -- front
0.0, 0.0, 0.0,
0.0, 1.0, 0.0,
1.0, 1.0, 0.0,
0.0, 0.0, 0.0,
1.0, 1.0, 0.0,
1.0, 0.0, 0.0,
# Second face -- back
0.0, 0.0, 1.0,
0.0, 1.0, 1.0,
1.0, 1.0, 1.0,
0.0, 0.0, 1.0,
1.0, 1.0, 1.0,
1.0, 0.0, 1.0,
# Third face -- left
0.0, 0.0, 0.0,
0.0, 0.0, 1.0,
0.0, 1.0, 1.0,
0.0, 0.0, 0.0,
0.0, 1.0, 1.0,
0.0, 1.0, 0.0,
# Fourth face -- right
1.0, 0.0, 0.0,
1.0, 0.0, 1.0,
1.0, 1.0, 1.0,
1.0, 0.0, 0.0,
1.0, 1.0, 1.0,
1.0, 1.0, 0.0,
# Fifth face -- up
0.0, 1.0, 0.0,
0.0, 1.0, 1.0,
1.0, 1.0, 1.0,
0.0, 1.0, 0.0,
1.0, 1.0, 1.0,
1.0, 1.0, 0.0,
# Sixth face -- down
0.0, 0.0, 0.0,
0.0, 0.0, 1.0,
1.0, 0.0, 1.0,
0.0, 0.0, 0.0,
1.0, 0.0, 1.0,
1.0, 0.0, 0.0,
]).astype('float32')
local = np.tile(local, self.n_cylinders)
self._verts_vbo = VertexBuffer(vertices,GL_DYNAMIC_DRAW)
self._directions_vbo = VertexBuffer(directions, GL_DYNAMIC_DRAW)
self._local_vbo = VertexBuffer(local,GL_DYNAMIC_DRAW)
self._color_vbo = VertexBuffer(prim_colors, GL_DYNAMIC_DRAW)
self._radii_vbo = VertexBuffer(prim_radii, GL_DYNAMIC_DRAW) | [
"def",
"change_attributes",
"(",
"self",
",",
"bounds",
",",
"radii",
",",
"colors",
")",
":",
"self",
".",
"n_cylinders",
"=",
"len",
"(",
"bounds",
")",
"self",
".",
"is_empty",
"=",
"True",
"if",
"self",
".",
"n_cylinders",
"==",
"0",
"else",
"False",
"if",
"self",
".",
"is_empty",
":",
"self",
".",
"bounds",
"=",
"bounds",
"self",
".",
"radii",
"=",
"radii",
"self",
".",
"colors",
"=",
"colors",
"return",
"self",
".",
"bounds",
"=",
"np",
".",
"array",
"(",
"bounds",
",",
"dtype",
"=",
"'float32'",
")",
"vertices",
",",
"directions",
"=",
"self",
".",
"_gen_bounds",
"(",
"self",
".",
"bounds",
")",
"self",
".",
"radii",
"=",
"np",
".",
"array",
"(",
"radii",
",",
"dtype",
"=",
"'float32'",
")",
"prim_radii",
"=",
"self",
".",
"_gen_radii",
"(",
"self",
".",
"radii",
")",
"self",
".",
"colors",
"=",
"np",
".",
"array",
"(",
"colors",
",",
"dtype",
"=",
"'uint8'",
")",
"prim_colors",
"=",
"self",
".",
"_gen_colors",
"(",
"self",
".",
"colors",
")",
"local",
"=",
"np",
".",
"array",
"(",
"[",
"0.0",
",",
"0.0",
",",
"0.0",
",",
"0.0",
",",
"1.0",
",",
"0.0",
",",
"1.0",
",",
"1.0",
",",
"0.0",
",",
"0.0",
",",
"0.0",
",",
"0.0",
",",
"1.0",
",",
"1.0",
",",
"0.0",
",",
"1.0",
",",
"0.0",
",",
"0.0",
",",
"0.0",
",",
"0.0",
",",
"1.0",
",",
"0.0",
",",
"1.0",
",",
"1.0",
",",
"1.0",
",",
"1.0",
",",
"1.0",
",",
"0.0",
",",
"0.0",
",",
"1.0",
",",
"1.0",
",",
"1.0",
",",
"1.0",
",",
"1.0",
",",
"0.0",
",",
"1.0",
",",
"0.0",
",",
"0.0",
",",
"0.0",
",",
"0.0",
",",
"0.0",
",",
"1.0",
",",
"0.0",
",",
"1.0",
",",
"1.0",
",",
"0.0",
",",
"0.0",
",",
"0.0",
",",
"0.0",
",",
"1.0",
",",
"1.0",
",",
"0.0",
",",
"1.0",
",",
"0.0",
",",
"1.0",
",",
"0.0",
",",
"0.0",
",",
"1.0",
",",
"0.0",
",",
"1.0",
",",
"1.0",
",",
"1.0",
",",
"1.0",
",",
"1.0",
",",
"0.0",
",",
"0.0",
",",
"1.0",
",",
"1.0",
",",
"1.0",
",",
"1.0",
",",
"1.0",
",",
"0.0",
",",
"0.0",
",",
"1.0",
",",
"0.0",
",",
"0.0",
",",
"1.0",
",",
"1.0",
",",
"1.0",
",",
"1.0",
",",
"1.0",
",",
"0.0",
",",
"1.0",
",",
"0.0",
",",
"1.0",
",",
"1.0",
",",
"1.0",
",",
"1.0",
",",
"1.0",
",",
"0.0",
",",
"0.0",
",",
"0.0",
",",
"0.0",
",",
"0.0",
",",
"0.0",
",",
"1.0",
",",
"1.0",
",",
"0.0",
",",
"1.0",
",",
"0.0",
",",
"0.0",
",",
"0.0",
",",
"1.0",
",",
"0.0",
",",
"1.0",
",",
"1.0",
",",
"0.0",
",",
"0.0",
",",
"]",
")",
".",
"astype",
"(",
"'float32'",
")",
"local",
"=",
"np",
".",
"tile",
"(",
"local",
",",
"self",
".",
"n_cylinders",
")",
"self",
".",
"_verts_vbo",
"=",
"VertexBuffer",
"(",
"vertices",
",",
"GL_DYNAMIC_DRAW",
")",
"self",
".",
"_directions_vbo",
"=",
"VertexBuffer",
"(",
"directions",
",",
"GL_DYNAMIC_DRAW",
")",
"self",
".",
"_local_vbo",
"=",
"VertexBuffer",
"(",
"local",
",",
"GL_DYNAMIC_DRAW",
")",
"self",
".",
"_color_vbo",
"=",
"VertexBuffer",
"(",
"prim_colors",
",",
"GL_DYNAMIC_DRAW",
")",
"self",
".",
"_radii_vbo",
"=",
"VertexBuffer",
"(",
"prim_radii",
",",
"GL_DYNAMIC_DRAW",
")"
] | python | Reinitialize the buffers, to accommodate the new
attributes. This is used to change the number of cylinders to
be displayed. | false |
2,523,862 | def parse_changesets(text):
"""
Returns dictionary with *start*, *main* and *end* ids.
Examples::
>>> parse_changesets('aaabbb')
{'start': None, 'main': 'aaabbb', 'end': None}
>>> parse_changesets('aaabbb..cccddd')
{'start': 'aaabbb', 'main': None, 'end': 'cccddd'}
"""
text = text.strip()
CID_RE = r'[a-zA-Z0-9]+'
if not '..' in text:
m = re.match(r'^(?P<cid>%s)$' % CID_RE, text)
if m:
return {
'start': None,
'main': text,
'end': None,
}
else:
RE = r'^(?P<start>%s)?\.{2,3}(?P<end>%s)?$' % (CID_RE, CID_RE)
m = re.match(RE, text)
if m:
result = m.groupdict()
result['main'] = None
return result
raise ValueError("IDs not recognized") | [
"def",
"parse_changesets",
"(",
"text",
")",
":",
"text",
"=",
"text",
".",
"strip",
"(",
")",
"CID_RE",
"=",
"r'[a-zA-Z0-9]+'",
"if",
"not",
"'..'",
"in",
"text",
":",
"m",
"=",
"re",
".",
"match",
"(",
"r'^(?P<cid>%s)$'",
"%",
"CID_RE",
",",
"text",
")",
"if",
"m",
":",
"return",
"{",
"'start'",
":",
"None",
",",
"'main'",
":",
"text",
",",
"'end'",
":",
"None",
",",
"}",
"else",
":",
"RE",
"=",
"r'^(?P<start>%s)?\\.{2,3}(?P<end>%s)?$'",
"%",
"(",
"CID_RE",
",",
"CID_RE",
")",
"m",
"=",
"re",
".",
"match",
"(",
"RE",
",",
"text",
")",
"if",
"m",
":",
"result",
"=",
"m",
".",
"groupdict",
"(",
")",
"result",
"[",
"'main'",
"]",
"=",
"None",
"return",
"result",
"raise",
"ValueError",
"(",
"\"IDs not recognized\"",
")"
] | python | Returns dictionary with *start*, *main* and *end* ids.
Examples::
>>> parse_changesets('aaabbb')
{'start': None, 'main': 'aaabbb', 'end': None}
>>> parse_changesets('aaabbb..cccddd')
{'start': 'aaabbb', 'main': None, 'end': 'cccddd'} | false |
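Two additional cases, assuming the regex above is used unchanged: either endpoint of the range form may be omitted, and anything else raises.

# Both capture groups in the range form are optional, so open-ended
# ranges parse; unrecognised input raises ValueError.
parse_changesets('aaabbb..')      # {'start': 'aaabbb', 'main': None, 'end': None}
parse_changesets('...cccddd')     # {'start': None, 'main': None, 'end': 'cccddd'}
# parse_changesets('not valid!')  # would raise ValueError("IDs not recognized")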
2,103,875 | def hex_escape_str(original_str):
"""
Function to make sure we can generate proper string reports.
If character is not printable, call repr for that character.
Finally join the result.
:param original_str: Original fail reason as string.
:return: string
"""
new = []
for char in original_str:
if str(char) in string.printable:
new.append(str(char))
continue
if IS_PYTHON3:
new.append(str(char).encode("unicode_escape").decode("ascii"))
else:
new.append(repr(char).replace("'", ""))
return "".join(new) | [
"def",
"hex_escape_str",
"(",
"original_str",
")",
":",
"new",
"=",
"[",
"]",
"for",
"char",
"in",
"original_str",
":",
"if",
"str",
"(",
"char",
")",
"in",
"string",
".",
"printable",
":",
"new",
".",
"append",
"(",
"str",
"(",
"char",
")",
")",
"continue",
"if",
"IS_PYTHON3",
":",
"new",
".",
"append",
"(",
"str",
"(",
"char",
")",
".",
"encode",
"(",
"\"unicode_escape\"",
")",
".",
"decode",
"(",
"\"ascii\"",
")",
")",
"else",
":",
"new",
".",
"append",
"(",
"repr",
"(",
"char",
")",
".",
"replace",
"(",
"\"'\"",
",",
"\"\"",
")",
")",
"return",
"\"\"",
".",
"join",
"(",
"new",
")"
] | python | Function to make sure we can generate proper string reports.
If character is not printable, call repr for that character.
Finally join the result.
:param original_str: Original fail reason as string.
:return: string | false |
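An assumed Python 3 usage example (so the IS_PYTHON3 branch is taken): printable characters pass through while control bytes become literal escape sequences.

report = hex_escape_str("ok\x00\x1b[31mred")
print(report)   # ok\x00\x1b[31mred  -- NUL and ESC are now visible escapes, not control bytes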
2,376,250 | def is_error_of_type(exc, ref_type):
"""
Helper function to determine if some exception is of some type, by also looking at its declared __cause__
:param exc:
:param ref_type:
:return:
"""
if isinstance(exc, ref_type):
return True
elif hasattr(exc, '__cause__') and exc.__cause__ is not None:
return is_error_of_type(exc.__cause__, ref_type) | [
"def",
"is_error_of_type",
"(",
"exc",
",",
"ref_type",
")",
":",
"if",
"isinstance",
"(",
"exc",
",",
"ref_type",
")",
":",
"return",
"True",
"elif",
"hasattr",
"(",
"exc",
",",
"'__cause__'",
")",
"and",
"exc",
".",
"__cause__",
"is",
"not",
"None",
":",
"return",
"is_error_of_type",
"(",
"exc",
".",
"__cause__",
",",
"ref_type",
")"
] | python | Helper function to determine if some exception is of some type, by also looking at its declared __cause__
:param exc:
:param ref_type:
:return: | false |
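A self-contained illustration of the cause-chain walk; the exception classes below are invented for the example.

class LowLevelError(Exception):
    pass

class HighLevelError(Exception):
    pass

try:
    try:
        raise LowLevelError("disk full")
    except LowLevelError as low:
        raise HighLevelError("save failed") from low   # sets __cause__
except HighLevelError as err:
    assert is_error_of_type(err, HighLevelError)
    assert is_error_of_type(err, LowLevelError)        # found one level down the cause chain
    assert not is_error_of_type(err, KeyError)         # falls off the end, returns None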
2,639,553 | def get_context(namespace, context_id):
"""Get stored context object."""
context_obj = get_state(context_id, namespace=namespace)
if not context_obj:
raise ContextError("Context '{}' not found in namespace '{}'".format(
context_id, namespace))
return context_obj | [
"def",
"get_context",
"(",
"namespace",
",",
"context_id",
")",
":",
"context_obj",
"=",
"get_state",
"(",
"context_id",
",",
"namespace",
"=",
"namespace",
")",
"if",
"not",
"context_obj",
":",
"raise",
"ContextError",
"(",
"\"Context '{}' not found in namespace '{}'\"",
".",
"format",
"(",
"context_id",
",",
"namespace",
")",
")",
"return",
"context_obj"
] | python | Get stored context object. | false |
1,577,227 | def join_host_port(host, port):
"""Joins a hostname and port together.
This is a minimal implementation intended to cope with IPv6 literals. For
example, _join_host_port('::1', 80) == '[::1]:80'.
:Args:
- host - A hostname.
- port - An integer port.
"""
if ':' in host and not host.startswith('['):
return '[%s]:%d' % (host, port)
return '%s:%d' % (host, port) | [
"def",
"join_host_port",
"(",
"host",
",",
"port",
")",
":",
"if",
"':'",
"in",
"host",
"and",
"not",
"host",
".",
"startswith",
"(",
"'['",
")",
":",
"return",
"'[%s]:%d'",
"%",
"(",
"host",
",",
"port",
")",
"return",
"'%s:%d'",
"%",
"(",
"host",
",",
"port",
")"
] | python | Joins a hostname and port together.
This is a minimal implementation intended to cope with IPv6 literals. For
example, _join_host_port('::1', 80) == '[::1]:80'.
:Args:
- host - A hostname.
- port - An integer port. | false |
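Two assumed edge cases worth spelling out: colon-free hostnames are returned unchanged apart from the port, and an already-bracketed literal is not wrapped twice.

assert join_host_port('example.com', 443) == 'example.com:443'
assert join_host_port('::1', 80) == '[::1]:80'
assert join_host_port('[::1]', 80) == '[::1]:80'   # startswith('[') prevents double brackets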
1,772,356 | def formatted_ghost_file(self):
"""
Returns a properly formatted ghost file name.
:returns: formatted ghost_file name (string)
"""
# replace special characters in 'drive:\filename' on Linux and Dynamips on MS Windows, or vice versa.
ghost_file = "{}-{}.ghost".format(os.path.basename(self._image), self._ram)
ghost_file = ghost_file.replace('\\', '-').replace('/', '-').replace(':', '-')
return ghost_file | [
"def",
"formatted_ghost_file",
"(",
"self",
")",
":",
"ghost_file",
"=",
"\"{}-{}.ghost\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"self",
".",
"_image",
")",
",",
"self",
".",
"_ram",
")",
"ghost_file",
"=",
"ghost_file",
".",
"replace",
"(",
"'\\\\'",
",",
"'-'",
")",
".",
"replace",
"(",
"'/'",
",",
"'-'",
")",
".",
"replace",
"(",
"':'",
",",
"'-'",
")",
"return",
"ghost_file"
] | python | Returns a properly formatted ghost file name.
:returns: formatted ghost_file name (string) | false |
1,975,569 | def acctradinginfo_query(self, order_type, code, price, order_id=None, adjust_limit=0, trd_env=TrdEnv.REAL, acc_id=0, acc_index=0):
"""
Query the maximum tradable quantities for the account.
:param order_type: order type, see OrderType
:param code: security code, e.g. 'HK.00700'
:param price: quoted price, 3 decimal places of precision
:param order_id: order ID. Pass None for a new order; pass the existing order ID when amending an order.
:param adjust_limit: price adjustment direction and percentage limit. A positive value adjusts upward, a negative value downward; the magnitude is the limit, e.g. 0.015 means adjust upward by at most 1.5% and -0.01 means adjust downward by at most 1%. Default 0 means no adjustment.
:param trd_env: trading environment, see TrdEnv
:param acc_id: trading account ID, default 0 means the first account
:param acc_index: int, index into the list of trading sub-account IDs, default 0 means the first one
:return: (ret, data)
ret == RET_OK, data is a pd.DataFrame with the following columns
ret != RET_OK, data is an error message
======================= =========== ======================================================================================
Field                   Type        Description
======================= =========== ======================================================================================
max_cash_buy            float       maximum number of whole lots that can be bought using cash only (no margin)
max_cash_and_margin_buy float       maximum number of whole lots that can be bought using cash plus margin funds
max_position_sell       float       maximum number of whole lots that can be sold from the current position (no short selling)
max_sell_short          float       maximum number of whole lots that can be sold short, excluding long positions
max_buy_back            float       maximum number of whole lots that must be bought back after selling short; shorted shares must be bought back and returned before going long again
======================= =========== ======================================================================================
"""
ret, msg = self._check_trd_env(trd_env)
if ret != RET_OK:
return ret, msg
ret, msg, acc_id = self._check_acc_id_and_acc_index(trd_env, acc_id, acc_index)
if ret != RET_OK:
return ret, msg
ret, content = self._split_stock_code(code)
if ret != RET_OK:
return ret, content
market_str, stock_code = content
query_processor = self._get_sync_query_processor(
AccTradingInfoQuery.pack_req,
AccTradingInfoQuery.unpack_rsp)
kargs = {
'order_type': order_type,
'code': str(stock_code),
'price': price,
'order_id': order_id,
'adjust_limit': adjust_limit,
'trd_mkt': self.__trd_mkt,
'sec_mkt_str': market_str,
'trd_env': trd_env,
'acc_id': acc_id,
'conn_id': self.get_sync_conn_id()
}
ret_code, msg, data = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
col_list = ['max_cash_buy', 'max_cash_and_margin_buy', 'max_position_sell', 'max_sell_short', 'max_buy_back']
acctradinginfo_table = pd.DataFrame(data, columns=col_list)
return RET_OK, acctradinginfo_table | [
"def",
"acctradinginfo_query",
"(",
"self",
",",
"order_type",
",",
"code",
",",
"price",
",",
"order_id",
"=",
"None",
",",
"adjust_limit",
"=",
"0",
",",
"trd_env",
"=",
"TrdEnv",
".",
"REAL",
",",
"acc_id",
"=",
"0",
",",
"acc_index",
"=",
"0",
")",
":",
"ret",
",",
"msg",
"=",
"self",
".",
"_check_trd_env",
"(",
"trd_env",
")",
"if",
"ret",
"!=",
"RET_OK",
":",
"return",
"ret",
",",
"msg",
"ret",
",",
"msg",
",",
"acc_id",
"=",
"self",
".",
"_check_acc_id_and_acc_index",
"(",
"trd_env",
",",
"acc_id",
",",
"acc_index",
")",
"if",
"ret",
"!=",
"RET_OK",
":",
"return",
"ret",
",",
"msg",
"ret",
",",
"content",
"=",
"self",
".",
"_split_stock_code",
"(",
"code",
")",
"if",
"ret",
"!=",
"RET_OK",
":",
"return",
"ret",
",",
"content",
"market_str",
",",
"stock_code",
"=",
"content",
"query_processor",
"=",
"self",
".",
"_get_sync_query_processor",
"(",
"AccTradingInfoQuery",
".",
"pack_req",
",",
"AccTradingInfoQuery",
".",
"unpack_rsp",
")",
"kargs",
"=",
"{",
"'order_type'",
":",
"order_type",
",",
"'code'",
":",
"str",
"(",
"stock_code",
")",
",",
"'price'",
":",
"price",
",",
"'order_id'",
":",
"order_id",
",",
"'adjust_limit'",
":",
"adjust_limit",
",",
"'trd_mkt'",
":",
"self",
".",
"__trd_mkt",
",",
"'sec_mkt_str'",
":",
"market_str",
",",
"'trd_env'",
":",
"trd_env",
",",
"'acc_id'",
":",
"acc_id",
",",
"'conn_id'",
":",
"self",
".",
"get_sync_conn_id",
"(",
")",
"}",
"ret_code",
",",
"msg",
",",
"data",
"=",
"query_processor",
"(",
"**",
"kargs",
")",
"if",
"ret_code",
"!=",
"RET_OK",
":",
"return",
"RET_ERROR",
",",
"msg",
"col_list",
"=",
"[",
"'max_cash_buy'",
",",
"'max_cash_and_margin_buy'",
",",
"'max_position_sell'",
",",
"'max_sell_short'",
",",
"'max_buy_back'",
"]",
"acctradinginfo_table",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
",",
"columns",
"=",
"col_list",
")",
"return",
"RET_OK",
",",
"acctradinginfo_table"
] | python | Query the maximum tradable quantities for the account.
:param order_type: order type, see OrderType
:param code: security code, e.g. 'HK.00700'
:param price: quoted price, 3 decimal places of precision
:param order_id: order ID. Pass None for a new order; pass the existing order ID when amending an order.
:param adjust_limit: price adjustment direction and percentage limit. A positive value adjusts upward, a negative value downward; the magnitude is the limit, e.g. 0.015 means adjust upward by at most 1.5% and -0.01 means adjust downward by at most 1%. Default 0 means no adjustment.
:param trd_env: trading environment, see TrdEnv
:param acc_id: trading account ID, default 0 means the first account
:param acc_index: int, index into the list of trading sub-account IDs, default 0 means the first one
:return: (ret, data)
ret == RET_OK, data is a pd.DataFrame with the following columns
ret != RET_OK, data is an error message
======================= =========== ======================================================================================
Field                   Type        Description
======================= =========== ======================================================================================
max_cash_buy            float       maximum number of whole lots that can be bought using cash only (no margin)
max_cash_and_margin_buy float       maximum number of whole lots that can be bought using cash plus margin funds
max_position_sell       float       maximum number of whole lots that can be sold from the current position (no short selling)
max_sell_short          float       maximum number of whole lots that can be sold short, excluding long positions
max_buy_back            float       maximum number of whole lots that must be bought back after selling short; shorted shares must be bought back and returned before going long again
======================= =========== ====================================================================================== | false |
1,767,070 | def ppp_value(simdata, trueval, round=3):
"""
Calculates posterior predictive p-values on data simulated from the posterior
predictive distribution, returning the quantile of the observed data relative to
simulated.
The posterior predictive p-value is computed by:
.. math:: Pr(T(y^{\text{sim}}) > T(y) \mid y)
where T is a test statistic of interest and :math:`y^{\text{sim}}` is the simulated
data.
:Arguments:
simdata: array or PyMC object
Trace of simulated data or the PyMC stochastic object containing trace.
trueval: numeric
True (observed) value of the data
round: int
Rounding of returned quantile (defaults to 3)
"""
if ndim(trueval) == 1 and ndim(simdata) == 2:
# Iterate over more than one set of data
return [ppp_value(simdata[:, i], trueval[i])
for i in range(len(trueval))]
return (simdata > trueval).mean() | [
"def",
"ppp_value",
"(",
"simdata",
",",
"trueval",
",",
"round",
"=",
"3",
")",
":",
"if",
"ndim",
"(",
"trueval",
")",
"==",
"1",
"and",
"ndim",
"(",
"simdata",
"==",
"2",
")",
":",
"return",
"[",
"post_pred_checks",
"(",
"simdata",
"[",
":",
",",
"i",
"]",
",",
"trueval",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"trueval",
")",
")",
"]",
"return",
"(",
"simdata",
">",
"trueval",
")",
".",
"mean",
"(",
")"
] | python | Calculates posterior predictive p-values on data simulated from the posterior
predictive distribution, returning the quantile of the observed data relative to
simulated.
The posterior predictive p-value is computed by:
.. math:: Pr(T(y^{\text{sim}}) > T(y) \mid y)
where T is a test statistic of interest and :math:`y^{\text{sim}}` is the simulated
data.
:Arguments:
simdata: array or PyMC object
Trace of simulated data or the PyMC stochastic object containing trace.
trueval: numeric
True (observed) value of the data
round: int
Rounding of returned quantile (defaults to 3) | false |
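A framework-free numeric sketch of the statistic itself, using numpy instead of PyMC trace objects: the p-value is simply the fraction of simulated test statistics that exceed the observed one.

import numpy as np

simulated = np.array([4.0, 5.5, 6.1, 3.9, 7.2])   # draws of T(y_sim) from the posterior predictive
observed = 5.0                                     # T(y) computed on the real data
p = (simulated > observed).mean()
print(p)   # 0.6 -- three of the five simulated statistics exceed the observed value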
2,487,804 | def cleanDir(self):
''' Remove existing json datafiles in the target directory. '''
if os.path.isdir(self.outdir):
baddies = ['tout.json','nout.json','hout.json']
for file in baddies:
filepath = os.path.join(self.outdir,file)
if os.path.isfile(filepath):
os.remove(filepath) | [
"def",
"cleanDir",
"(",
"self",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"self",
".",
"outdir",
")",
":",
"baddies",
"=",
"[",
"'tout.json'",
",",
"'nout.json'",
",",
"'hout.json'",
"]",
"for",
"file",
"in",
"baddies",
":",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"outdir",
",",
"file",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filepath",
")",
":",
"os",
".",
"remove",
"(",
"filepath",
")"
] | python | Remove existing json datafiles in the target directory. | false |
2,062,700 | def __iter__(self):
"""
Iterates over all keys in the :class:`Map` scoped by this view's
datatype.
"""
for key in self.map.value:
name, datatype = key
if datatype == self.datatype:
yield name | [
"def",
"__iter__",
"(",
"self",
")",
":",
"for",
"key",
"in",
"self",
".",
"map",
".",
"value",
":",
"name",
",",
"datatype",
"=",
"key",
"if",
"datatype",
"==",
"self",
".",
"datatype",
":",
"yield",
"name"
] | python | Iterates over all keys in the :class:`Map` scoped by this view's
datatype. | false |
2,696,418 | def convert_level(self, record):
"""Converts a logging level into a logbook level."""
level = record.levelno
if level >= logging.CRITICAL:
return levels.CRITICAL
if level >= logging.ERROR:
return levels.ERROR
if level >= logging.WARNING:
return levels.WARNING
if level >= logging.INFO:
return levels.INFO
return levels.DEBUG | [
"def",
"convert_level",
"(",
"self",
",",
"record",
")",
":",
"level",
"=",
"record",
".",
"levelno",
"if",
"level",
">=",
"logging",
".",
"CRITICAL",
":",
"return",
"levels",
".",
"CRITICAL",
"if",
"level",
">=",
"logging",
".",
"ERROR",
":",
"return",
"levels",
".",
"ERROR",
"if",
"level",
">=",
"logging",
".",
"WARNING",
":",
"return",
"levels",
".",
"WARNING",
"if",
"level",
">=",
"logging",
".",
"INFO",
":",
"return",
"levels",
".",
"INFO",
"return",
"levels",
".",
"DEBUG"
] | python | Converts a logging level into a logbook level. | false |
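A standalone sketch of the same cascade, returning names instead of logbook level objects so it runs without logbook installed: any stdlib level at or above a threshold maps to that band.

import logging

def stdlib_to_logbook_name(levelno):
    # Same top-down comparison order as convert_level above.
    for threshold, name in [(logging.CRITICAL, 'CRITICAL'),
                            (logging.ERROR, 'ERROR'),
                            (logging.WARNING, 'WARNING'),
                            (logging.INFO, 'INFO')]:
        if levelno >= threshold:
            return name
    return 'DEBUG'

assert stdlib_to_logbook_name(35) == 'WARNING'   # a custom level between WARNING and ERROR
assert stdlib_to_logbook_name(5) == 'DEBUG'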
1,610,679 | def _normalize_cursor(self, cursor, orders):
"""Helper: convert cursor to a list of values based on orders."""
if cursor is None:
return
if not orders:
raise ValueError(_NO_ORDERS_FOR_CURSOR)
document_fields, before = cursor
order_keys = [order.field.field_path for order in orders]
if isinstance(document_fields, document.DocumentSnapshot):
snapshot = document_fields
document_fields = snapshot.to_dict()
document_fields["__name__"] = snapshot.reference
if isinstance(document_fields, dict):
# Transform to list using orders
values = []
data = document_fields
for order_key in order_keys:
try:
values.append(field_path_module.get_nested_value(order_key, data))
except KeyError:
msg = _MISSING_ORDER_BY.format(order_key, data)
raise ValueError(msg)
document_fields = values
if len(document_fields) != len(orders):
msg = _MISMATCH_CURSOR_W_ORDER_BY.format(document_fields, order_keys)
raise ValueError(msg)
_transform_bases = (transforms.Sentinel, transforms._ValueList)
for index, key_field in enumerate(zip(order_keys, document_fields)):
key, field = key_field
if isinstance(field, _transform_bases):
msg = _INVALID_CURSOR_TRANSFORM
raise ValueError(msg)
if key == "__name__" and isinstance(field, six.string_types):
document_fields[index] = self._parent.document(field)
return document_fields, before | [
"def",
"_normalize_cursor",
"(",
"self",
",",
"cursor",
",",
"orders",
")",
":",
"if",
"cursor",
"is",
"None",
":",
"return",
"if",
"not",
"orders",
":",
"raise",
"ValueError",
"(",
"_NO_ORDERS_FOR_CURSOR",
")",
"document_fields",
",",
"before",
"=",
"cursor",
"order_keys",
"=",
"[",
"order",
".",
"field",
".",
"field_path",
"for",
"order",
"in",
"orders",
"]",
"if",
"isinstance",
"(",
"document_fields",
",",
"document",
".",
"DocumentSnapshot",
")",
":",
"snapshot",
"=",
"document_fields",
"document_fields",
"=",
"snapshot",
".",
"to_dict",
"(",
")",
"document_fields",
"[",
"\"__name__\"",
"]",
"=",
"snapshot",
".",
"reference",
"if",
"isinstance",
"(",
"document_fields",
",",
"dict",
")",
":",
"values",
"=",
"[",
"]",
"data",
"=",
"document_fields",
"for",
"order_key",
"in",
"order_keys",
":",
"try",
":",
"values",
".",
"append",
"(",
"field_path_module",
".",
"get_nested_value",
"(",
"order_key",
",",
"data",
")",
")",
"except",
"KeyError",
":",
"msg",
"=",
"_MISSING_ORDER_BY",
".",
"format",
"(",
"order_key",
",",
"data",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"document_fields",
"=",
"values",
"if",
"len",
"(",
"document_fields",
")",
"!=",
"len",
"(",
"orders",
")",
":",
"msg",
"=",
"_MISMATCH_CURSOR_W_ORDER_BY",
".",
"format",
"(",
"document_fields",
",",
"order_keys",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"_transform_bases",
"=",
"(",
"transforms",
".",
"Sentinel",
",",
"transforms",
".",
"_ValueList",
")",
"for",
"index",
",",
"key_field",
"in",
"enumerate",
"(",
"zip",
"(",
"order_keys",
",",
"document_fields",
")",
")",
":",
"key",
",",
"field",
"=",
"key_field",
"if",
"isinstance",
"(",
"field",
",",
"_transform_bases",
")",
":",
"msg",
"=",
"_INVALID_CURSOR_TRANSFORM",
"raise",
"ValueError",
"(",
"msg",
")",
"if",
"key",
"==",
"\"__name__\"",
"and",
"isinstance",
"(",
"field",
",",
"six",
".",
"string_types",
")",
":",
"document_fields",
"[",
"index",
"]",
"=",
"self",
".",
"_parent",
".",
"document",
"(",
"field",
")",
"return",
"document_fields",
",",
"before"
] | python | Helper: convert cursor to a list of values based on orders. | false |
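A simplified, Firestore-free sketch of the dict branch above: cursor fields are pulled out in order-by order and a missing field is an error. Snapshot handling, nested field paths, and the __name__ special case are deliberately omitted.

def cursor_values(cursor_fields, order_keys):
    # cursor_fields: dict supplied by the caller as the cursor
    # order_keys:    field names taken from the query's order_by clauses
    values = []
    for key in order_keys:
        if key not in cursor_fields:
            raise ValueError('cursor is missing order-by field %r' % key)
        values.append(cursor_fields[key])
    return values

assert cursor_values({'age': 30, 'name': 'b'}, ['age', 'name']) == [30, 'b']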
1,643,128 | def delete_snapshots(name, *names, **kwargs):
'''
Delete one or more snapshots of the given VM.
:param name: domain name
:param names: names of the snapshots to remove
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' virt.delete_snapshots <domain> all=True
salt '*' virt.delete_snapshots <domain> <snapshot>
salt '*' virt.delete_snapshots <domain> <snapshot1> <snapshot2> ...
'''
deleted = dict()
conn = __get_conn(**kwargs)
domain = _get_domain(conn, name)
for snap in domain.listAllSnapshots():
if snap.getName() in names or not names:
deleted[snap.getName()] = _parse_snapshot_description(snap)
snap.delete()
conn.close()
available = {name: [_parse_snapshot_description(snap) for snap in domain.listAllSnapshots()] or 'N/A'}
return {'available': available, 'deleted': deleted} | [
"def",
"delete_snapshots",
"(",
"name",
",",
"*",
"names",
",",
"**",
"kwargs",
")",
":",
"deleted",
"=",
"dict",
"(",
")",
"conn",
"=",
"__get_conn",
"(",
"**",
"kwargs",
")",
"domain",
"=",
"_get_domain",
"(",
"conn",
",",
"name",
")",
"for",
"snap",
"in",
"domain",
".",
"listAllSnapshots",
"(",
")",
":",
"if",
"snap",
".",
"getName",
"(",
")",
"in",
"names",
"or",
"not",
"names",
":",
"deleted",
"[",
"snap",
".",
"getName",
"(",
")",
"]",
"=",
"_parse_snapshot_description",
"(",
"snap",
")",
"snap",
".",
"delete",
"(",
")",
"conn",
".",
"close",
"(",
")",
"available",
"=",
"{",
"name",
":",
"[",
"_parse_snapshot_description",
"(",
"snap",
")",
"for",
"snap",
"in",
"domain",
".",
"listAllSnapshots",
"(",
")",
"]",
"or",
"'N/A'",
"}",
"return",
"{",
"'available'",
":",
"available",
",",
"'deleted'",
":",
"deleted",
"}"
] | python | Delete one or more snapshots of the given VM.
:param name: domain name
:param names: names of the snapshots to remove
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' virt.delete_snapshots <domain> all=True
salt '*' virt.delete_snapshots <domain> <snapshot>
salt '*' virt.delete_snapshots <domain> <snapshot1> <snapshot2> ... | false |
1,958,828 | def _Close(self):
"""Closes the file-like object."""
if self._database_object:
self._database_object.Close()
self._blob = None
self._current_offset = 0
self._size = 0
self._table_name = None | [
"def",
"_Close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_database_object",
":",
"self",
".",
"_database_object",
".",
"Close",
"(",
")",
"self",
".",
"_blob",
"=",
"None",
"self",
".",
"_current_offset",
"=",
"0",
"self",
".",
"_size",
"=",
"0",
"self",
".",
"_table_name",
"=",
"None"
] | python | Closes the file-like object. | false |
1,597,260 | def _validate_max(self, max_value, field, value):
""" {'nullable': False } """
try:
if value > max_value:
self._error(field, errors.MAX_VALUE)
except TypeError:
pass | [
"def",
"_validate_max",
"(",
"self",
",",
"max_value",
",",
"field",
",",
"value",
")",
":",
"try",
":",
"if",
"value",
">",
"max_value",
":",
"self",
".",
"_error",
"(",
"field",
",",
"errors",
".",
"MAX_VALUE",
")",
"except",
"TypeError",
":",
"pass"
] | python | {'nullable': False } | false |
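The try/except is what makes the rule tolerant of incomparable values; a standalone sketch of the same check:

def exceeds_max(value, max_value):
    # Mirrors _validate_max: values that cannot be compared to the bound
    # are silently accepted rather than flagged as errors.
    try:
        return value > max_value
    except TypeError:
        return False

assert exceeds_max(11, 10) is True
assert exceeds_max(None, 10) is False   # None > 10 raises TypeError on Python 3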
2,055,314 | def get_brandings(self):
"""
Get all account brandings
@return List of brandings
"""
connection = Connection(self.token)
connection.set_url(self.production, self.BRANDINGS_URL)
return connection.get_request() | [
"def",
"get_brandings",
"(",
"self",
")",
":",
"connection",
"=",
"Connection",
"(",
"self",
".",
"token",
")",
"connection",
".",
"set_url",
"(",
"self",
".",
"production",
",",
"self",
".",
"BRANDINGS_URL",
")",
"return",
"connection",
".",
"get_request",
"(",
")"
] | python | Get all account brandings
@return List of brandings | false |
2,057,201 | def auth_user_id(self, value):
"""The auth_user_id property.
Args:
value (string): the property value.
"""
if value == self._defaults['ai.user.authUserId'] and 'ai.user.authUserId' in self._values:
del self._values['ai.user.authUserId']
else:
self._values['ai.user.authUserId'] = value | [
"def",
"auth_user_id",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"==",
"self",
".",
"_defaults",
"[",
"'ai.user.authUserId'",
"]",
"and",
"'ai.user.authUserId'",
"in",
"self",
".",
"_values",
":",
"del",
"self",
".",
"_values",
"[",
"'ai.user.authUserId'",
"]",
"else",
":",
"self",
".",
"_values",
"[",
"'ai.user.authUserId'",
"]",
"=",
"value"
] | python | The auth_user_id property.
Args:
value (string): the property value. | false |
2,663,701 | def create_thumbnail(img, width, height):
"""
Create a thumbnail.
A thumbnail here simply means a scaled-down copy of the image.
:param img: image object
:param width: width
:param height: height
:return:
"""
size = (width, height)
img.thumbnail(size)
return img | [
"def",
"create_thumbnail",
"(",
"img",
",",
"width",
",",
"height",
")",
":",
"size",
"=",
"(",
"width",
",",
"height",
")",
"img",
".",
"thumbnail",
"(",
"size",
")",
"return",
"img"
] | python | Create a thumbnail.
A thumbnail here simply means a scaled-down copy of the image.
:param img: image object
:param width: width
:param height: height
:return: | false |
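An assumed Pillow usage example: Image.thumbnail shrinks in place while preserving aspect ratio, so the returned object is the same image resized to fit inside the requested box.

from PIL import Image

img = Image.new('RGB', (800, 600))
thumb = create_thumbnail(img, 200, 200)
print(thumb.size)    # (200, 150) -- scaled to fit within 200x200, aspect ratio kept
print(thumb is img)  # True -- thumbnail() modifies the image in place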
2,061,403 | def read_until_yieldable(self):
"""Read in additional chunks until it is yieldable."""
while not self.yieldable():
read_content, read_position = _get_next_chunk(self.fp, self.read_position, self.chunk_size)
self.add_to_buffer(read_content, read_position) | [
"def",
"read_until_yieldable",
"(",
"self",
")",
":",
"while",
"not",
"self",
".",
"yieldable",
"(",
")",
":",
"read_content",
",",
"read_position",
"=",
"_get_next_chunk",
"(",
"self",
".",
"fp",
",",
"self",
".",
"read_position",
",",
"self",
".",
"chunk_size",
")",
"self",
".",
"add_to_buffer",
"(",
"read_content",
",",
"read_position",
")"
] | python | Read in additional chunks until it is yieldable. | false |
2,691,956 | def __iadd__(self, other):
"""Put ModelElement(s) in this content.
:param ModelElement(s) other: other element(s) to put to this.
:return: self.
:raise: TypeError if other is not a ModelElement(s)."""
return self.__i(
other=other, func=lambda melt: self.__setitem__(melt.name, melt)
) | [
"def",
"__iadd__",
"(",
"self",
",",
"other",
")",
":",
"return",
"self",
".",
"__i",
"(",
"other",
"=",
"other",
",",
"func",
"=",
"lambda",
"melt",
":",
"self",
".",
"__setitem__",
"(",
"melt",
".",
"name",
",",
"melt",
")",
")"
] | python | Put ModelElement(s) in this content.
:param ModelElement(s) other: other element(s) to put to this.
:return: self.
:raise: TypeError if other is not a ModelElement(s). | false |
2,407,666 | def prepare_hmet(self):
"""
Prepare HMET data for simulation
"""
if self._prepare_lsm_hmet:
netcdf_file_path = None
hmet_ascii_output_folder = None
if self.output_netcdf:
netcdf_file_path = '{0}_hmet.nc'.format(self.project_manager.name)
if self.hotstart_minimal_mode:
netcdf_file_path = '{0}_hmet_hotstart.nc'.format(self.project_manager.name)
else:
hmet_ascii_output_folder = 'hmet_data_{0}to{1}'
if self.hotstart_minimal_mode:
hmet_ascii_output_folder += "_hotstart"
self.event_manager.prepare_hmet_lsm(self.lsm_data_var_map_array,
hmet_ascii_output_folder,
netcdf_file_path)
self.simulation_modified_input_cards += ["HMET_NETCDF",
"HMET_ASCII"]
else:
log.info("HMET preparation skipped due to missing parameters ...") | [
"def",
"prepare_hmet",
"(",
"self",
")",
":",
"if",
"self",
".",
"_prepare_lsm_hmet",
":",
"netcdf_file_path",
"=",
"None",
"hmet_ascii_output_folder",
"=",
"None",
"if",
"self",
".",
"output_netcdf",
":",
"netcdf_file_path",
"=",
"'{0}_hmet.nc'",
".",
"format",
"(",
"self",
".",
"project_manager",
".",
"name",
")",
"if",
"self",
".",
"hotstart_minimal_mode",
":",
"netcdf_file_path",
"=",
"'{0}_hmet_hotstart.nc'",
".",
"format",
"(",
"self",
".",
"project_manager",
".",
"name",
")",
"else",
":",
"hmet_ascii_output_folder",
"=",
"'hmet_data_{0}to{1}'",
"if",
"self",
".",
"hotstart_minimal_mode",
":",
"hmet_ascii_output_folder",
"+=",
"\"_hotstart\"",
"self",
".",
"event_manager",
".",
"prepare_hmet_lsm",
"(",
"self",
".",
"lsm_data_var_map_array",
",",
"hmet_ascii_output_folder",
",",
"netcdf_file_path",
")",
"self",
".",
"simulation_modified_input_cards",
"+=",
"[",
"\"HMET_NETCDF\"",
",",
"\"HMET_ASCII\"",
"]",
"else",
":",
"log",
".",
"info",
"(",
"\"HMET preparation skipped due to missing parameters ...\"",
")"
] | python | Prepare HMET data for simulation | false |
1,643,373 | def _determine_storage_repo(session, resource_pool, vm_):
'''
Called by create() used to determine storage repo for create
'''
storage_repo = ''
if 'storage_repo' in vm_.keys():
storage_repo = _get_sr(vm_['storage_repo'], session)
else:
storage_repo = None
if resource_pool:
default_sr = session.xenapi.pool.get_default_SR(resource_pool)
sr_record = session.xenapi.SR.get_record(default_sr)
log.debug('storage repository: %s', sr_record['name_label'])
storage_repo = default_sr
else:
storage_repo = None
log.debug('storage repository: %s', storage_repo)
return storage_repo | [
"def",
"_determine_storage_repo",
"(",
"session",
",",
"resource_pool",
",",
"vm_",
")",
":",
"storage_repo",
"=",
"''",
"if",
"'storage_repo'",
"in",
"vm_",
".",
"keys",
"(",
")",
":",
"storage_repo",
"=",
"_get_sr",
"(",
"vm_",
"[",
"'storage_repo'",
"]",
",",
"session",
")",
"else",
":",
"storage_repo",
"=",
"None",
"if",
"resource_pool",
":",
"default_sr",
"=",
"session",
".",
"xenapi",
".",
"pool",
".",
"get_default_SR",
"(",
"resource_pool",
")",
"sr_record",
"=",
"session",
".",
"xenapi",
".",
"SR",
".",
"get_record",
"(",
"default_sr",
")",
"log",
".",
"debug",
"(",
"'storage repository: %s'",
",",
"sr_record",
"[",
"'name_label'",
"]",
")",
"storage_repo",
"=",
"default_sr",
"else",
":",
"storage_repo",
"=",
"None",
"log",
".",
"debug",
"(",
"'storage repository: %s'",
",",
"storage_repo",
")",
"return",
"storage_repo"
] | python | Called by create() used to determine storage repo for create | false |
1,620,616 | def read_persistent_volume(self, name, **kwargs):
"""
read the specified PersistentVolume
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_persistent_volume(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PersistentVolume (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
:return: V1PersistentVolume
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_persistent_volume_with_http_info(name, **kwargs)
else:
(data) = self.read_persistent_volume_with_http_info(name, **kwargs)
return data | [
"def",
"read_persistent_volume",
"(",
"self",
",",
"name",
",",
"**",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"read_persistent_volume_with_http_info",
"(",
"name",
",",
"**",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"read_persistent_volume_with_http_info",
"(",
"name",
",",
"**",
"kwargs",
")",
"return",
"data"
] | python | read the specified PersistentVolume
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_persistent_volume(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PersistentVolume (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
:return: V1PersistentVolume
If the method is called asynchronously,
returns the request thread. | false |
2,183,935 | def write(self, records=None, path=None, fields=None, append=False,
gzip=None):
"""
Write the table to disk.
The basic usage has no arguments and writes the table's data
to the attached file. The parameters accommodate a variety of
use cases, such as using *fields* to refresh a table to a
new schema or *records* and *append* to incrementally build a
table.
Args:
records: an iterable of :class:`Record` objects to write;
if `None` the table's existing data is used
path: the destination file path; if `None` use the
path of the file attached to the table
fields (:class:`Relation`): table schema to use for
writing, otherwise use the current one
append: if `True`, append rather than overwrite
gzip: compress with gzip if non-empty
Examples:
>>> table.write()
>>> table.write(results, path='new/path/result')
"""
if path is None:
if not self.is_attached():
raise ItsdbError('no path given for detached table')
else:
path = self.path
path = _normalize_table_path(path)
dirpath, name = os.path.split(path)
if fields is None:
fields = self.fields
if records is None:
records = iter(self)
_write_table(
dirpath,
name,
records,
fields,
append=append,
gzip=gzip,
encoding=self.encoding)
if self.is_attached() and path == _normalize_table_path(self.path):
self.path = _table_filename(path)
self._sync_with_file() | [
"def",
"write",
"(",
"self",
",",
"records",
"=",
"None",
",",
"path",
"=",
"None",
",",
"fields",
"=",
"None",
",",
"append",
"=",
"False",
",",
"gzip",
"=",
"None",
")",
":",
"if",
"path",
"is",
"None",
":",
"if",
"not",
"self",
".",
"is_attached",
"(",
")",
":",
"raise",
"ItsdbError",
"(",
"'no path given for detached table'",
")",
"else",
":",
"path",
"=",
"self",
".",
"path",
"path",
"=",
"_normalize_table_path",
"(",
"path",
")",
"dirpath",
",",
"name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"path",
")",
"if",
"fields",
"is",
"None",
":",
"fields",
"=",
"self",
".",
"fields",
"if",
"records",
"is",
"None",
":",
"records",
"=",
"iter",
"(",
"self",
")",
"_write_table",
"(",
"dirpath",
",",
"name",
",",
"records",
",",
"fields",
",",
"append",
"=",
"append",
",",
"gzip",
"=",
"gzip",
",",
"encoding",
"=",
"self",
".",
"encoding",
")",
"if",
"self",
".",
"is_attached",
"(",
")",
"and",
"path",
"==",
"_normalize_table_path",
"(",
"self",
".",
"path",
")",
":",
"self",
".",
"path",
"=",
"_table_filename",
"(",
"path",
")",
"self",
".",
"_sync_with_file",
"(",
")"
] | python | Write the table to disk.
The basic usage has no arguments and writes the table's data
to the attached file. The parameters accommodate a variety of
use cases, such as using *fields* to refresh a table to a
new schema or *records* and *append* to incrementally build a
table.
Args:
records: an iterable of :class:`Record` objects to write;
if `None` the table's existing data is used
path: the destination file path; if `None` use the
path of the file attached to the table
fields (:class:`Relation`): table schema to use for
writing, otherwise use the current one
append: if `True`, append rather than overwrite
gzip: compress with gzip if non-empty
Examples:
>>> table.write()
>>> table.write(results, path='new/path/result') | false |
2,119,123 | def get_unit_spike_feature_names(self, unit_id=None):
'''This function returns the names of spike features for a single
unit or across all units (depending on the given unit_id).
Parameters
----------
unit_id: int
The unit_id for which the feature names will be returned. If None,
the function will return all feature names across all units.
Returns
----------
spike_features: list
A list of string names for each feature in the specified unit.
'''
if unit_id is None:
feature_names = []
for unit_id in self.get_unit_ids():
curr_feature_names = self.get_unit_spike_feature_names(unit_id)
for curr_feature_name in curr_feature_names:
feature_names.append(curr_feature_name)
feature_names = sorted(list(set(feature_names)))
return feature_names
if isinstance(unit_id, (int, np.integer)):
if unit_id in self.get_unit_ids():
if unit_id not in self._unit_features:
self._unit_features[unit_id] = {}
feature_names = sorted(self._unit_features[unit_id].keys())
return feature_names
else:
raise ValueError(str(unit_id) + " is not a valid unit_id")
else:
raise ValueError(str(unit_id) + " must be an int") | [
"def",
"get_unit_spike_feature_names",
"(",
"self",
",",
"unit_id",
"=",
"None",
")",
":",
"if",
"unit_id",
"is",
"None",
":",
"feature_names",
"=",
"[",
"]",
"for",
"unit_id",
"in",
"self",
".",
"get_unit_ids",
"(",
")",
":",
"curr_feature_names",
"=",
"self",
".",
"get_unit_spike_feature_names",
"(",
"unit_id",
")",
"for",
"curr_feature_name",
"in",
"curr_feature_names",
":",
"feature_names",
".",
"append",
"(",
"curr_feature_name",
")",
"feature_names",
"=",
"sorted",
"(",
"list",
"(",
"set",
"(",
"feature_names",
")",
")",
")",
"return",
"feature_names",
"if",
"isinstance",
"(",
"unit_id",
",",
"(",
"int",
",",
"np",
".",
"integer",
")",
")",
":",
"if",
"unit_id",
"in",
"self",
".",
"get_unit_ids",
"(",
")",
":",
"if",
"unit_id",
"not",
"in",
"self",
".",
"_unit_features",
":",
"self",
".",
"_unit_features",
"[",
"unit_id",
"]",
"=",
"{",
"}",
"feature_names",
"=",
"sorted",
"(",
"self",
".",
"_unit_features",
"[",
"unit_id",
"]",
".",
"keys",
"(",
")",
")",
"return",
"feature_names",
"else",
":",
"raise",
"ValueError",
"(",
"str",
"(",
"unit_id",
")",
"+",
"\" is not a valid unit_id\"",
")",
"else",
":",
"raise",
"ValueError",
"(",
"str",
"(",
"unit_id",
")",
"+",
"\" must be an int\"",
")"
] | python | This function returns the names of spike features for a single
unit or across all units (depending on the given unit_id).
Parameters
----------
unit_id: int
The unit_id for which the feature names will be returned. If None,
the function will return all feature names across all units.
Returns
----------
spike_features: list
A list of string names for each feature in the specified unit. | false |
2,213,343 | def request_set_sensor_unreachable(self, req, sensor_name):
"""Set sensor status to unreachable"""
sensor = self.get_sensor(sensor_name)
ts, status, value = sensor.read()
sensor.set_value(value, sensor.UNREACHABLE, ts)
return('ok',) | [
"def",
"request_set_sensor_unreachable",
"(",
"self",
",",
"req",
",",
"sensor_name",
")",
":",
"sensor",
"=",
"self",
".",
"get_sensor",
"(",
"sensor_name",
")",
"ts",
",",
"status",
",",
"value",
"=",
"sensor",
".",
"read",
"(",
")",
"sensor",
".",
"set_value",
"(",
"value",
",",
"sensor",
".",
"UNREACHABLE",
",",
"ts",
")",
"return",
"(",
"'ok'",
",",
")"
] | python | Set sensor status to unreachable | false |
2,361,850 | def associate(self, queue):
"""Merge this queue with another.
Both queues will use a shared command list and either one can be used
to fill or flush the shared queue.
"""
assert isinstance(queue, GlirQueue)
if queue._shared is self._shared:
return
# merge commands
self._shared._commands.extend(queue.clear())
self._shared._verbose |= queue._shared._verbose
self._shared._associations[queue] = None
# update queue and all related queues to use the same _shared object
for ch in queue._shared._associations:
ch._shared = self._shared
self._shared._associations[ch] = None
queue._shared = self._shared | [
"def",
"associate",
"(",
"self",
",",
"queue",
")",
":",
"assert",
"isinstance",
"(",
"queue",
",",
"GlirQueue",
")",
"if",
"queue",
".",
"_shared",
"is",
"self",
".",
"_shared",
":",
"return",
"self",
".",
"_shared",
".",
"_commands",
".",
"extend",
"(",
"queue",
".",
"clear",
"(",
")",
")",
"self",
".",
"_shared",
".",
"_verbose",
"|=",
"queue",
".",
"_shared",
".",
"_verbose",
"self",
".",
"_shared",
".",
"_associations",
"[",
"queue",
"]",
"=",
"None",
"for",
"ch",
"in",
"queue",
".",
"_shared",
".",
"_associations",
":",
"ch",
".",
"_shared",
"=",
"self",
".",
"_shared",
"self",
".",
"_shared",
".",
"_associations",
"[",
"ch",
"]",
"=",
"None",
"queue",
".",
"_shared",
"=",
"self",
".",
"_shared"
] | python | Merge this queue with another.
Both queues will use a shared command list and either one can be used
to fill or flush the shared queue. | false |
2,023,114 | def path_new_using_map(
m: tcod.map.Map, dcost: float = 1.41
) -> tcod.path.AStar:
"""Return a new AStar using the given Map.
Args:
m (Map): A Map instance.
dcost (float): The path-finding cost of diagonal movement.
Can be set to 0 to disable diagonal movement.
Returns:
AStar: A new AStar instance.
"""
return tcod.path.AStar(m, dcost) | [
"def",
"path_new_using_map",
"(",
"m",
":",
"tcod",
".",
"map",
".",
"Map",
",",
"dcost",
":",
"float",
"=",
"1.41",
")",
"->",
"tcod",
".",
"path",
".",
"AStar",
":",
"return",
"tcod",
".",
"path",
".",
"AStar",
"(",
"m",
",",
"dcost",
")"
] | python | Return a new AStar using the given Map.
Args:
m (Map): A Map instance.
dcost (float): The path-finding cost of diagonal movement.
Can be set to 0 to disable diagonal movement.
Returns:
AStar: A new AStar instance. | false |
1,776,804 | def get_max_sequence_id(self):
"""GetMaxSequenceId.
Read the max sequence id of all the identities.
:rtype: long
"""
response = self._send(http_method='GET',
location_id='e4a70778-cb2c-4e85-b7cc-3f3c7ae2d408',
version='5.0')
return self._deserialize('long', response) | [
"def",
"get_max_sequence_id",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'e4a70778-cb2c-4e85-b7cc-3f3c7ae2d408'",
",",
"version",
"=",
"'5.0'",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'long'",
",",
"response",
")"
] | python | GetMaxSequenceId.
Read the max sequence id of all the identities.
:rtype: long | false |
2,463,375 | def verifyExpanded(self, samplerate):
"""Checks the expanded parameters for invalidating conditions
:param samplerate: generation samplerate (Hz), passed on to component verification
:type samplerate: int
:returns: str -- error message, if any, 0 otherwise"""
results = self.expandFunction(self.verifyComponents, args=(samplerate,))
msg = [x for x in results if x]
if len(msg) > 0:
return msg[0]
else:
return 0 | [
"def",
"verifyExpanded",
"(",
"self",
",",
"samplerate",
")",
":",
"results",
"=",
"self",
".",
"expandFunction",
"(",
"self",
".",
"verifyComponents",
",",
"args",
"=",
"(",
"samplerate",
",",
")",
")",
"msg",
"=",
"[",
"x",
"for",
"x",
"in",
"results",
"if",
"x",
"]",
"if",
"len",
"(",
"msg",
")",
">",
"0",
":",
"return",
"msg",
"[",
"0",
"]",
"else",
":",
"return",
"0"
] | python | Checks the expanded parameters for invalidating conditions
:param samplerate: generation samplerate (Hz), passed on to component verification
:type samplerate: int
:returns: str -- error message, if any, 0 otherwise | false |
2,654,394 | def render_te_response(self, data):
"""Render data to JsonResponse"""
if 'submit_label' in data and 'url' not in data:
data['url'] = self.request.get_full_path()
return JsonResponse(data) | [
"def",
"render_te_response",
"(",
"self",
",",
"data",
")",
":",
"if",
"'submit_label'",
"in",
"data",
"and",
"'url'",
"not",
"in",
"data",
":",
"data",
"[",
"'url'",
"]",
"=",
"self",
".",
"request",
".",
"get_full_path",
"(",
")",
"return",
"JsonResponse",
"(",
"data",
")"
] | python | Render data to JsonResponse | false |
1,993,226 | def get_file(db, user_id, api_path, include_content, decrypt_func):
"""
Get file data for the given user_id and path.
Include content only if include_content=True.
"""
query_fields = _file_default_fields()
if include_content:
query_fields.append(files.c.content)
return _get_file(db, user_id, api_path, query_fields, decrypt_func) | [
"def",
"get_file",
"(",
"db",
",",
"user_id",
",",
"api_path",
",",
"include_content",
",",
"decrypt_func",
")",
":",
"query_fields",
"=",
"_file_default_fields",
"(",
")",
"if",
"include_content",
":",
"query_fields",
".",
"append",
"(",
"files",
".",
"c",
".",
"content",
")",
"return",
"_get_file",
"(",
"db",
",",
"user_id",
",",
"api_path",
",",
"query_fields",
",",
"decrypt_func",
")"
] | python | Get file data for the given user_id and path.
Include content only if include_content=True. | false |
2,175,432 | def _get_samples(self, samples):
"""
Internal function. Prelude for each step() to read in perhaps
non empty list of samples to process. Input is a list of sample names,
output is a list of sample objects."""
## if samples not entered use all samples
if not samples:
samples = self.samples.keys()
## Be nice and allow user to pass in only one sample as a string,
## rather than a one element list. When you make the string into a list
## you have to wrap it in square braces or else list makes a list of
## each character individually.
if isinstance(samples, str):
samples = list([samples])
## if sample keys, replace with sample obj
assert isinstance(samples, list), \
"to subselect samples enter as a list, e.g., [A, B]."
newsamples = [self.samples.get(key) for key in samples \
if self.samples.get(key)]
strnewsamples = [i.name for i in newsamples]
## are there any samples that did not make it into the dict?
badsamples = set(samples).difference(set(strnewsamples))
if badsamples:
outstring = ", ".join(badsamples)
raise IPyradError(\
"Unrecognized Sample name(s) not linked to {}: {}"\
.format(self.name, outstring))
## require Samples
assert newsamples, \
"No Samples passed in and none in assembly {}".format(self.name)
return newsamples | [
"def",
"_get_samples",
"(",
"self",
",",
"samples",
")",
":",
"if",
"not",
"samples",
":",
"samples",
"=",
"self",
".",
"samples",
".",
"keys",
"(",
")",
"if",
"isinstance",
"(",
"samples",
",",
"str",
")",
":",
"samples",
"=",
"list",
"(",
"[",
"samples",
"]",
")",
"assert",
"isinstance",
"(",
"samples",
",",
"list",
")",
",",
"\"to subselect samples enter as a list, e.g., [A, B].\"",
"newsamples",
"=",
"[",
"self",
".",
"samples",
".",
"get",
"(",
"key",
")",
"for",
"key",
"in",
"samples",
"if",
"self",
".",
"samples",
".",
"get",
"(",
"key",
")",
"]",
"strnewsamples",
"=",
"[",
"i",
".",
"name",
"for",
"i",
"in",
"newsamples",
"]",
"badsamples",
"=",
"set",
"(",
"samples",
")",
".",
"difference",
"(",
"set",
"(",
"strnewsamples",
")",
")",
"if",
"badsamples",
":",
"outstring",
"=",
"\", \"",
".",
"join",
"(",
"badsamples",
")",
"raise",
"IPyradError",
"(",
"\"Unrecognized Sample name(s) not linked to {}: {}\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"outstring",
")",
")",
"assert",
"newsamples",
",",
"\"No Samples passed in and none in assembly {}\"",
".",
"format",
"(",
"self",
".",
"name",
")",
"return",
"newsamples"
] | python | Internal function. Prelude for each step() to read in perhaps
non empty list of samples to process. Input is a list of sample names,
output is a list of sample objects. | false |
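The string-versus-list normalization is the part most worth isolating; a standalone sketch with a plain dict standing in for the assembly's samples:

def select_samples(requested, known):
    # Accept nothing (meaning "all"), a single name, or a list of names,
    # and reject names that are not linked to the assembly.
    if not requested:
        requested = list(known)
    if isinstance(requested, str):
        requested = [requested]
    missing = set(requested) - set(known)
    if missing:
        raise ValueError('unrecognized sample name(s): %s' % ', '.join(sorted(missing)))
    return [known[name] for name in requested]

known = {'A': object(), 'B': object()}
assert len(select_samples(None, known)) == 2     # no selection -> all samples
assert len(select_samples('A', known)) == 1      # bare string is wrapped in a list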
1,762,239 | def _run_delly(bam_files, chrom, ref_file, work_dir, items):
"""Run delly, calling structural variations for the specified type.
"""
batch = sshared.get_cur_batch(items)
ext = "-%s-svs" % batch if batch else "-svs"
out_file = os.path.join(work_dir, "%s%s-%s.bcf"
% (os.path.splitext(os.path.basename(bam_files[0]))[0], ext, chrom))
final_file = "%s.vcf.gz" % (utils.splitext_plus(out_file)[0])
cores = min(utils.get_in(items[0], ("config", "algorithm", "num_cores"), 1),
len(bam_files))
if not utils.file_exists(out_file) and not utils.file_exists(final_file):
with file_transaction(items[0], out_file) as tx_out_file:
if sshared.has_variant_regions(items, out_file, chrom):
exclude = ["-x", _delly_exclude_file(items, out_file, chrom)]
cmd = ["delly", "call", "-g", ref_file, "-o", tx_out_file] + exclude + bam_files
multi_cmd = "export OMP_NUM_THREADS=%s && export LC_ALL=C && " % cores
try:
do.run(multi_cmd + " ".join(cmd), "delly structural variant")
except subprocess.CalledProcessError as msg:
# Small input samples, write an empty vcf
if "Sample has not enough data to estimate library parameters" in str(msg):
pass
# delly returns an error exit code if there are no variants
elif "No structural variants found" not in str(msg):
raise
return [_bgzip_and_clean(out_file, items)] | [
"def",
"_run_delly",
"(",
"bam_files",
",",
"chrom",
",",
"ref_file",
",",
"work_dir",
",",
"items",
")",
":",
"batch",
"=",
"sshared",
".",
"get_cur_batch",
"(",
"items",
")",
"ext",
"=",
"\"-%s-svs\"",
"%",
"batch",
"if",
"batch",
"else",
"\"-svs\"",
"out_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"\"%s%s-%s.bcf\"",
"%",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"bam_files",
"[",
"0",
"]",
")",
")",
"[",
"0",
"]",
",",
"ext",
",",
"chrom",
")",
")",
"final_file",
"=",
"\"%s.vcf.gz\"",
"%",
"(",
"utils",
".",
"splitext_plus",
"(",
"out_file",
")",
"[",
"0",
"]",
")",
"cores",
"=",
"min",
"(",
"utils",
".",
"get_in",
"(",
"items",
"[",
"0",
"]",
",",
"(",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"num_cores\"",
")",
",",
"1",
")",
",",
"len",
"(",
"bam_files",
")",
")",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"out_file",
")",
"and",
"not",
"utils",
".",
"file_exists",
"(",
"final_file",
")",
":",
"with",
"file_transaction",
"(",
"items",
"[",
"0",
"]",
",",
"out_file",
")",
"as",
"tx_out_file",
":",
"if",
"sshared",
".",
"has_variant_regions",
"(",
"items",
",",
"out_file",
",",
"chrom",
")",
":",
"exclude",
"=",
"[",
"\"-x\"",
",",
"_delly_exclude_file",
"(",
"items",
",",
"out_file",
",",
"chrom",
")",
"]",
"cmd",
"=",
"[",
"\"delly\"",
",",
"\"call\"",
",",
"\"-g\"",
",",
"ref_file",
",",
"\"-o\"",
",",
"tx_out_file",
"]",
"+",
"exclude",
"+",
"bam_files",
"multi_cmd",
"=",
"\"export OMP_NUM_THREADS=%s && export LC_ALL=C && \"",
"%",
"cores",
"try",
":",
"do",
".",
"run",
"(",
"multi_cmd",
"+",
"\" \"",
".",
"join",
"(",
"cmd",
")",
",",
"\"delly structural variant\"",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"msg",
":",
"if",
"\"Sample has not enough data to estimate library parameters\"",
"in",
"str",
"(",
"msg",
")",
":",
"pass",
"elif",
"\"No structural variants found\"",
"not",
"in",
"str",
"(",
"msg",
")",
":",
"raise",
"return",
"[",
"_bgzip_and_clean",
"(",
"out_file",
",",
"items",
")",
"]"
] | python | Run delly, calling structural variations for the specified type. | false |
1,670,753 | def __init__(self,
output_sizes,
activation=tf.nn.relu,
activate_final=False,
initializers=None,
partitioners=None,
regularizers=None,
use_bias=True,
use_dropout=False,
custom_getter=None,
name="mlp"):
"""Constructs an MLP module.
Args:
output_sizes: An iterable of output dimensionalities as defined in
`basic.Linear`. Output size can be defined either as number or via a
callable. In the latter case, since the function invocation is deferred
to graph construction time, the user must only ensure that entries can
be called when build is called. Each entry in the iterable defines
properties in the corresponding linear layer.
activation: An activation op. The activation is applied to intermediate
layers, and optionally to the output of the final layer.
activate_final: Boolean determining if the activation is applied to
the output of the final layer. Default `False`.
initializers: Optional dict containing ops to initialize the linear
layers' weights (with key 'w') or biases (with key 'b').
partitioners: Optional dict containing partitioners to partition the
linear layers' weights (with key 'w') or biases (with key 'b').
regularizers: Optional dict containing regularizers for the linear layers'
weights (with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes
a single `Tensor` as an input and returns a scalar `Tensor` output, e.g.
the L1 and L2 regularizers in `tf.contrib.layers`.
use_bias: Whether to include bias parameters in the linear layers.
Default `True`.
use_dropout: Whether to perform dropout on the linear layers.
Default `False`.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the `tf.get_variable`
documentation for information about the custom_getter API.
name: Name of the module.
Raises:
KeyError: If initializers contains any keys other than 'w' or 'b'.
KeyError: If regularizers contains any keys other than 'w' or 'b'.
ValueError: If output_sizes is empty.
TypeError: If `activation` is not callable; or if `output_sizes` is not
iterable.
"""
super(MLP, self).__init__(custom_getter=custom_getter, name=name)
if not isinstance(output_sizes, collections.Iterable):
raise TypeError("output_sizes must be iterable")
output_sizes = tuple(output_sizes)
if not output_sizes:
raise ValueError("output_sizes must not be empty")
self._output_sizes = output_sizes
self._num_layers = len(self._output_sizes)
self._input_shape = None
self.possible_keys = self.get_possible_initializer_keys(use_bias=use_bias)
self._initializers = util.check_initializers(
initializers, self.possible_keys)
self._partitioners = util.check_partitioners(
partitioners, self.possible_keys)
self._regularizers = util.check_regularizers(
regularizers, self.possible_keys)
if not callable(activation):
raise TypeError("Input 'activation' must be callable")
self._activation = activation
self._activate_final = activate_final
self._use_bias = use_bias
self._use_dropout = use_dropout
self._instantiate_layers() | [
"def",
"__init__",
"(",
"self",
",",
"output_sizes",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"activate_final",
"=",
"False",
",",
"initializers",
"=",
"None",
",",
"partitioners",
"=",
"None",
",",
"regularizers",
"=",
"None",
",",
"use_bias",
"=",
"True",
",",
"use_dropout",
"=",
"False",
",",
"custom_getter",
"=",
"None",
",",
"name",
"=",
"\"mlp\"",
")",
":",
"super",
"(",
"MLP",
",",
"self",
")",
".",
"__init__",
"(",
"custom_getter",
"=",
"custom_getter",
",",
"name",
"=",
"name",
")",
"if",
"not",
"isinstance",
"(",
"output_sizes",
",",
"collections",
".",
"Iterable",
")",
":",
"raise",
"TypeError",
"(",
"\"output_sizes must be iterable\"",
")",
"output_sizes",
"=",
"tuple",
"(",
"output_sizes",
")",
"if",
"not",
"output_sizes",
":",
"raise",
"ValueError",
"(",
"\"output_sizes must not be empty\"",
")",
"self",
".",
"_output_sizes",
"=",
"output_sizes",
"self",
".",
"_num_layers",
"=",
"len",
"(",
"self",
".",
"_output_sizes",
")",
"self",
".",
"_input_shape",
"=",
"None",
"self",
".",
"possible_keys",
"=",
"self",
".",
"get_possible_initializer_keys",
"(",
"use_bias",
"=",
"use_bias",
")",
"self",
".",
"_initializers",
"=",
"util",
".",
"check_initializers",
"(",
"initializers",
",",
"self",
".",
"possible_keys",
")",
"self",
".",
"_partitioners",
"=",
"util",
".",
"check_partitioners",
"(",
"partitioners",
",",
"self",
".",
"possible_keys",
")",
"self",
".",
"_regularizers",
"=",
"util",
".",
"check_regularizers",
"(",
"regularizers",
",",
"self",
".",
"possible_keys",
")",
"if",
"not",
"callable",
"(",
"activation",
")",
":",
"raise",
"TypeError",
"(",
"\"Input 'activation' must be callable\"",
")",
"self",
".",
"_activation",
"=",
"activation",
"self",
".",
"_activate_final",
"=",
"activate_final",
"self",
".",
"_use_bias",
"=",
"use_bias",
"self",
".",
"_use_dropout",
"=",
"use_dropout",
"self",
".",
"_instantiate_layers",
"(",
")"
] | python | Constructs an MLP module.
Args:
output_sizes: An iterable of output dimensionalities as defined in
`basic.Linear`. Output size can be defined either as number or via a
callable. In the latter case, since the function invocation is deferred
to graph construction time, the user must only ensure that entries can
be called when build is called. Each entry in the iterable defines
properties in the corresponding linear layer.
activation: An activation op. The activation is applied to intermediate
layers, and optionally to the output of the final layer.
activate_final: Boolean determining if the activation is applied to
the output of the final layer. Default `False`.
initializers: Optional dict containing ops to initialize the linear
layers' weights (with key 'w') or biases (with key 'b').
partitioners: Optional dict containing partitioners to partition the
linear layers' weights (with key 'w') or biases (with key 'b').
regularizers: Optional dict containing regularizers for the linear layers'
weights (with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes
a single `Tensor` as an input and returns a scalar `Tensor` output, e.g.
the L1 and L2 regularizers in `tf.contrib.layers`.
use_bias: Whether to include bias parameters in the linear layers.
Default `True`.
use_dropout: Whether to perform dropout on the linear layers.
Default `False`.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the `tf.get_variable`
documentation for information about the custom_getter API.
name: Name of the module.
Raises:
KeyError: If initializers contains any keys other than 'w' or 'b'.
KeyError: If regularizers contains any keys other than 'w' or 'b'.
ValueError: If output_sizes is empty.
TypeError: If `activation` is not callable; or if `output_sizes` is not
iterable. | false |
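A minimal usage sketch for the constructor documented above, assuming the class is exposed as `snt.nets.MLP` in the Sonnet 1.x / TensorFlow 1.x API; the layer sizes and dummy input shape are arbitrary.

import tensorflow as tf
import sonnet as snt

# Three linear layers of sizes 128, 64 and 10, with ReLU applied between them
mlp = snt.nets.MLP(output_sizes=[128, 64, 10],
                   activation=tf.nn.relu,
                   activate_final=False,
                   use_bias=True)
outputs = mlp(tf.zeros([8, 32]))   # connect the module to a dummy batch of 8 inputs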
1,734,656 | def min_count(self, n=1):
""" Returns a vocabulary after eliminating the words that appear < `n`.
Args:
n (integer): specifies the minimum word frequency allowed.
"""
word_count = {w:c for w,c in iteritems(self.word_count) if c >= n}
return CountedVocabulary(word_count=word_count) | [
"def",
"min_count",
"(",
"self",
",",
"n",
"=",
"1",
")",
":",
"word_count",
"=",
"{",
"w",
":",
"c",
"for",
"w",
",",
"c",
"in",
"iteritems",
"(",
"self",
".",
"word_count",
")",
"if",
"c",
">=",
"n",
"}",
"return",
"CountedVocabulary",
"(",
"word_count",
"=",
"word_count",
")"
] | python | Returns a vocabulary after eliminating the words that appear fewer than `n` times.
Args:
n (integer): specifies the minimum word frequency allowed. | false |
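The filtering step above is a single dictionary comprehension over word counts; the sketch below reproduces it with a plain `Counter` (the `CountedVocabulary` wrapper itself is omitted).

from collections import Counter

word_count = Counter("the cat sat on the mat and the dog sat".split())
n = 2
kept = {w: c for w, c in word_count.items() if c >= n}
# kept == {'the': 3, 'sat': 2}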
2,559,774 | def check_call_out(command):
"""
Run the given command (with shell=False) and return the output as a
string. Strip the output of enclosing whitespace.
If the return code is non-zero, throw GitInvocationError.
"""
# start external command process
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# get outputs
out, _ = p.communicate()
# throw exception if process failed
if p.returncode != 0:
raise GitInvocationError('failed to run "%s"' % " ".join(command))
return out.strip() | [
"def",
"check_call_out",
"(",
"command",
")",
":",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"command",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"out",
",",
"_",
"=",
"p",
".",
"communicate",
"(",
")",
"if",
"p",
".",
"returncode",
"!=",
"0",
":",
"raise",
"GitInvocationError",
",",
"'failed to run \"%s\"'",
"%",
"\" \"",
".",
"join",
"(",
"command",
")",
"return",
"out",
".",
"strip",
"(",
")"
] | python | Run the given command (with shell=False) and return the output as a
string. Strip the output of enclosing whitespace.
If the return code is non-zero, throw GitInvocationError. | false |
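A hedged usage sketch of the same pattern using only the standard library; the `git rev-parse` command is an arbitrary example, and a generic `RuntimeError` stands in for `GitInvocationError`.

import subprocess

command = ["git", "rev-parse", "--abbrev-ref", "HEAD"]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, _ = p.communicate()
if p.returncode != 0:
    raise RuntimeError('failed to run "%s"' % " ".join(command))
print(out.strip())   # e.g. b'main' (bytes on Python 3)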
2,419,905 | def call(self, route, parameters, mimetype="application/ld+json", defaults=None):
""" Call an endpoint given the parameters
:param route: Name of the route which is called
:type route: str
:param parameters: Dictionary of parameters
:type parameters: dict
:param mimetype: Mimetype to require
:type mimetype: str
:rtype: text
"""
if not defaults:
defaults = {}
parameters = {
key: str(parameters[key])
for key in parameters
if parameters[key] is not None and
parameters[key] != defaults.get(key, None)
}
parameters.update(self.routes[route].query_dict)
request = requests.get(
self.routes[route].path,
params=parameters,
headers={
"Accept": mimetype,
"Accept-Charset": "utf-8",
"User-Agent": "MyCapytain/{MyCapVersion} {DefaultRequestUA}".format(
MyCapVersion=__version__,
DefaultRequestUA=requests.utils.default_user_agent()
)
}
)
request.raise_for_status()
if request.encoding is None:
request.encoding = "utf-8"
return request | [
"def",
"call",
"(",
"self",
",",
"route",
",",
"parameters",
",",
"mimetype",
"=",
"\"application/ld+json\"",
",",
"defaults",
"=",
"None",
")",
":",
"if",
"not",
"defaults",
":",
"defaults",
"=",
"{",
"}",
"parameters",
"=",
"{",
"key",
":",
"str",
"(",
"parameters",
"[",
"key",
"]",
")",
"for",
"key",
"in",
"parameters",
"if",
"parameters",
"[",
"key",
"]",
"is",
"not",
"None",
"and",
"parameters",
"[",
"key",
"]",
"!=",
"defaults",
".",
"get",
"(",
"key",
",",
"None",
")",
"}",
"parameters",
".",
"update",
"(",
"self",
".",
"routes",
"[",
"route",
"]",
".",
"query_dict",
")",
"request",
"=",
"requests",
".",
"get",
"(",
"self",
".",
"routes",
"[",
"route",
"]",
".",
"path",
",",
"params",
"=",
"parameters",
",",
"headers",
"=",
"{",
"\"Accept\"",
":",
"mimetype",
",",
"\"Accept-Charset\"",
":",
"\"utf-8\"",
",",
"\"User-Agent\"",
":",
"\"MyCapytain/{MyCapVersion} {DefaultRequestUA}\"",
".",
"format",
"(",
"MyCapVersion",
"=",
"__version__",
",",
"DefaultRequestUA",
"=",
"requests",
".",
"utils",
".",
"default_user_agent",
"(",
")",
")",
"}",
")",
"request",
".",
"raise_for_status",
"(",
")",
"if",
"request",
".",
"encoding",
"is",
"None",
":",
"request",
".",
"encoding",
"=",
"\"utf-8\"",
"return",
"request"
] | python | Call an endpoint given the parameters
:param route: Name of the route which is called
:type route: str
:param parameters: Dictionary of parameters
:type parameters: dict
:param mimetype: Mimetype to require
:type mimetype: str
:rtype: requests.Response | false |
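The core of the method above is dropping parameters that are unset or equal to their defaults before issuing a GET with an Accept header; a standalone sketch with `requests` (the endpoint URL and parameter names are hypothetical):

import requests

defaults = {"level": "1"}
parameters = {"urn": "urn:cts:latinLit:phi1294.phi002", "level": "1", "format": None}
query = {k: str(v) for k, v in parameters.items()
         if v is not None and v != defaults.get(k)}
# only {'urn': ...} survives: None values and default-valued keys are dropped
response = requests.get("https://cts.example.org/api/cts",   # hypothetical endpoint
                        params=query,
                        headers={"Accept": "application/ld+json",
                                 "Accept-Charset": "utf-8"})
response.raise_for_status()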
2,599,377 | def relative_to_all(features, groups, bin_edges, weight_func,
use_orig_distr,
group_ids, num_groups,
return_networkx_graph, out_weights_path):
"""
Computes the given function (aka weight or distance) between the histogram of each group and a "grand histogram" derived from all groups.
Parameters
----------
features : ndarray or str
1d array of scalar values, either provided directly as a 1d numpy array,
or as a path to a file containing these values
groups : ndarray or str
Membership array of same length as `features`, each value specifying which group that particular node belongs to.
Input can be either provided directly as a 1d numpy array,or as a path to a file containing these values.
For example, if you have cortical thickness values for 1000 vertices (`features` is ndarray of length 1000),
belonging to 100 patches, the groups array (of length 1000) could have numbers 1 to 100 (number of unique values)
specifying which element belongs to which cortical patch.
Grouping with numerical values (contiguous from 1 to num_patches) is strongly recommended for simplicity,
but this could also be a list of strings of length p, in which case a tuple is returned,
identifying which weight belongs to which pair of patches.
bin_edges : list or ndarray
Array of bin edges within which to compute the histogram in.
weight_func : callable
Function to compute the edge weight between groups/nodes.
use_orig_distr : bool, optional
When using a user-defined callable, this flag
1) allows skipping of pre-processing (trimming outliers) and histogram construction,
2) enables the application of arbitrary callable (user-defined) on the original distributions coming from the two groups/ROIs/nodes directly.
Example: ``diff_in_medians = lambda x, y: abs(np.median(x)-np.median(y))``
This option is valid only when weight_method is a valid callable,
which must take two inputs (possibly of different lengths) and return a single scalar.
group_ids : list
List of unique group ids to construct the nodes from (must all be present in the `groups` argument)
num_groups : int
Number of unique groups in the `group_ids`
return_networkx_graph : bool, optional
Specifies the need for a networkx graph populated with weights computed. Default: False.
out_weights_path : str, optional
Where to save the extracted weight matrix. If networkx output is returned, it would be saved in GraphML format.
Default: nothing saved unless instructed.
Returns
-------
distance_vector : ndarray
vector of distances between the grand histogram and the individual ROIs
Raises
------
ValueError
If one or more of the arrays are empty.
"""
# notice the use of all features without regard to group membership
hist_whole = compute_histogram(features, bin_edges, use_orig_distr)
# to identify the central node capturing distribution from all roi's
whole_node = 'whole'
if return_networkx_graph:
graph = nx.Graph()
graph.add_nodes_from(group_ids)
graph.add_node(whole_node)
else:
edge_weights = np.full([num_groups, 1], np.nan)
for src in range(num_groups):
index_roi = groups == group_ids[src]
hist_roi = compute_histogram(features[index_roi], bin_edges, use_orig_distr)
edge_value = weight_func(hist_whole, hist_roi)
if return_networkx_graph:
graph.add_edge(group_ids[src], whole_node, weight=float(edge_value))
else:
edge_weights[src] = edge_value
if return_networkx_graph:
if out_weights_path is not None:
nx.write_graphml(graph, out_weights_path)
return graph
else:
if out_weights_path is not None:
np.savetxt(out_weights_path, edge_weights, delimiter=',', fmt='%.9f')
return edge_weights | [
"def",
"relative_to_all",
"(",
"features",
",",
"groups",
",",
"bin_edges",
",",
"weight_func",
",",
"use_orig_distr",
",",
"group_ids",
",",
"num_groups",
",",
"return_networkx_graph",
",",
"out_weights_path",
")",
":",
"hist_whole",
"=",
"compute_histogram",
"(",
"features",
",",
"bin_edges",
",",
"use_orig_distr",
")",
"whole_node",
"=",
"'whole'",
"if",
"return_networkx_graph",
":",
"graph",
"=",
"nx",
".",
"Graph",
"(",
")",
"graph",
".",
"add_nodes_from",
"(",
"group_ids",
")",
"graph",
".",
"add_node",
"(",
"whole_node",
")",
"else",
":",
"edge_weights",
"=",
"np",
".",
"full",
"(",
"[",
"num_groups",
",",
"1",
"]",
",",
"np",
".",
"nan",
")",
"for",
"src",
"in",
"range",
"(",
"num_groups",
")",
":",
"index_roi",
"=",
"groups",
"==",
"group_ids",
"[",
"src",
"]",
"hist_roi",
"=",
"compute_histogram",
"(",
"features",
"[",
"index_roi",
"]",
",",
"bin_edges",
",",
"use_orig_distr",
")",
"edge_value",
"=",
"weight_func",
"(",
"hist_whole",
",",
"hist_roi",
")",
"if",
"return_networkx_graph",
":",
"graph",
".",
"add_edge",
"(",
"group_ids",
"[",
"src",
"]",
",",
"whole_node",
",",
"weight",
"=",
"float",
"(",
"edge_value",
")",
")",
"else",
":",
"edge_weights",
"[",
"src",
"]",
"=",
"edge_value",
"if",
"return_networkx_graph",
":",
"if",
"out_weights_path",
"is",
"not",
"None",
":",
"graph",
".",
"write_graphml",
"(",
"out_weights_path",
")",
"return",
"graph",
"else",
":",
"if",
"out_weights_path",
"is",
"not",
"None",
":",
"np",
".",
"savetxt",
"(",
"out_weights_path",
",",
"edge_weights",
",",
"delimiter",
"=",
"','",
",",
"fmt",
"=",
"'%.9f'",
")",
"return",
"edge_weights"
] | python | Computes the given function (aka weight or distance) between the histogram of each group and a "grand histogram" derived from all groups.
Parameters
----------
features : ndarray or str
1d array of scalar values, either provided directly as a 1d numpy array,
or as a path to a file containing these values
groups : ndarray or str
Membership array of same length as `features`, each value specifying which group that particular node belongs to.
Input can be either provided directly as a 1d numpy array,or as a path to a file containing these values.
For example, if you have cortical thickness values for 1000 vertices (`features` is ndarray of length 1000),
belonging to 100 patches, the groups array (of length 1000) could have numbers 1 to 100 (number of unique values)
specifying which element belongs to which cortical patch.
Grouping with numerical values (contiguous from 1 to num_patches) is strongly recommended for simplicity,
but this could also be a list of strings of length p, in which case a tuple is returned,
identifying which weight belongs to which pair of patches.
bin_edges : list or ndarray
Array of bin edges within which to compute the histogram in.
weight_func : callable
Function to compute the edge weight between groups/nodes.
use_orig_distr : bool, optional
When using a user-defined callable, this flag
1) allows skipping of pre-processing (trimming outliers) and histogram construction,
2) enables the application of arbitrary callable (user-defined) on the original distributions coming from the two groups/ROIs/nodes directly.
Example: ``diff_in_medians = lambda x, y: abs(np.median(x)-np.median(y))``
This option is valid only when weight_method is a valid callable,
which must take two inputs (possibly of different lengths) and return a single scalar.
group_ids : list
List of unique group ids to construct the nodes from (must all be present in the `groups` argument)
num_groups : int
Number of unique groups in the `group_ids`
return_networkx_graph : bool, optional
Specifies the need for a networkx graph populated with weights computed. Default: False.
out_weights_path : str, optional
Where to save the extracted weight matrix. If networkx output is returned, it would be saved in GraphML format.
Default: nothing saved unless instructed.
Returns
-------
distance_vector : ndarray
vector of distances between the grand histogram and the individual ROIs
Raises
------
ValueError
If one or more of the arrays are empty. | false |
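A compact numpy-only sketch of the computation described above: build the grand histogram from all features, then compute one weight per group against it. A simple max-absolute-difference weight stands in for the configurable `weight_func`, and the group layout is synthetic.

import numpy as np

rng = np.random.default_rng(0)
features = rng.normal(size=1000)
groups = np.repeat(np.arange(10), 100)            # 10 groups of 100 values each
bin_edges = np.linspace(features.min(), features.max(), 26)

hist_whole, _ = np.histogram(features, bins=bin_edges, density=True)
weight_func = lambda x, y: np.max(np.abs(x - y))  # stand-in edge weight

edge_weights = np.array([
    weight_func(hist_whole,
                np.histogram(features[groups == g], bins=bin_edges, density=True)[0])
    for g in np.unique(groups)
])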
2,353,350 | def parseProcCmd(self, fields=('pid', 'user', 'cmd',), threads=False):
"""Execute ps command with custom output format with columns from
fields and return result as a nested list.
The Standard Format Specifiers from ps man page must be used for the
fields parameter.
@param fields: List of fields included in the output.
Default: pid, user, cmd
@param threads: If True, include threads in output.
@return: Dict with 'headers' (list of column names) and 'stats' (list of rows), or None if there is no output.
"""
args = []
headers = [f.lower() for f in fields]
args.append('--no-headers')
args.append('-e')
if threads:
args.append('-T')
field_ranges = []
fmt_strs = []
start = 0
for header in headers:
field_width = psFieldWidth.get(header, psDefaultFieldWidth)
fmt_strs.append('%s:%d' % (header, field_width))
end = start + field_width + 1
field_ranges.append((start,end))
start = end
args.append('-o')
args.append(','.join(fmt_strs))
lines = self.execProcCmd(*args)
if len(lines) > 0:
stats = []
for line in lines:
cols = []
for (start, end) in field_ranges:
cols.append(line[start:end].strip())
stats.append(cols)
return {'headers': headers, 'stats': stats}
else:
return None | [
"def",
"parseProcCmd",
"(",
"self",
",",
"fields",
"=",
"(",
"'pid'",
",",
"'user'",
",",
"'cmd'",
",",
")",
",",
"threads",
"=",
"False",
")",
":",
"args",
"=",
"[",
"]",
"headers",
"=",
"[",
"f",
".",
"lower",
"(",
")",
"for",
"f",
"in",
"fields",
"]",
"args",
".",
"append",
"(",
"'--no-headers'",
")",
"args",
".",
"append",
"(",
"'-e'",
")",
"if",
"threads",
":",
"args",
".",
"append",
"(",
"'-T'",
")",
"field_ranges",
"=",
"[",
"]",
"fmt_strs",
"=",
"[",
"]",
"start",
"=",
"0",
"for",
"header",
"in",
"headers",
":",
"field_width",
"=",
"psFieldWidth",
".",
"get",
"(",
"header",
",",
"psDefaultFieldWidth",
")",
"fmt_strs",
".",
"append",
"(",
"'%s:%d'",
"%",
"(",
"header",
",",
"field_width",
")",
")",
"end",
"=",
"start",
"+",
"field_width",
"+",
"1",
"field_ranges",
".",
"append",
"(",
"(",
"start",
",",
"end",
")",
")",
"start",
"=",
"end",
"args",
".",
"append",
"(",
"'-o'",
")",
"args",
".",
"append",
"(",
"','",
".",
"join",
"(",
"fmt_strs",
")",
")",
"lines",
"=",
"self",
".",
"execProcCmd",
"(",
"*",
"args",
")",
"if",
"len",
"(",
"lines",
")",
">",
"0",
":",
"stats",
"=",
"[",
"]",
"for",
"line",
"in",
"lines",
":",
"cols",
"=",
"[",
"]",
"for",
"(",
"start",
",",
"end",
")",
"in",
"field_ranges",
":",
"cols",
".",
"append",
"(",
"line",
"[",
"start",
":",
"end",
"]",
".",
"strip",
"(",
")",
")",
"stats",
".",
"append",
"(",
"cols",
")",
"return",
"{",
"'headers'",
":",
"headers",
",",
"'stats'",
":",
"stats",
"}",
"else",
":",
"return",
"None"
] | python | Execute ps command with custom output format with columns from
fields and return the result as a dict of headers and rows.
The Standard Format Specifiers from ps man page must be used for the
fields parameter.
@param fields: List of fields included in the output.
Default: pid, user, cmd
@param threads: If True, include threads in output.
@return: Dict with 'headers' (list of column names) and 'stats' (list of rows), or None if there is no output. | false |
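The method builds a `ps -o field:width,...` format string and then slices each output line at fixed column boundaries; a small standalone sketch of that slicing (the field widths and the sample line are made up):

fields = ("pid", "user", "cmd")
widths = {"pid": 7, "user": 12, "cmd": 64}        # hypothetical column widths
fmt = ",".join("%s:%d" % (f, widths[f]) for f in fields)
args = ["ps", "--no-headers", "-e", "-o", fmt]    # command that would be executed

line = "   4242 postgres     /usr/lib/postgresql/bin/postgres"
ranges, start = [], 0
for f in fields:
    end = start + widths[f] + 1
    ranges.append((start, end))
    start = end
cols = [line[s:e].strip() for s, e in ranges]     # ['4242', 'postgres', '/usr/lib/...']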
1,978,067 | def sample(self, bqm, beta_range=None, num_reads=10, num_sweeps=1000):
"""Sample from low-energy spin states using simulated annealing.
Args:
bqm (:obj:`.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
beta_range (tuple, optional): Beginning and end of the beta schedule
(beta is the inverse temperature) as a 2-tuple. The schedule is applied
linearly in beta. Default is chosen based on the total bias associated
with each node.
num_reads (int, optional, default=10):
Number of reads. Each sample is the result of a single run of
the simulated annealing algorithm.
num_sweeps (int, optional, default=1000):
Number of sweeps or steps.
Returns:
:obj:`.SampleSet`
Note:
This is a reference implementation, not optimized for speed
and therefore not an appropriate sampler for benchmarking.
"""
# input checking
# h, J are handled by the @ising decorator
# beta_range, sweeps are handled by ising_simulated_annealing
if not isinstance(num_reads, int):
raise TypeError("'samples' should be a positive integer")
if num_reads < 1:
raise ValueError("'samples' should be a positive integer")
h, J, offset = bqm.to_ising()
# run the simulated annealing algorithm
samples = []
energies = []
for __ in range(num_reads):
sample, energy = ising_simulated_annealing(h, J, beta_range, num_sweeps)
samples.append(sample)
energies.append(energy)
response = SampleSet.from_samples(samples, Vartype.SPIN, energies)
response.change_vartype(bqm.vartype, offset, inplace=True)
return response | [
"def",
"sample",
"(",
"self",
",",
"bqm",
",",
"beta_range",
"=",
"None",
",",
"num_reads",
"=",
"10",
",",
"num_sweeps",
"=",
"1000",
")",
":",
"if",
"not",
"isinstance",
"(",
"num_reads",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"\"'samples' should be a positive integer\"",
")",
"if",
"num_reads",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"'samples' should be a positive integer\"",
")",
"h",
",",
"J",
",",
"offset",
"=",
"bqm",
".",
"to_ising",
"(",
")",
"samples",
"=",
"[",
"]",
"energies",
"=",
"[",
"]",
"for",
"__",
"in",
"range",
"(",
"num_reads",
")",
":",
"sample",
",",
"energy",
"=",
"ising_simulated_annealing",
"(",
"h",
",",
"J",
",",
"beta_range",
",",
"num_sweeps",
")",
"samples",
".",
"append",
"(",
"sample",
")",
"energies",
".",
"append",
"(",
"energy",
")",
"response",
"=",
"SampleSet",
".",
"from_samples",
"(",
"samples",
",",
"Vartype",
".",
"SPIN",
",",
"energies",
")",
"response",
".",
"change_vartype",
"(",
"bqm",
".",
"vartype",
",",
"offset",
",",
"inplace",
"=",
"True",
")",
"return",
"response"
] | python | Sample from low-energy spin states using simulated annealing.
Args:
bqm (:obj:`.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
beta_range (tuple, optional): Beginning and end of the beta schedule
(beta is the inverse temperature) as a 2-tuple. The schedule is applied
linearly in beta. Default is chosen based on the total bias associated
with each node.
num_reads (int, optional, default=10):
Number of reads. Each sample is the result of a single run of
the simulated annealing algorithm.
num_sweeps (int, optional, default=1000):
Number of sweeps or steps.
Returns:
:obj:`.SampleSet`
Note:
This is a reference implementation, not optimized for speed
and therefore not an appropriate sampler for benchmarking. | false |
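A hedged usage sketch, assuming this is the reference sampler exposed by dimod as `dimod.SimulatedAnnealingSampler`; the tiny Ising problem is arbitrary.

import dimod

bqm = dimod.BinaryQuadraticModel.from_ising({"a": 0.5}, {("a", "b"): -1.0})
sampler = dimod.SimulatedAnnealingSampler()        # reference implementation, not tuned for speed
sampleset = sampler.sample(bqm, num_reads=5, num_sweeps=200)
print(sampleset.first.sample, sampleset.first.energy)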
2,170,595 | def download(self):
"""
Request the URL and return its content.
The requested content will be cached in the default temp directory.
"""
if os.path.isfile(self.archive_path):
print("Use %r" % self.archive_path)
with open(self.archive_path, "rb") as f:
content = f.read()
else:
print("Request: %r..." % self.URL)
# Warning: HTTPS requests do not do any verification of the server's certificate.
f = urlopen(self.URL)
content = f.read()
with open(self.archive_path, "wb") as out_file:
out_file.write(content)
# Check SHA hash:
current_sha1 = hashlib.sha1(content).hexdigest()
assert current_sha1 == self.DOWNLOAD_SHA1, "Download sha1 value is wrong! SHA1 is: %r" % current_sha1
print("Download SHA1: %r, ok." % current_sha1) | [
"def",
"download",
"(",
"self",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"archive_path",
")",
":",
"print",
"(",
"\"Use %r\"",
"%",
"self",
".",
"archive_path",
")",
"with",
"open",
"(",
"self",
".",
"archive_path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"content",
"=",
"f",
".",
"read",
"(",
")",
"else",
":",
"print",
"(",
"\"Request: %r...\"",
"%",
"self",
".",
"URL",
")",
"f",
"=",
"urlopen",
"(",
"self",
".",
"URL",
")",
"content",
"=",
"f",
".",
"read",
"(",
")",
"with",
"open",
"(",
"self",
".",
"archive_path",
",",
"\"wb\"",
")",
"as",
"out_file",
":",
"out_file",
".",
"write",
"(",
"content",
")",
"current_sha1",
"=",
"hashlib",
".",
"sha1",
"(",
"content",
")",
".",
"hexdigest",
"(",
")",
"assert",
"current_sha1",
"==",
"self",
".",
"DOWNLOAD_SHA1",
",",
"\"Download sha1 value is wrong! SHA1 is: %r\"",
"%",
"current_sha1",
"print",
"(",
"\"Download SHA1: %r, ok.\"",
"%",
"current_sha1",
")"
] | python | Request the URL and return its content.
The requested content will be cached in the default temp directory. | false |
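A stripped-down sketch of the cache-then-verify pattern above using only the standard library; the URL, cache path and SHA1 value are placeholders, not real resources.

import hashlib
import os
from urllib.request import urlopen

URL = "https://example.org/fixture.tar.gz"                    # hypothetical
ARCHIVE_PATH = "/tmp/fixture.tar.gz"                          # hypothetical cache location
DOWNLOAD_SHA1 = "da39a3ee5e6b4b0d3255bfef95601890afd80709"    # hypothetical expected hash

if os.path.isfile(ARCHIVE_PATH):
    with open(ARCHIVE_PATH, "rb") as f:          # reuse the cached copy
        content = f.read()
else:
    content = urlopen(URL).read()                # download and cache
    with open(ARCHIVE_PATH, "wb") as f:
        f.write(content)

current_sha1 = hashlib.sha1(content).hexdigest()
assert current_sha1 == DOWNLOAD_SHA1, "Download sha1 value is wrong! SHA1 is: %r" % current_sha1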
2,579,891 | def from_ewif_file(path: str, password: str) -> SigningKeyType:
"""
Return SigningKey instance from Duniter EWIF file
:param path: Path to EWIF file
:param password: Password of the encrypted seed
"""
with open(path, 'r') as fh:
wif_content = fh.read()
# check data field
regex = compile('Data: ([1-9A-HJ-NP-Za-km-z]+)', MULTILINE)
match = search(regex, wif_content)
if not match:
raise Exception('Error: Bad format EWIF v1 file')
# capture ewif key
ewif_hex = match.groups()[0]
return SigningKey.from_ewif_hex(ewif_hex, password) | [
"def",
"from_ewif_file",
"(",
"path",
":",
"str",
",",
"password",
":",
"str",
")",
"->",
"SigningKeyType",
":",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"fh",
":",
"wif_content",
"=",
"fh",
".",
"read",
"(",
")",
"regex",
"=",
"compile",
"(",
"'Data: ([1-9A-HJ-NP-Za-km-z]+)'",
",",
"MULTILINE",
")",
"match",
"=",
"search",
"(",
"regex",
",",
"wif_content",
")",
"if",
"not",
"match",
":",
"raise",
"Exception",
"(",
"'Error: Bad format EWIF v1 file'",
")",
"ewif_hex",
"=",
"match",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"return",
"SigningKey",
".",
"from_ewif_hex",
"(",
"ewif_hex",
",",
"password",
")"
] | python | Return SigningKey instance from Duniter EWIF file
:param path: Path to EWIF file
:param password: Password of the encrypted seed | false |
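The parsing step boils down to one regular expression over the file body; a standalone sketch (the file contents below are a made-up EWIF v1 body, not a real key):

from re import MULTILINE, compile, search

wif_content = "Version: 1\nType: EWIF\nData: 3xYqLB2Nv8kD\n"   # hypothetical file body
match = search(compile(r"Data: ([1-9A-HJ-NP-Za-km-z]+)", MULTILINE), wif_content)
if not match:
    raise Exception("Error: Bad format EWIF v1 file")
ewif_hex = match.groups()[0]   # base58 payload that would be handed to from_ewif_hex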
1,760,473 | def _setup_genome_annotations(g, args, ann_groups):
"""Configure genome annotations to install based on datatarget.
"""
available_anns = g.get("annotations", []) + g.pop("annotations_available", [])
anns = []
for orig_target in args.datatarget:
if orig_target in ann_groups:
targets = ann_groups[orig_target]
else:
targets = [orig_target]
for target in targets:
if target in available_anns:
anns.append(target)
g["annotations"] = anns
if "variation" not in args.datatarget and "validation" in g:
del g["validation"]
return g | [
"def",
"_setup_genome_annotations",
"(",
"g",
",",
"args",
",",
"ann_groups",
")",
":",
"available_anns",
"=",
"g",
".",
"get",
"(",
"\"annotations\"",
",",
"[",
"]",
")",
"+",
"g",
".",
"pop",
"(",
"\"annotations_available\"",
",",
"[",
"]",
")",
"anns",
"=",
"[",
"]",
"for",
"orig_target",
"in",
"args",
".",
"datatarget",
":",
"if",
"orig_target",
"in",
"ann_groups",
":",
"targets",
"=",
"ann_groups",
"[",
"orig_target",
"]",
"else",
":",
"targets",
"=",
"[",
"orig_target",
"]",
"for",
"target",
"in",
"targets",
":",
"if",
"target",
"in",
"available_anns",
":",
"anns",
".",
"append",
"(",
"target",
")",
"g",
"[",
"\"annotations\"",
"]",
"=",
"anns",
"if",
"\"variation\"",
"not",
"in",
"args",
".",
"datatarget",
"and",
"\"validation\"",
"in",
"g",
":",
"del",
"g",
"[",
"\"validation\"",
"]",
"return",
"g"
] | python | Configure genome annotations to install based on datatarget. | false |
2,075,411 | def pythonize(self, val):
"""Convert value into a address ip format::
* If value is a list, try to take the last element
* match ip address and port (if available)
:param val: value to convert
:type val:
:return: address/port corresponding to value
:rtype: dict
"""
val = unique_value(val)
matches = re.match(r"^([^:]*)(?::(\d+))?$", val)
if matches is None:
raise ValueError
addr = {'address': matches.group(1)}
if matches.group(2) is not None:
addr['port'] = int(matches.group(2))
return addr | [
"def",
"pythonize",
"(",
"self",
",",
"val",
")",
":",
"val",
"=",
"unique_value",
"(",
"val",
")",
"matches",
"=",
"re",
".",
"match",
"(",
"r\"^([^:]*)(?::(\\d+))?$\"",
",",
"val",
")",
"if",
"matches",
"is",
"None",
":",
"raise",
"ValueError",
"addr",
"=",
"{",
"'address'",
":",
"matches",
".",
"group",
"(",
"1",
")",
"}",
"if",
"matches",
".",
"group",
"(",
"2",
")",
"is",
"not",
"None",
":",
"addr",
"[",
"'port'",
"]",
"=",
"int",
"(",
"matches",
".",
"group",
"(",
"2",
")",
")",
"return",
"addr"
] | python | Convert value into a address ip format::
* If value is a list, try to take the last element
* match ip address and port (if available)
:param val: value to convert
:type val:
:return: address/port corresponding to value
:rtype: dict | false |
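The regular expression above accepts either a bare address or an address:port pair; a short standalone check of both cases:

import re

pattern = re.compile(r"^([^:]*)(?::(\d+))?$")
for val in ("192.168.0.10:8080", "localhost"):
    m = pattern.match(val)
    addr = {"address": m.group(1)}
    if m.group(2) is not None:
        addr["port"] = int(m.group(2))
    print(addr)   # {'address': '192.168.0.10', 'port': 8080} then {'address': 'localhost'}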
2,351,241 | def set_output_variables(self, write_ascii=True, write_binary=False, **kwargs):
"""Set the output configuration, minimising output as much as possible
There are a number of configuration parameters which control which variables
are written to file and in which format. Limiting the variables that are
written to file can greatly speed up the running of MAGICC. By default,
calling this function without specifying any variables will disable all output
by setting all of MAGICC's ``out_xx`` flags to ``0``.
This convenience function should not be confused with ``set_config`` or
``update_config`` which allow the user to set/update the configuration flags
directly, without the more convenient syntax and default behaviour provided by
this function.
Parameters
----------
write_ascii : bool
If true, MAGICC is configured to write output files as human readable ascii files.
write_binary : bool
If true, MAGICC is configured to write binary output files. These files are much faster
to process and write, but are not human readable.
**kwargs:
List of variables to write out. A list of possible options is as follows. This
may not be a complete list.
'emissions',
'gwpemissions',
'sum_gwpemissions',
'concentrations',
'carboncycle',
'forcing',
'surfaceforcing',
'permafrost',
'temperature',
'sealevel',
'parameters',
'misc',
'lifetimes',
'timeseriesmix',
'rcpdata',
'summaryidx',
'inverseemis',
'tempoceanlayers',
'oceanarea',
'heatuptake',
'warnings',
'precipinput',
'aogcmtuning',
'ccycletuning',
'observationaltuning',
'keydata_1',
'keydata_2'
"""
assert (
write_ascii or write_binary
), "write_binary and/or write_ascii must be configured"
if write_binary and write_ascii:
ascii_binary = "BOTH"
elif write_ascii:
ascii_binary = "ASCII"
else:
ascii_binary = "BINARY"
# defaults
outconfig = {
"out_emissions": 0,
"out_gwpemissions": 0,
"out_sum_gwpemissions": 0,
"out_concentrations": 0,
"out_carboncycle": 0,
"out_forcing": 0,
"out_surfaceforcing": 0,
"out_permafrost": 0,
"out_temperature": 0,
"out_sealevel": 0,
"out_parameters": 0,
"out_misc": 0,
"out_timeseriesmix": 0,
"out_rcpdata": 0,
"out_summaryidx": 0,
"out_inverseemis": 0,
"out_tempoceanlayers": 0,
"out_heatuptake": 0,
"out_ascii_binary": ascii_binary,
"out_warnings": 0,
"out_precipinput": 0,
"out_aogcmtuning": 0,
"out_ccycletuning": 0,
"out_observationaltuning": 0,
"out_keydata_1": 0,
"out_keydata_2": 0,
}
if self.version == 7:
outconfig["out_oceanarea"] = 0
outconfig["out_lifetimes"] = 0
for kw in kwargs:
val = 1 if kwargs[kw] else 0 # convert values to 0/1 instead of booleans
outconfig["out_" + kw.lower()] = val
self.update_config(**outconfig) | [
"def",
"set_output_variables",
"(",
"self",
",",
"write_ascii",
"=",
"True",
",",
"write_binary",
"=",
"False",
",",
"**",
"kwargs",
")",
":",
"assert",
"(",
"write_ascii",
"or",
"write_binary",
")",
",",
"\"write_binary and/or write_ascii must be configured\"",
"if",
"write_binary",
"and",
"write_ascii",
":",
"ascii_binary",
"=",
"\"BOTH\"",
"elif",
"write_ascii",
":",
"ascii_binary",
"=",
"\"ASCII\"",
"else",
":",
"ascii_binary",
"=",
"\"BINARY\"",
"outconfig",
"=",
"{",
"\"out_emissions\"",
":",
"0",
",",
"\"out_gwpemissions\"",
":",
"0",
",",
"\"out_sum_gwpemissions\"",
":",
"0",
",",
"\"out_concentrations\"",
":",
"0",
",",
"\"out_carboncycle\"",
":",
"0",
",",
"\"out_forcing\"",
":",
"0",
",",
"\"out_surfaceforcing\"",
":",
"0",
",",
"\"out_permafrost\"",
":",
"0",
",",
"\"out_temperature\"",
":",
"0",
",",
"\"out_sealevel\"",
":",
"0",
",",
"\"out_parameters\"",
":",
"0",
",",
"\"out_misc\"",
":",
"0",
",",
"\"out_timeseriesmix\"",
":",
"0",
",",
"\"out_rcpdata\"",
":",
"0",
",",
"\"out_summaryidx\"",
":",
"0",
",",
"\"out_inverseemis\"",
":",
"0",
",",
"\"out_tempoceanlayers\"",
":",
"0",
",",
"\"out_heatuptake\"",
":",
"0",
",",
"\"out_ascii_binary\"",
":",
"ascii_binary",
",",
"\"out_warnings\"",
":",
"0",
",",
"\"out_precipinput\"",
":",
"0",
",",
"\"out_aogcmtuning\"",
":",
"0",
",",
"\"out_ccycletuning\"",
":",
"0",
",",
"\"out_observationaltuning\"",
":",
"0",
",",
"\"out_keydata_1\"",
":",
"0",
",",
"\"out_keydata_2\"",
":",
"0",
",",
"}",
"if",
"self",
".",
"version",
"==",
"7",
":",
"outconfig",
"[",
"\"out_oceanarea\"",
"]",
"=",
"0",
"outconfig",
"[",
"\"out_lifetimes\"",
"]",
"=",
"0",
"for",
"kw",
"in",
"kwargs",
":",
"val",
"=",
"1",
"if",
"kwargs",
"[",
"kw",
"]",
"else",
"0",
"outconfig",
"[",
"\"out_\"",
"+",
"kw",
".",
"lower",
"(",
")",
"]",
"=",
"val",
"self",
".",
"update_config",
"(",
"**",
"outconfig",
")"
] | python | Set the output configuration, minimising output as much as possible
There are a number of configuration parameters which control which variables
are written to file and in which format. Limiting the variables that are
written to file can greatly speed up the running of MAGICC. By default,
calling this function without specifying any variables will disable all output
by setting all of MAGICC's ``out_xx`` flags to ``0``.
This convenience function should not be confused with ``set_config`` or
``update_config`` which allow the user to set/update the configuration flags
directly, without the more convenient syntax and default behaviour provided by
this function.
Parameters
----------
write_ascii : bool
If true, MAGICC is configured to write output files as human readable ascii files.
write_binary : bool
If true, MAGICC is configured to write binary output files. These files are much faster
to process and write, but are not human readable.
**kwargs:
List of variables to write out. A list of possible options is as follows. This
may not be a complete list.
'emissions',
'gwpemissions',
'sum_gwpemissions',
'concentrations',
'carboncycle',
'forcing',
'surfaceforcing',
'permafrost',
'temperature',
'sealevel',
'parameters',
'misc',
'lifetimes',
'timeseriesmix',
'rcpdata',
'summaryidx',
'inverseemis',
'tempoceanlayers',
'oceanarea',
'heatuptake',
'warnings',
'precipinput',
'aogcmtuning',
'ccycletuning',
'observationaltuning',
'keydata_1',
'keydata_2' | false |
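A hedged usage sketch, assuming the method lives on pymagicc's `MAGICC6` wrapper and that the MAGICC binary is available locally; only temperature and forcing output are switched on, and everything else stays disabled by this call's defaults.

from pymagicc import MAGICC6     # assumed wrapper class exposing set_output_variables

with MAGICC6() as magicc:
    magicc.set_output_variables(write_ascii=True, temperature=True, forcing=True)
    results = magicc.run()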