text (string, lengths 78 to 104k) | score (float64, 0 to 0.18) |
---|---|
def get_public_network_ip(ips, public_subnet):
"""
Given a public subnet, choose the one IP from the remote host that exists
within the subnet range.
"""
for ip in ips:
if net.ip_in_subnet(ip, public_subnet):
return ip
msg = "IPs (%s) are not valid for any of subnet specified %s" % (str(ips), str(public_subnet))
raise RuntimeError(msg) | 0.005208 |
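A minimal standalone sketch of the same selection logic, using only the standard-library ipaddress module in place of the external net.ip_in_subnet helper (which is not shown above); the function name and sample addresses here are illustrative.

import ipaddress

def pick_ip_in_subnet(ips, public_subnet):
    """Return the first IP that falls inside the given subnet."""
    subnet = ipaddress.ip_network(public_subnet)
    for ip in ips:
        if ipaddress.ip_address(ip) in subnet:
            return ip
    raise RuntimeError("IPs (%s) are not valid for the specified subnet %s" % (ips, public_subnet))

# The address inside 10.0.0.0/24 is selected
assert pick_ip_in_subnet(["192.168.1.5", "10.0.0.7"], "10.0.0.0/24") == "10.0.0.7"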
def run(self):
"""
Writes data in JSON format into the task's output target.
The data objects have the following attributes:
* `_id` is the default Elasticsearch id field,
* `text`: the text,
* `date`: the day when the data was created.
"""
today = datetime.date.today()
with self.output().open('w') as output:
for i in range(5):
output.write(json.dumps({'_id': i, 'text': 'Hi %s' % i,
'date': str(today)}))
output.write('\n') | 0.003413 |
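Since the task above writes one JSON object per line, the output can be read back with a plain line-by-line json.loads pass; a small self-contained sketch using an in-memory buffer instead of the Luigi output target:

import datetime
import io
import json

buf = io.StringIO()
today = datetime.date.today()
for i in range(5):
    buf.write(json.dumps({'_id': i, 'text': 'Hi %s' % i, 'date': str(today)}))
    buf.write('\n')

records = [json.loads(line) for line in buf.getvalue().splitlines()]
assert len(records) == 5 and records[0]['text'] == 'Hi 0'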
def encode_example(self, example_data):
"""See base class for details."""
np_dtype = np.dtype(self._dtype.as_numpy_dtype)
# Convert to numpy if possible
if not isinstance(example_data, np.ndarray):
example_data = np.array(example_data, dtype=np_dtype)
# Ensure the shape and dtype match
if example_data.dtype != np_dtype:
raise ValueError('Dtype {} does not match {}'.format(
example_data.dtype, np_dtype))
utils.assert_shape_match(example_data.shape, self._shape)
# For booleans, convert to integer (tf.train.Example does not support bool)
if example_data.dtype == np.bool_:
example_data = example_data.astype(int)
return example_data | 0.005706 |
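An illustrative, class-free sketch of the same checks, with np_dtype and shape passed in explicitly as stand-ins for self._dtype / self._shape (the shape check here is a plain equality test rather than the original assert_shape_match helper); the boolean example shows the final cast to integers.

import numpy as np

def encode_example_sketch(example_data, np_dtype, shape):
    if not isinstance(example_data, np.ndarray):
        example_data = np.array(example_data, dtype=np_dtype)
    if example_data.dtype != np_dtype:
        raise ValueError('Dtype {} does not match {}'.format(example_data.dtype, np_dtype))
    if example_data.shape != shape:
        raise ValueError('Shape {} does not match {}'.format(example_data.shape, shape))
    # tf.train.Example has no boolean feature type, so booleans become integers
    if example_data.dtype == np.bool_:
        example_data = example_data.astype(int)
    return example_data

assert encode_example_sketch([True, False, True], np.dtype(np.bool_), (3,)).tolist() == [1, 0, 1]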
def _interpolate_stream_track_kick(self):
"""Build interpolations of the stream track near the kick"""
if hasattr(self,'_kick_interpolatedThetasTrack'): #pragma: no cover
self._store_closest()
return None #Already did this
# Setup the trackpoints where the kick will be computed, covering the
# full length of the stream
self._kick_interpolatedThetasTrack= \
numpy.linspace(self._gap_thetasTrack[0],
self._gap_thetasTrack[-1],
self._nKickPoints)
TrackX= self._gap_ObsTrack[:,0]*numpy.cos(self._gap_ObsTrack[:,5])
TrackY= self._gap_ObsTrack[:,0]*numpy.sin(self._gap_ObsTrack[:,5])
TrackZ= self._gap_ObsTrack[:,3]
TrackvX, TrackvY, TrackvZ=\
bovy_coords.cyl_to_rect_vec(self._gap_ObsTrack[:,1],
self._gap_ObsTrack[:,2],
self._gap_ObsTrack[:,4],
self._gap_ObsTrack[:,5])
#Interpolate
self._kick_interpTrackX=\
interpolate.InterpolatedUnivariateSpline(self._gap_thetasTrack,
TrackX,k=3)
self._kick_interpTrackY=\
interpolate.InterpolatedUnivariateSpline(self._gap_thetasTrack,
TrackY,k=3)
self._kick_interpTrackZ=\
interpolate.InterpolatedUnivariateSpline(self._gap_thetasTrack,
TrackZ,k=3)
self._kick_interpTrackvX=\
interpolate.InterpolatedUnivariateSpline(self._gap_thetasTrack,
TrackvX,k=3)
self._kick_interpTrackvY=\
interpolate.InterpolatedUnivariateSpline(self._gap_thetasTrack,
TrackvY,k=3)
self._kick_interpTrackvZ=\
interpolate.InterpolatedUnivariateSpline(self._gap_thetasTrack,
TrackvZ,k=3)
#Now store an interpolated version of the stream track
self._kick_interpolatedObsTrackXY= numpy.empty((len(self._kick_interpolatedThetasTrack),6))
self._kick_interpolatedObsTrackXY[:,0]=\
self._kick_interpTrackX(self._kick_interpolatedThetasTrack)
self._kick_interpolatedObsTrackXY[:,1]=\
self._kick_interpTrackY(self._kick_interpolatedThetasTrack)
self._kick_interpolatedObsTrackXY[:,2]=\
self._kick_interpTrackZ(self._kick_interpolatedThetasTrack)
self._kick_interpolatedObsTrackXY[:,3]=\
self._kick_interpTrackvX(self._kick_interpolatedThetasTrack)
self._kick_interpolatedObsTrackXY[:,4]=\
self._kick_interpTrackvY(self._kick_interpolatedThetasTrack)
self._kick_interpolatedObsTrackXY[:,5]=\
self._kick_interpTrackvZ(self._kick_interpolatedThetasTrack)
#Also in cylindrical coordinates
self._kick_interpolatedObsTrack= \
numpy.empty((len(self._kick_interpolatedThetasTrack),6))
tR,tphi,tZ= bovy_coords.rect_to_cyl(self._kick_interpolatedObsTrackXY[:,0],
self._kick_interpolatedObsTrackXY[:,1],
self._kick_interpolatedObsTrackXY[:,2])
tvR,tvT,tvZ=\
bovy_coords.rect_to_cyl_vec(self._kick_interpolatedObsTrackXY[:,3],
self._kick_interpolatedObsTrackXY[:,4],
self._kick_interpolatedObsTrackXY[:,5],
tR,tphi,tZ,cyl=True)
self._kick_interpolatedObsTrack[:,0]= tR
self._kick_interpolatedObsTrack[:,1]= tvR
self._kick_interpolatedObsTrack[:,2]= tvT
self._kick_interpolatedObsTrack[:,3]= tZ
self._kick_interpolatedObsTrack[:,4]= tvZ
self._kick_interpolatedObsTrack[:,5]= tphi
self._store_closest()
return None | 0.020209 |
def get_recent_matches(self, card_type="micro_card"):
"""
Calling the Recent Matches API.
Args:
card_type: optional, defaults to micro_card. Accepted values are
micro_card & summary_card.
Returns:
JSON data
"""
recent_matches_url = self.api_path + "recent_matches/"
params = {}
params["card_type"] = card_type
response = self.get_response(recent_matches_url, params)
return response | 0.004049 |
def get_title(self, lang: str=None) -> Literal:
""" Get the title of the object
:param lang: Lang to retrieve
:return: Title string representation
:rtype: Literal
"""
return self.metadata.get_single(key=DC.title, lang=lang) or \
self.metadata.get_single(key=DCT.title, lang=lang) | 0.011765 |
def _disconnect_hanging_devices(self):
"""Periodic callback that checks for devices that haven't been used and disconnects them."""
now = monotonic()
for uuid, data in self._connections.items():
if (now - data['last_touch']) > self.client_timeout:
self._logger.info("Disconnect inactive client %s from device 0x%X", data['client'], uuid)
self._loop.add_callback(self._disconnect_from_device, uuid, data['key'], data['client'], unsolicited=True) | 0.009747 |
def restrict_chat_member(self, *args, **kwargs):
"""See :func:`restrict_chat_member`"""
return restrict_chat_member(*args, **self._merge_overrides(**kwargs)).run() | 0.01676 |
def get_variations(self):
"""Gets the OpenType font variations for the font options object.
See :meth:`set_variations` for details about the
string format.
:return: the font variations for the font options object. The
returned string belongs to the ``options`` and must not be modified.
It is valid until either the font options object is destroyed or the
font variations in this object is modified with :meth:`set_variations`.
*New in cairo 1.16.*
*New in cairocffi 0.9.*
"""
variations = cairo.cairo_font_options_get_variations(self._pointer)
if variations != ffi.NULL:
return ffi.string(variations).decode('utf8', 'replace') | 0.002703 |
def get_keyboard_mapping(conn):
"""
Return a keyboard mapping cookie that can be used to fetch the table of
keysyms in the current X environment.
:rtype: xcb.xproto.GetKeyboardMappingCookie
"""
mn, mx = get_min_max_keycode(conn)
return conn.core.GetKeyboardMapping(mn, mx - mn + 1) | 0.003215 |
def set_node_attributes(G, values, name=None):
"""Set node attributes from dictionary of nodes and values
Parameters
----------
G : DyNetx Graph
name : string
Attribute name
values: dict
Dictionary of attribute values keyed by node. If `values` is not a
dictionary, then it is treated as a single attribute value that is then
applied to every node in `G`.
"""
# Set node attributes based on type of `values`
if name is not None: # `values` must not be a dict of dict
try: # `values` is a dict
for n, v in values.items():
try:
G.node[n][name] = values[n]
except KeyError:
pass
except AttributeError: # `values` is a constant
for n in G:
G.node[n][name] = values
else: # `values` must be dict of dict
for n, d in values.items():
try:
G.node[n].update(d)
except KeyError:
pass | 0.00185 |
def request_token(self) -> None:
"""
Requests a new Client Credentials Flow authentication token from the Spotify API
and stores it in the `token` property of the object.
Raises:
requests.HTTPError: If an HTTP error occurred during the request.
"""
response: requests.Response = requests.post(
self._TOKEN_URL,
auth=HTTPBasicAuth(self._client_id, self._client_key),
data={"grant_type": self._GRANT_TYPE},
verify=True
)
response.raise_for_status()
self._token = response.json()
self._token_expires_at = time.time() + self._token["expires_in"] | 0.004399 |
def values(service, id, ranges):
"""Fetch and return spreadsheet cell values with Google sheets API."""
params = {'majorDimension': 'ROWS', 'valueRenderOption': 'UNFORMATTED_VALUE',
'dateTimeRenderOption': 'FORMATTED_STRING'}
params.update(spreadsheetId=id, ranges=ranges)
response = service.spreadsheets().values().batchGet(**params).execute()
return response['valueRanges'] | 0.00489 |
def _stop_scan(self):
"""Stop scanning for BLE devices
"""
try:
response = self._send_command(6, 4, [])
if response.payload[0] != 0:
# Error code 129 means we just were not currently scanning
if response.payload[0] != 129:
self._logger.error('Error stopping scan for devices, error=%d', response.payload[0])
return False, {'reason': "Could not stop scan for ble devices"}
except InternalTimeoutError:
return False, {'reason': "Timeout waiting for response"}
except DeviceNotConfiguredError:
return True, {'reason': "Device not connected (did you disconnect the dongle?"}
return True, None | 0.005298 |
def format_time_point(
time_point_string):
"""
:param str time_point_string: String representation of a time point
to format
:return: Formatted time point
:rtype: str
:raises ValueError: If *time_point_string* cannot be parsed by
dateutil.parser.parse
See :py:meth:`datetime.datetime.isoformat` function for supported formats.
"""
time_point = dateutil.parser.parse(time_point_string)
if not is_aware(time_point):
time_point = make_aware(time_point)
time_point = local_time_point(time_point)
return time_point.strftime("%Y-%m-%dT%H:%M:%S") | 0.001613 |
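A minimal sketch of the parse-then-format step, assuming only dateutil is available; the is_aware/make_aware/local_time_point helpers from the original are approximated here by attaching UTC to naive values.

import datetime
import dateutil.parser

def format_time_point_sketch(time_point_string):
    time_point = dateutil.parser.parse(time_point_string)
    if time_point.tzinfo is None:
        time_point = time_point.replace(tzinfo=datetime.timezone.utc)
    return time_point.strftime("%Y-%m-%dT%H:%M:%S")

assert format_time_point_sketch("2020-01-02 03:04:05") == "2020-01-02T03:04:05"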
def _inject_target(self, target_adaptor):
"""Inject a target, respecting all sources of dependencies."""
target_cls = self._target_types[target_adaptor.type_alias]
declared_deps = target_adaptor.dependencies
implicit_deps = (Address.parse(s,
relative_to=target_adaptor.address.spec_path,
subproject_roots=self._address_mapper.subproject_roots)
for s in target_cls.compute_dependency_specs(kwargs=target_adaptor.kwargs()))
for dep in declared_deps:
self._dependent_address_map[dep].add(target_adaptor.address)
for dep in implicit_deps:
self._implicit_dependent_address_map[dep].add(target_adaptor.address) | 0.008197 |
def del_application(self, application, sync=True):
"""
delete application from this company
:param application: the application to be deleted from this company
:param sync: If sync=True(default) synchronize with Ariane server. If sync=False,
add the application object on list to be removed on next save().
:return:
"""
LOGGER.debug("Company.del_application")
if not sync:
self.applications_2_rm.append(application)
else:
if application.id is None:
application.sync()
if self.id is not None and application.id is not None:
params = {
'id': self.id,
'applicationID': application.id
}
args = {'http_operation': 'GET', 'operation_path': 'update/applications/delete', 'parameters': params}
response = CompanyService.requester.call(args)
if response.rc != 0:
LOGGER.warning(
'Company.del_application - Problem while updating company ' + self.name +
'. Reason: ' + str(response.response_content) + '-' + str(response.error_message) +
" (" + str(response.rc) + ")"
)
else:
self.applications_ids.remove(application.id)
application.sync()
else:
LOGGER.warning(
'Company.del_application - Problem while updating company ' + self.name + '. Reason: application ' +
application.name + ' id is None or self.id is None'
) | 0.004106 |
def nextparent(self, parent, depth):
'''
Advance the current line by traversing the grandparent object again:
once our current line is reached, count every line that is prefixed
with the parent directory.
'''
if depth > 1: # can't jump to parent of root node!
pdir = os.path.dirname(self.name)
line = 0
for c, d in parent.traverse():
if line > parent.curline and c.name.startswith(pdir):
parent.curline += 1
line += 1
else: # otherwise just skip to next directory
line = -1 # skip hidden parent node
for c, d in parent.traverse():
if line > parent.curline:
parent.curline += 1
if os.path.isdir(c.name) and c.name in parent.children[0:]:
break
line += 1 | 0.002167 |
def _mic_required(target_info):
"""
Checks the MsvAvFlags field of the supplied TargetInfo structure to determine if the MIC flag is set
:param target_info: The TargetInfo structure to check
:return: a boolean value indicating that the MIC flag is set
"""
if target_info is not None and target_info[TargetInfo.NTLMSSP_AV_FLAGS] is not None:
flags = struct.unpack('<I', target_info[TargetInfo.NTLMSSP_AV_FLAGS][1])[0]
return bool(flags & 0x00000002) | 0.00818 |
def __extract_model_summary_value(model, value):
"""
Extract a model summary field value
"""
field_value = None
if isinstance(value, _precomputed_field):
field_value = value.field
else:
field_value = model._get(value)
if isinstance(field_value, float):
try:
field_value = round(field_value, 4)
except:
pass
return field_value | 0.004843 |
def activities_list(self, since=None, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/activity_stream#list-activities"
api_path = "/api/v2/activities.json"
api_query = {}
if "query" in kwargs.keys():
api_query.update(kwargs["query"])
del kwargs["query"]
if since:
api_query.update({
"since": since,
})
return self.call(api_path, query=api_query, **kwargs) | 0.00625 |
def plugin(cls, name):
"""
Retrieves the plugin based on the inputted name.
:param name | <str>
:return <Plugin>
"""
cls.loadPlugins()
plugs = getattr(cls, '_%s__plugins' % cls.__name__, {})
return plugs.get(nstr(name)) | 0.012903 |
def arg_type_to_string(arg_type) -> str:
"""
Converts the argument type to a string
:param arg_type:
:return:
String representation of the argument type. Multiple return types are
turned into a comma delimited list of type names
"""
union_params = (
getattr(arg_type, '__union_params__', None) or
getattr(arg_type, '__args__', None)
)
if union_params and isinstance(union_params, (list, tuple)):
return ', '.join([arg_type_to_string(item) for item in union_params])
try:
return arg_type.__name__
except AttributeError:
return '{}'.format(arg_type) | 0.001546 |
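A quick usage check, assuming the function above is in scope; typing.Union exposes its members via __args__, which is the second branch of the lookup (the __union_params__ attribute only exists on very old typing versions).

import typing

assert arg_type_to_string(int) == 'int'
assert arg_type_to_string(typing.Union[int, str]) == 'int, str'
assert arg_type_to_string(typing.Optional[float]) == 'float, NoneType'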
def filter(filter_creator):
"""
Creates a decorator that can be used as a filter.
.. warning::
This is currently not compatible with most other decorators, if
you are using a decorator that isn't part of `hurler` you should
take caution.
"""
filter_func = [None]
def function_getter(function):
if isinstance(function, Filter):
function.add_filter(filter_func[0])
return function
else:
return Filter(
filter=filter_func[0],
callback=function,
)
def filter_decorator(*args, **kwargs):
filter_function = filter_creator(*args, **kwargs)
filter_func[0] = filter_function
return function_getter
return filter_decorator | 0.001271 |
def UNTL_to_encodedUNTL(subject):
"""Normalize a UNTL subject heading to be used in SOLR."""
subject = normalize_UNTL(subject)
subject = subject.replace(' ', '_')
subject = subject.replace('_-_', '/')
return subject | 0.004255 |
def should_compile_incrementally(self, vts, ctx):
"""Check to see if the compile should try to re-use the existing analysis.
Returns true if we should try to compile the target incrementally.
"""
if not vts.is_incremental:
return False
if not self._clear_invalid_analysis:
return True
return os.path.exists(ctx.analysis_file) | 0.008287 |
def check_network_role(self, public_key):
""" Check the public key of a node on the network to see if they are
permitted to participate. The roles being checked are the
following, from first to last:
"network"
"default"
The first role that is set will be the one used to enforce if the
node is allowed.
Args:
public_key (string): The public key belonging to a node on the
network
"""
state_root = self._current_root_func()
if state_root == INIT_ROOT_KEY:
LOGGER.debug("Chain head is not set yet. Permit all.")
return True
self._cache.update_view(state_root)
role = self._cache.get_role("network", state_root)
if role is None:
policy_name = "default"
else:
policy_name = role.policy_name
policy = self._cache.get_policy(policy_name, state_root)
if policy is not None:
if not self._allowed(public_key, policy):
LOGGER.debug("Node is not permitted: %s.", public_key)
return False
return True | 0.001667 |
def do_delete(self, args):
'''delete the entire contents of the current namespace'''
namespace = self.config['namespace']
if not args.assume_yes:
response = raw_input('Delete everything in {0!r}? Enter namespace: '
.format(namespace))
if response != namespace:
self.stdout.write('not deleting anything\n')
return
self.stdout.write('deleting namespace {0!r}\n'.format(namespace))
self.task_master.clear() | 0.005629 |
def addVariantAnnotationSet(self, variantAnnotationSet):
"""
Adds the specified variantAnnotationSet to this dataset.
"""
id_ = variantAnnotationSet.getId()
self._variantAnnotationSetIdMap[id_] = variantAnnotationSet
self._variantAnnotationSetIds.append(id_) | 0.006536 |
def GetCellValueNoFail (self, column, row = None):
""" get a cell, if it does not exist fail
note that column at row START AT 1 same as excel
"""
if row == None:
(row, column) = ParseCellSpec(column)
cell = GetCellValue(self, column, row)
if cell == None:
raise ValueError("cell %d:%d does not exist" % (column, row))
return cell | 0.02008 |
def exists(path):
"""Determine if a Path or string is an existing path on the file system."""
try:
return path.expanduser().absolute().exists()
except AttributeError:
return os.path.exists(os.path.abspath(os.path.expanduser(str(path)))) | 0.003788 |
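Usage sketch covering both branches, assuming the function above (and its os import) is in scope: a pathlib.Path object takes the expanduser path, while a plain string triggers the AttributeError fallback.

import pathlib

assert exists(pathlib.Path('.')) is True
assert exists('.') is True
assert exists('/this/path/should/not/exist') is False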
def containsTie(self):
"""
Returns True if the underlying weighted majority graph contains a tie between any pair of
candidates and returns False otherwise.
"""
# If a value of 0 is present in the wmgMap, we assume that it represents a tie.
for cand in self.wmgMap.keys():
if 0 in self.wmgMap[cand].values():
return True
return False | 0.009569 |
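A small sketch of the wmgMap convention the method above assumes: wmgMap[a][b] holds the pairwise margin of candidate a over candidate b, and a stored 0 marks a tie for that pair.

wmgMap = {
    1: {2: 3, 3: 0},
    2: {1: -3, 3: 2},
    3: {1: 0, 2: -2},
}
has_tie = any(0 in row.values() for row in wmgMap.values())
assert has_tie is True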
def matrix(self):
"""Build matrix representation of Householder transformation.
Builds the matrix representation
:math:`H = I - \\beta vv^*`.
**Use with care!** This routine may be helpful for testing purposes but
should not be used in production codes for high dimensions since
the resulting matrix is dense.
"""
n = self.v.shape[0]
return numpy.eye(n, n) - self.beta * numpy.dot(self.v, self.v.T.conj()) | 0.004175 |
def ispercolating(am, inlets, outlets, mode='site'):
r"""
Determines if a percolating clusters exists in the network spanning
the given inlet and outlet sites
Parameters
----------
am : adjacency_matrix
The adjacency matrix with the ``data`` attribute indicating
if a bond is occupied or not
inlets : array_like
An array of indices indicating which sites are part of the inlets
outlets : array_like
An array of indices indicating which sites are part of the outlets
mode : string
Indicates which type of percolation to apply, either `'site'` or
`'bond'`
"""
if am.format != 'coo':
am = am.tocoo()
ij = sp.vstack((am.col, am.row)).T
if mode.startswith('site'):
occupied_sites = sp.zeros(shape=am.shape[0], dtype=bool)
occupied_sites[ij[am.data].flatten()] = True
clusters = site_percolation(ij, occupied_sites)
elif mode.startswith('bond'):
occupied_bonds = am.data
clusters = bond_percolation(ij, occupied_bonds)
ins = sp.unique(clusters.sites[inlets])
if ins[0] == -1:
ins = ins[1:]
outs = sp.unique(clusters.sites[outlets])
if outs[0] == -1:
outs = outs[1:]
hits = sp.in1d(ins, outs)
return sp.any(hits) | 0.000765 |
def to_jd(year, month, day):
'''Determine Julian day count from Islamic date'''
return (day + ceil(29.5 * (month - 1)) + (year - 1) * 354 + trunc((3 + (11 * year)) / 30) + EPOCH) - 1 | 0.010526 |
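A worked check of the arithmetic, assuming EPOCH is the usual Islamic-calendar epoch Julian day 1948439.5 (the Fourmilab/convertdate value; it is not defined in the snippet above): 1 Muharram 1 AH lands exactly on the epoch.

from math import ceil, trunc

EPOCH = 1948439.5  # assumed Islamic calendar epoch (Julian day)

def to_jd_sketch(year, month, day):
    return (day + ceil(29.5 * (month - 1)) + (year - 1) * 354
            + trunc((3 + (11 * year)) / 30) + EPOCH) - 1

assert to_jd_sketch(1, 1, 1) == EPOCH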
def imread(path, grayscale=False, size=None, interpolate="bilinear",
channel_first=False, as_uint16=False, num_channels=-1, **kwargs):
"""
Read image from ``path``.
If you specify the ``size``, the output array is resized.
Default output shape is (height, width, channel) for RGB image and (height, width) for gray-scale image.
Args:
path (String or File Object): Input image path.
grayscale (bool): If True, the image is converted to gray-scale. Default is False.
size (tuple of int): Output shape. The order is (width, height). If None, the image is not resized. Default is None.
interpolate (str): Interpolation method.
This argument depends on the backend.
If you specify this argument, pay attention to which backend is currently in use.
What you can select is below:
- pil backend: ["nearest", "box", "bilinear", "hamming", "bicubic", "lanczos"].
- cv2 backend: ["nearest", "bilinear", "bicubic", "lanczos"].
Default is "bilinear" for both backends.
channel_first (bool): If True, the shape of the output array is (channel, height, width) for RGB image. Default is False.
as_uint16 (bool): If True, this function tries to read img as np.uint16. Default is False.
num_channels (int): channel size of output array.
Default is -1 which preserves raw image shape.
Returns:
numpy.ndarray :
if as_uint16=True output dtype is np.uint16, else np.uint8 (default).
"""
return backend_manager.module.imread(path, grayscale=grayscale, size=size, interpolate=interpolate,
channel_first=channel_first, as_uint16=as_uint16, num_channels=num_channels,
**kwargs) | 0.005397 |
def script_args(f):
"""single decorator for adding script args"""
args = [
magic_arguments.argument(
'--out', type=str,
help="""The variable in which to store stdout from the script.
If the script is backgrounded, this will be the stdout *pipe*,
instead of the stdout text itself.
"""
),
magic_arguments.argument(
'--err', type=str,
help="""The variable in which to store stderr from the script.
If the script is backgrounded, this will be the stderr *pipe*,
instead of the stderr text itself.
"""
),
magic_arguments.argument(
'--bg', action="store_true",
help="""Whether to run the script in the background.
If given, the only way to see the output of the command is
with --out/err.
"""
),
magic_arguments.argument(
'--proc', type=str,
help="""The variable in which to store Popen instance.
This is used only when --bg option is given.
"""
),
]
for arg in args:
f = arg(f)
return f | 0.000832 |
def plot_fit(self, **kwargs):
""" Plots the fit of the model
Returns
----------
None (plots data and the fit)
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
plt.figure(figsize=figsize)
date_index = self.index[max(self.p, self.q):]
t_params = self.transform_z()
sigma2, Y, ___ = self._model(self.latent_variables.get_z_values())
plt.plot(date_index, np.abs(Y-t_params[-1]), label=self.data_name + ' Absolute Demeaned Values')
plt.plot(date_index, np.power(sigma2,0.5), label='GARCH(' + str(self.p) + ',' + str(self.q) + ') Conditional Volatility',c='black')
plt.title(self.data_name + " Volatility Plot")
plt.legend(loc=2)
plt.show() | 0.010091 |
def indent(s, c=" ", n=4):
"""Indent the string 's' with the character 'c', 'n' times.
Parameters
----------
s : str
String to indent
c : str, default space
String to use as indentation
n : int, default 4
Number of chars to indent
"""
indentation = c * n
return "\n".join([indentation + l for l in s.splitlines()]) | 0.005305 |
def _get_commit(self):
"""
:return:
Commit object we point to, works for detached and non-detached
SymbolicReferences. The symbolic reference will be dereferenced recursively."""
obj = self._get_object()
if obj.type == 'tag':
obj = obj.object
# END dereference tag
if obj.type != Commit.type:
raise TypeError("Symbolic Reference pointed to object %r, commit was required" % obj)
# END handle type
return obj | 0.007692 |
def get_bandwidth(self, cache_level, read_streams, write_streams, threads_per_core, cores=None):
"""
Return best fitting bandwidth according to number of threads, read and write streams.
:param cache_level: integer of cache (0 is L1, 1 is L2 ...)
:param read_streams: number of read streams expected
:param write_streams: number of write streams expected
:param threads_per_core: number of threads that are run on each core
:param cores: if not given, will choose maximum bandwidth for single NUMA domain
"""
# try to find best fitting kernel (closest to read/write ratio):
# write allocate has to be handled in kernel information (all writes are also reads)
# TODO support for non-write-allocate architectures
try:
target_ratio = read_streams/write_streams
except ZeroDivisionError:
target_ratio = float('inf')
measurement_kernel = 'load'
measurement_kernel_info = self['benchmarks']['kernels'][measurement_kernel]
measurement_kernel_ratio = float('inf')
for kernel_name, kernel_info in sorted(self['benchmarks']['kernels'].items()):
try:
kernel_ratio = ((kernel_info['read streams']['streams'] +
kernel_info['write streams']['streams'] -
kernel_info['read+write streams']['streams']) /
kernel_info['write streams']['streams'])
except ZeroDivisionError:
kernel_ratio = float('inf')
if abs(kernel_ratio - target_ratio) < abs(measurement_kernel_ratio - target_ratio):
measurement_kernel = kernel_name
measurement_kernel_info = kernel_info
measurement_kernel_ratio = kernel_ratio
# choose smt, and then use max/saturation bw
bw_level = self['memory hierarchy'][cache_level]['level']
bw_measurements = \
self['benchmarks']['measurements'][bw_level][threads_per_core]
assert threads_per_core == bw_measurements['threads per core'], \
'malformed measurement dictionary in machine file.'
if cores is not None:
# Used by Roofline model
run_index = bw_measurements['cores'].index(cores)
bw = bw_measurements['results'][measurement_kernel][run_index]
else:
# Used by ECM model
# Choose maximum number of cores to get bandwidth for
max_cores = min(self['memory hierarchy'][cache_level]['cores per group'],
self['cores per NUMA domain'])
bw = max(bw_measurements['results'][measurement_kernel][:max_cores])
# Correct bandwidth due to mis-measurement of write allocation
# TODO support non-temporal stores and non-write-allocate architectures
if cache_level == 0:
# L1 does not have write-allocate, so everything is measured correctly
factor = 1.0
else:
factor = (float(measurement_kernel_info['read streams']['bytes']) +
2.0*float(measurement_kernel_info['write streams']['bytes']) -
float(measurement_kernel_info['read+write streams']['bytes'])) / \
(float(measurement_kernel_info['read streams']['bytes']) +
float(measurement_kernel_info['write streams']['bytes']))
bw = bw * factor
return bw, measurement_kernel | 0.00424 |
def _encrypt(self, data, recipients,
default_key=None,
passphrase=None,
armor=True,
encrypt=True,
symmetric=False,
always_trust=True,
output=None,
throw_keyids=False,
hidden_recipients=None,
cipher_algo='AES256',
digest_algo='SHA512',
compress_algo='ZLIB'):
"""Encrypt the message read from the file-like object **data**.
:param str data: The file or bytestream to encrypt.
:param str recipients: The recipients to encrypt to. Recipients must
be specified by keyID/fingerprint.
.. warning:: Care should be taken in Python2 to make sure that the
given fingerprints for **recipients** are in fact strings
and not unicode objects.
:param str default_key: The keyID/fingerprint of the key to use for
signing. If given, **data** will be encrypted
*and* signed.
:param str passphrase: If given, and **default_key** is also given,
use this passphrase to unlock the secret
portion of the **default_key** to sign the
encrypted **data**. Otherwise, if
**default_key** is not given, but **symmetric**
is ``True``, then use this passphrase as the
passphrase for symmetric encryption. Signing
and symmetric encryption should *not* be
combined when sending the **data** to other
recipients, else the passphrase to the secret
key would be shared with them.
:param bool armor: If True, ascii armor the output; otherwise, the
output will be in binary format. (Default: True)
:param bool encrypt: If True, encrypt the **data** using the
**recipients** public keys. (Default: True)
:param bool symmetric: If True, encrypt the **data** to **recipients**
using a symmetric key. See the **passphrase**
parameter. Symmetric encryption and public key
encryption can be used simultaneously, and will
result in a ciphertext which is decryptable
with either the symmetric **passphrase** or one
of the corresponding private keys.
:param bool always_trust: If True, ignore trust warnings on
**recipients** keys. If False, display trust
warnings. (default: True)
:type output: str or file-like object
:param output: The output file to write to. If not specified, the
encrypted output is returned, and thus should be stored
as an object in Python. For example:
>>> import shutil
>>> import gnupg
>>> if os.path.exists("doctests"):
... shutil.rmtree("doctests")
>>> gpg = gnupg.GPG(homedir="doctests")
>>> key_settings = gpg.gen_key_input(key_type='RSA',
... key_length=1024,
... key_usage='ESCA',
... passphrase='foo')
>>> key = gpg.gen_key(key_settings)
>>> message = "The crow flies at midnight."
>>> encrypted = str(gpg.encrypt(message, key.fingerprint))
>>> assert encrypted != message
>>> assert not encrypted.isspace()
>>> decrypted = str(gpg.decrypt(encrypted, passphrase='foo'))
>>> assert not decrypted.isspace()
>>> decrypted
'The crow flies at midnight.'
:param bool throw_keyids: If True, make all **recipients** keyids be
zero'd out in packet information. This is the same as using
**hidden_recipients** for all **recipients**. (Default: False).
:param list hidden_recipients: A list of recipients that should have
their keyids zero'd out in packet information.
:param str cipher_algo: The cipher algorithm to use. To see available
algorithms with your version of GnuPG, do:
:command:`$ gpg --with-colons --list-config
ciphername`. The default **cipher_algo**, if
unspecified, is ``'AES256'``.
:param str digest_algo: The hash digest to use. Again, to see which
hashes your GnuPG is capable of using, do:
:command:`$ gpg --with-colons --list-config
digestname`. The default, if unspecified, is
``'SHA512'``.
:param str compress_algo: The compression algorithm to use. Can be one
of ``'ZLIB'``, ``'BZIP2'``, ``'ZIP'``, or
``'Uncompressed'``.
"""
args = []
## FIXME: GnuPG appears to ignore the --output directive when being
## programmatically driven. We'll handle the IO ourselves to fix this
## for now.
output_filename = None
if output:
if getattr(output, 'fileno', None) is not None:
## avoid overwrite confirmation message
if getattr(output, 'name', None) is not None:
output_filename = output.name
if os.path.exists(output.name):
os.remove(output.name)
#args.append('--output %s' % output.name)
else:
output_filename = output
if os.path.exists(output):
os.remove(output)
#args.append('--output %s' % output)
if armor: args.append('--armor')
if always_trust: args.append('--always-trust')
if cipher_algo: args.append('--cipher-algo %s' % cipher_algo)
if compress_algo: args.append('--compress-algo %s' % compress_algo)
if default_key:
args.append('--sign')
args.append('--default-key %s' % default_key)
if digest_algo:
args.append('--digest-algo %s' % digest_algo)
## both can be used at the same time for an encrypted file which
## is decryptable with a passphrase or secretkey.
if symmetric: args.append('--symmetric')
if encrypt: args.append('--encrypt')
if throw_keyids: args.append('--throw-keyids')
if len(recipients) >= 1:
log.debug("GPG.encrypt() called for recipients '%s' with type '%s'"
% (recipients, type(recipients)))
if isinstance(recipients, (list, tuple)):
for recp in recipients:
if not _util._py3k:
if isinstance(recp, unicode):
try:
assert _parsers._is_hex(str(recp))
except AssertionError:
log.info("Can't accept recipient string: %s"
% recp)
else:
self._add_recipient_string(args, hidden_recipients, str(recp))
continue
## will give unicode in 2.x as '\uXXXX\uXXXX'
if isinstance(hidden_recipients, (list, tuple)):
if [s for s in hidden_recipients if recp in str(s)]:
args.append('--hidden-recipient %r' % recp)
else:
args.append('--recipient %r' % recp)
else:
args.append('--recipient %r' % recp)
continue
if isinstance(recp, str):
self._add_recipient_string(args, hidden_recipients, recp)
elif (not _util._py3k) and isinstance(recp, basestring):
for recp in recipients.split('\x20'):
self._add_recipient_string(args, hidden_recipients, recp)
elif _util._py3k and isinstance(recp, str):
for recp in recipients.split(' '):
self._add_recipient_string(args, hidden_recipients, recp)
## ...and now that we've proven py3k is better...
else:
log.debug("Don't know what to do with recipients: %r"
% recipients)
result = self._result_map['crypt'](self)
log.debug("Got data '%s' with type '%s'." % (data, type(data)))
self._handle_io(args, data, result, passphrase=passphrase, binary=True)
# Avoid writing raw encrypted bytes to terminal loggers and breaking
# them in that adorable way where they spew hieroglyphics until reset:
if armor:
log.debug("\n%s" % result.data)
if output_filename:
log.info("Writing encrypted output to file: %s" % output_filename)
with open(output_filename, 'wb') as fh:
fh.write(result.data)
fh.flush()
log.info("Encrypted output written successfully.")
return result | 0.003477 |
def _all_params(arr):
"""
Ensures that the argument is a list that either is empty or contains only GPParamSpec's
:param arr: list
:return:
"""
if not isinstance(arr, list):
raise TypeError("non-list value found for parameters")
return all(isinstance(x, GPParamSpec) for x in arr) | 0.008646 |
def execute(self, raise_on_error=True):
"Execute all the commands in the current pipeline"
stack = self.command_stack
if not stack:
return []
execute = self._execute_pipeline
conn = self.connection
if not conn:
conn = self.connection_pool.get_connection('batch')
# assign to self.connection so reset() releases the connection
# back to the pool after we're done
self.connection = conn
try:
return execute(conn, stack, raise_on_error)
except ConnectionError:
conn.disconnect()
return execute(conn, stack, raise_on_error)
finally:
self.reset() | 0.002766 |
def get_response_code(url, timeout=10):
'''
Visit the URL and return the HTTP response code as an int
'''
try:
req = urllib2.urlopen(url, timeout=timeout)
except HTTPError, e:
return e.getcode()
except Exception, _:
fail("Couldn't reach the URL '%s'" % url)
else:
return req.getcode() | 0.005764 |
def value(self):
"""
A tuple of values. This attribute can be set with any iterable; the
iterable is then evaluated into a tuple and stored at the bound field.
Whenever values are written to this attribute, they are passed through
the :meth:`~.AbstractCDataType.coerce` method of the
:attr:`~.AbstractField.type_` of the field. To revert the
:attr:`value` to its default, use the ``del`` operator.
"""
try:
return self._value
except AttributeError:
self.value = self._field.default()
return self._value | 0.003231 |
def getmembers(object, predicate=None):
"""Return all members of an object as (name, value) pairs sorted by name.
Optionally, only return members that satisfy a given predicate."""
results = []
for key in dir(object):
value = getattr(object, key)
if not predicate or predicate(value):
results.append((key, value))
results.sort()
return results | 0.002532 |
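Usage sketch, assuming the function above is in scope: filtering with callable keeps methods and drops plain attributes.

class Point:
    def __init__(self):
        self.x = 1

    def norm(self):
        return abs(self.x)

names = [name for name, value in getmembers(Point(), callable)]
assert 'norm' in names
assert 'x' not in names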
def matrix_and_line_shell(figsize=(5, 12), strip=False):
"""
Helper function to construct an empty figure that has space for a matrix,
a summary line plot directly below it, a colorbar axis, and an optional
"strip" axis that parallels the matrix (and shares its y-axis) where data
can be added to create callbacks.
Returns a tuple of (fig, matrix_ax, line_ax, strip_ax, colorbar_ax) that
can then be used to plot upon.
:param figsize: Tuple of (width, height), in inches, for the figure to be created.
:param strip: If `strip` is False, then the returned `strip_ax` will be
None and no strip axes will be created.
"""
fig = plt.figure(figsize=figsize)
# Constants to keep track
if strip:
STRIP_COLS = 1
else:
STRIP_COLS = 0
ROWS = 4
COLS = 8 + STRIP_COLS
MAT_COLS = 7
MAT_ROWS = 3
LINE_ROWS = ROWS - MAT_ROWS
mat_ax = plt.subplot2grid(
shape=(ROWS, COLS),
loc=(0, STRIP_COLS),
rowspan=MAT_ROWS,
colspan=MAT_COLS,
)
line_ax = plt.subplot2grid(
shape=(ROWS, COLS),
loc=(MAT_ROWS, STRIP_COLS),
rowspan=LINE_ROWS,
colspan=MAT_COLS,
sharex=mat_ax)
if strip:
strip_ax = plt.subplot2grid(
shape=(ROWS, COLS),
loc=(0, 0),
rowspan=MAT_ROWS,
colspan=STRIP_COLS,
sharey=mat_ax,
)
else:
strip_ax = None
cax = plt.subplot2grid(
shape=(ROWS, COLS),
loc=(ROWS - MAT_ROWS, MAT_COLS + STRIP_COLS),
rowspan=1,
colspan=1,
)
fig.subplots_adjust(hspace=0.1, wspace=0.2, right=0.88, left=0.23)
return fig, mat_ax, line_ax, strip_ax, cax | 0.000576 |
def prime_gen() -> int:
# credit to David Eppstein, Wolfgang Beneicke, Paul Hofstra
"""
A generator for prime numbers starting from 2.
"""
D = {}
yield 2
for q in itertools.islice(itertools.count(3), 0, None, 2):
p = D.pop(q, None)
if p is None:
D[q * q] = 2 * q
yield q
else:
x = p + q
while x in D:
x += p
D[x] = p | 0.002252 |
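Usage sketch, assuming the generator above is in scope (it also relies on the itertools import shown here):

import itertools

assert list(itertools.islice(prime_gen(), 8)) == [2, 3, 5, 7, 11, 13, 17, 19]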
def get_language_pack(locale):
"""Get/cache a language pack
Returns the langugage pack from cache if it exists, caches otherwise
>>> get_language_pack('fr')['Dashboards']
"Tableaux de bords"
"""
pack = ALL_LANGUAGE_PACKS.get(locale)
if not pack:
filename = DIR + '/{}/LC_MESSAGES/messages.json'.format(locale)
try:
with open(filename) as f:
pack = json.load(f)
ALL_LANGUAGE_PACKS[locale] = pack
except Exception:
# Assuming english, client side falls back on english
pass
return pack | 0.001642 |
def retrieveVals(self):
"""Retrieve values for graphs."""
if self._stats is None:
serverInfo = MemcachedInfo(self._host, self._port, self._socket_file)
stats = serverInfo.getStats()
else:
stats = self._stats
if stats is None:
raise Exception("Undetermined error accesing stats.")
stats['set_hits'] = stats.get('total_items')
if stats.has_key('cmd_set') and stats.has_key('total_items'):
stats['set_misses'] = stats['cmd_set'] - stats['total_items']
self.saveState(stats)
if self.hasGraph('memcached_connections'):
self.setGraphVal('memcached_connections', 'conn',
stats.get('curr_connections'))
if self.hasGraph('memcached_items'):
self.setGraphVal('memcached_items', 'items',
stats.get('curr_items'))
if self.hasGraph('memcached_memory'):
self.setGraphVal('memcached_memory', 'bytes',
stats.get('bytes'))
if self.hasGraph('memcached_connrate'):
self.setGraphVal('memcached_connrate', 'conn',
stats.get('total_connections'))
if self.hasGraph('memcached_traffic'):
self.setGraphVal('memcached_traffic', 'rxbytes',
stats.get('bytes_read'))
self.setGraphVal('memcached_traffic', 'txbytes',
stats.get('bytes_written'))
if self.hasGraph('memcached_reqrate'):
self.setGraphVal('memcached_reqrate', 'set',
stats.get('cmd_set'))
self.setGraphVal('memcached_reqrate', 'get',
stats.get('cmd_get'))
if self.graphHasField('memcached_reqrate', 'del'):
self.setGraphVal('memcached_reqrate', 'del',
safe_sum([stats.get('delete_hits'),
stats.get('delete_misses')]))
if self.graphHasField('memcached_reqrate', 'cas'):
self.setGraphVal('memcached_reqrate', 'cas',
safe_sum([stats.get('cas_hits'),
stats.get('cas_misses'),
stats.get('cas_badval')]))
if self.graphHasField('memcached_reqrate', 'incr'):
self.setGraphVal('memcached_reqrate', 'incr',
safe_sum([stats.get('incr_hits'),
stats.get('incr_misses')]))
if self.graphHasField('memcached_reqrate', 'decr'):
self.setGraphVal('memcached_reqrate', 'decr',
safe_sum([stats.get('decr_hits'),
stats.get('decr_misses')]))
if self.hasGraph('memcached_statget'):
self.setGraphVal('memcached_statget', 'hit',
stats.get('get_hits'))
self.setGraphVal('memcached_statget', 'miss',
stats.get('get_misses'))
self.setGraphVal('memcached_statget', 'total',
safe_sum([stats.get('get_hits'),
stats.get('get_misses')]))
if self.hasGraph('memcached_statset'):
self.setGraphVal('memcached_statset', 'hit',
stats.get('set_hits'))
self.setGraphVal('memcached_statset', 'miss',
stats.get('set_misses'))
self.setGraphVal('memcached_statset', 'total',
safe_sum([stats.get('set_hits'),
stats.get('set_misses')]))
if self.hasGraph('memcached_statdel'):
self.setGraphVal('memcached_statdel', 'hit',
stats.get('delete_hits'))
self.setGraphVal('memcached_statdel', 'miss',
stats.get('delete_misses'))
self.setGraphVal('memcached_statdel', 'total',
safe_sum([stats.get('delete_hits'),
stats.get('delete_misses')]))
if self.hasGraph('memcached_statcas'):
self.setGraphVal('memcached_statcas', 'hit',
stats.get('cas_hits'))
self.setGraphVal('memcached_statcas', 'miss',
stats.get('cas_misses'))
self.setGraphVal('memcached_statcas', 'badval',
stats.get('cas_badval'))
self.setGraphVal('memcached_statcas', 'total',
safe_sum([stats.get('cas_hits'),
stats.get('cas_misses'),
stats.get('cas_badval')]))
if self.hasGraph('memcached_statincrdecr'):
self.setGraphVal('memcached_statincrdecr', 'incr_hit',
stats.get('incr_hits'))
self.setGraphVal('memcached_statincrdecr', 'decr_hit',
stats.get('decr_hits'))
self.setGraphVal('memcached_statincrdecr', 'incr_miss',
stats.get('incr_misses'))
self.setGraphVal('memcached_statincrdecr', 'decr_miss',
stats.get('decr_misses'))
self.setGraphVal('memcached_statincrdecr', 'total',
safe_sum([stats.get('incr_hits'),
stats.get('decr_hits'),
stats.get('incr_misses'),
stats.get('decr_misses')]))
if self.hasGraph('memcached_statevict'):
self.setGraphVal('memcached_statevict', 'evict',
stats.get('evictions'))
if self.graphHasField('memcached_statevict', 'reclaim'):
self.setGraphVal('memcached_statevict', 'reclaim',
stats.get('reclaimed'))
if self.hasGraph('memcached_statauth'):
self.setGraphVal('memcached_statauth', 'reqs',
stats.get('auth_cmds'))
self.setGraphVal('memcached_statauth', 'errors',
stats.get('auth_errors'))
if self.hasGraph('memcached_hitpct'):
prev_stats = self._prev_stats
for (field_name, field_hits, field_misses) in (
('set', 'set_hits', 'set_misses'),
('get', 'get_hits', 'get_misses'),
('del', 'delete_hits', 'delete_misses'),
('cas', 'cas_hits', 'cas_misses'),
('incr', 'incr_hits', 'incr_misses'),
('decr', 'decr_hits', 'decr_misses')
):
if prev_stats:
if (stats.has_key(field_hits)
and prev_stats.has_key(field_hits)
and stats.has_key(field_misses)
and prev_stats.has_key(field_misses)):
hits = stats[field_hits] - prev_stats[field_hits]
misses = stats[field_misses] - prev_stats[field_misses]
total = hits + misses
if total > 0:
val = 100.0 * hits / total
else:
val = 0
self.setGraphVal('memcached_hitpct', field_name,
round(val, 2)) | 0.006601 |
def fix_bam_header(job, bamfile, sample_type, univ_options):
"""
This module modifies the header in BAMFILE
ARGUMENTS
1. bamfile: <JSid for a bam file>
2. sample_type: string of 'tumor_dna' or 'normal_dna'
3. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
RETURN VALUES
1. output_files: REFER output_files in run_bwa()
"""
job.fileStore.logToMaster('Running reheader on %s:%s' % (univ_options['patient'], sample_type))
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'aligned.bam': bamfile}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
parameters = ['view',
'-H',
input_files['aligned.bam']]
with open('/'.join([work_dir, 'aligned_bam.header']), 'w') as headerfile:
docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=headerfile)
with open(headerfile.name, 'r') as headerfile, \
open('/'.join([work_dir, 'output_bam.header']), 'w') as outheaderfile:
for line in headerfile:
if line.startswith('@PG'):
line = '\t'.join([x for x in line.strip().split('\t') if not x.startswith('CL')])
print(line.strip(), file=outheaderfile)
parameters = ['reheader',
docker_path(outheaderfile.name),
input_files['aligned.bam']]
with open('/'.join([work_dir, 'aligned_fixPG.bam']), 'w') as fixpg_bamfile:
docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=fixpg_bamfile)
output_file = job.fileStore.writeGlobalFile(fixpg_bamfile.name)
job.fileStore.deleteGlobalFile(bamfile)
add_rg = job.wrapJobFn(add_readgroups, output_file, sample_type, univ_options, disk='60G')
job.addChild(add_rg)
return add_rg.rv() | 0.003897 |
def lifetimes(self, dates, include_start_date, country_codes):
"""
Compute a DataFrame representing asset lifetimes for the specified date
range.
Parameters
----------
dates : pd.DatetimeIndex
The dates for which to compute lifetimes.
include_start_date : bool
Whether or not to count the asset as alive on its start_date.
This is useful in a backtesting context where `lifetimes` is being
used to signify "do I have data for this asset as of the morning of
this date?" For many financial metrics, (e.g. daily close), data
isn't available for an asset until the end of the asset's first
day.
country_codes : iterable[str]
The country codes to get lifetimes for.
Returns
-------
lifetimes : pd.DataFrame
A frame of dtype bool with `dates` as index and an Int64Index of
assets as columns. The value at `lifetimes.loc[date, asset]` will
be True iff `asset` existed on `date`. If `include_start_date` is
False, then lifetimes.loc[date, asset] will be false when date ==
asset.start_date.
See Also
--------
numpy.putmask
zipline.pipeline.engine.SimplePipelineEngine._compute_root_mask
"""
if isinstance(country_codes, string_types):
raise TypeError(
"Got string {!r} instead of an iterable of strings in "
"AssetFinder.lifetimes.".format(country_codes),
)
# normalize to a cache-key so that we can memoize results.
country_codes = frozenset(country_codes)
lifetimes = self._asset_lifetimes.get(country_codes)
if lifetimes is None:
self._asset_lifetimes[country_codes] = lifetimes = (
self._compute_asset_lifetimes(country_codes)
)
raw_dates = as_column(dates.asi8)
if include_start_date:
mask = lifetimes.start <= raw_dates
else:
mask = lifetimes.start < raw_dates
mask &= (raw_dates <= lifetimes.end)
return pd.DataFrame(mask, index=dates, columns=lifetimes.sid) | 0.000888 |
def get_empty_results(self):
"""
Because the base result type is different depending on the return structure
(e.g. list for flat, dict for object), `get_result_type` initializes the
`results` variable to the proper type
"""
assert self.result_type is not None, (
'{} must specify a `result_type` value or overwrite the '
'`get_empty_result` method.'.format(self.__class__.__name__)
)
return self.result_type() | 0.006061 |
def get_polygon(self, polygon_id):
"""
Retrieves a named polygon registered on the Agro API.
:param polygon_id: the ID of the polygon
:type id: str
:returns: a `pyowm.agro10.polygon.Polygon` object
"""
status, data = self.http_client.get_json(
NAMED_POLYGON_URI % str(polygon_id),
params={'appid': self.API_key},
headers={'Content-Type': 'application/json'})
return Polygon.from_dict(data) | 0.004141 |
def pprint_path(path):
"""
Print information about a pathlib / os.DirEntry() instance, listing all of its "is_*" attributes.
"""
print("\n*** %s" % path)
for attrname in sorted(dir(path)):
if attrname.startswith("is_"):
value = getattr(path, attrname)
print("%20s: %s" % (attrname, value))
print() | 0.005917 |
def _chunks(self, iterable, chunk_size):
"""Chunks data into chunk with size<=chunk_size."""
iterator = iter(iterable)
chunk = list(itertools.islice(iterator, 0, chunk_size))
while chunk:
yield chunk
chunk = list(itertools.islice(iterator, 0, chunk_size)) | 0.006431 |
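A standalone sketch of the same chunking idea (the method's self parameter is dropped here):

import itertools

def chunks(iterable, chunk_size):
    iterator = iter(iterable)
    chunk = list(itertools.islice(iterator, 0, chunk_size))
    while chunk:
        yield chunk
        chunk = list(itertools.islice(iterator, 0, chunk_size))

assert list(chunks(range(7), 3)) == [[0, 1, 2], [3, 4, 5], [6]]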
def blank_stim(self,type=None,fill=0):
'''Makes a blank version of the stim. If a type is not given, it is returned as the same type as the current stim.
If a column stim, blanks are filled in with ``fill``.'''
blank = copy.copy(self)
blank.name = 'Blank'
if type==None:
type = self.type()
if type=="column":
num_reps = self.reps
if num_reps==None:
if self.type()=="column":
self.read_file()
num_reps = len(self.column)
else:
nl.notify('Error: requested to return a blank column, but I can\'t figure out how many reps to make it!',level=nl.level.error)
blank.column = [fill]*num_reps
return blank
if type=="times":
blank.times = []
return blank | 0.016355 |
def send_many(kwargs_list):
"""
Similar to mail.send(), but this function accepts a list of kwargs.
Internally, it uses Django's bulk_create command for efficiency reasons.
Currently send_many() can't be used to send emails with priority = 'now'.
"""
emails = []
for kwargs in kwargs_list:
emails.append(send(commit=False, **kwargs))
Email.objects.bulk_create(emails) | 0.002457 |
def setEmissionClass(self, typeID, clazz):
"""setEmissionClass(string, string) -> None
Sets the emission class of vehicles of this type.
"""
self._connection._sendStringCmd(
tc.CMD_SET_VEHICLETYPE_VARIABLE, tc.VAR_EMISSIONCLASS, typeID, clazz) | 0.010417 |
def drag_and_drop(self, source, target):
"""
Holds down the left mouse button on the source element,
then moves to the target element and releases the mouse button.
:Args:
- source: The element to mouse down.
- target: The element to mouse up.
"""
self.click_and_hold(source)
self.release(target)
return self | 0.005076 |
def dump_frames(self, frame):
""" dumps frames chain in a representation suitable for serialization
and remote (debugger) client usage.
"""
current_thread = threading.currentThread()
frames = []
frame_browser = frame
# Browse the frame chain as far as we can
_logger.f_debug("dump_frames(), frame analysis:")
spacer = ""
while hasattr(frame_browser, 'f_back') and frame_browser.f_back != self.frame_beginning:
spacer += "="
_logger.f_debug("%s>frame = %s, frame.f_code = %s, frame.f_back = %s, "
"self.frame_beginning = %s",
spacer,
hex(id(frame_browser)),
frame_browser.f_code,
hex(id(frame_browser.f_back)),
hex(id(self.frame_beginning)))
# At root frame, globals == locals so we dump only globals
if hasattr(frame_browser.f_back, 'f_back')\
and frame_browser.f_back.f_back != self.frame_beginning:
locals_vars_list = self.extract_object_properties(frame_browser.f_locals,
limit_size=True)
else:
locals_vars_list = []
globals_vars_list = self.extract_object_properties(frame_browser.f_globals,
limit_size=True)
# normalize path sent to debugging client
file_path = self.normalize_path_out(frame_browser.f_code.co_filename)
frame_name = "%s() [%s]" % (frame_browser.f_code.co_name, current_thread.name,)
remote_frame = {
'id': id(frame_browser),
'name': frame_name,
'line_number': frame_browser.f_lineno, # Warning 1 based
'file_path': file_path,
'f_locals': locals_vars_list + globals_vars_list,
'thread': current_thread.ident,
'thread_name': current_thread.name
}
frames.append(remote_frame)
frame_browser = frame_browser.f_back
return frames | 0.00522 |
def energy_prolongation_smoother(A, T, Atilde, B, Bf, Cpt_params,
krylov='cg', maxiter=4, tol=1e-8,
degree=1, weighting='local',
prefilter={}, postfilter={}):
"""Minimize the energy of the coarse basis functions (columns of T).
Both root-node and non-root-node style prolongation smoothing is available,
see Cpt_params description below.
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix
T : bsr_matrix
Tentative prolongator, a NxM sparse matrix (M < N)
Atilde : csr_matrix
Strength of connection matrix
B : array
Near-nullspace modes for coarse grid. Has shape (M,k) where
k is the number of coarse candidate vectors.
Bf : array
Near-nullspace modes for fine grid. Has shape (N,k) where
k is the number of coarse candidate vectors.
Cpt_params : tuple
Tuple of the form (bool, dict). If the Cpt_params[0] = False, then the
standard SA prolongation smoothing is carried out. If True, then
root-node style prolongation smoothing is carried out. The dict must
be a dictionary of parameters containing, (1) for P_I, P_I.T is the
injection matrix for the Cpts, (2) I_F is an identity matrix for only the
F-points (i.e. I, but with zero rows and columns for C-points) and I_C is
the C-point analogue to I_F. See Notes below for more information.
krylov : string
'cg' for SPD systems. Solve A T = 0 in a constraint space with CG
'cgnr' for nonsymmetric and/or indefinite systems.
Solve A T = 0 in a constraint space with CGNR
'gmres' for nonsymmetric and/or indefinite systems.
Solve A T = 0 in a constraint space with GMRES
maxiter : integer
Number of energy minimization steps to apply to the prolongator
tol : scalar
Minimization tolerance
degree : int
Generate sparsity pattern for P based on (Atilde^degree T)
weighting : string
'block', 'diagonal' or 'local' construction of the diagonal preconditioning
'local' Uses a local row-wise weight based on the Gershgorin estimate.
Avoids any potential under-damping due to inaccurate spectral
radius estimates.
'block' Uses a block diagonal inverse of A if A is BSR.
'diagonal' Uses the inverse of the diagonal of A
prefilter : dictionary
Filter elements by row in sparsity pattern for P to reduce operator and
setup complexity. If None or an empty dictionary, then no dropping in P
is done. If postfilter has key 'k', then the largest 'k' entries are
kept in each row. If postfilter has key 'theta', all entries such that
:math:`P[i,j] < kwargs['theta']*max(abs(P[i,:]))`
are dropped. If postfilter['k'] and postfilter['theta'] are present,
then they are used with the union of their patterns.
postfilter : dictionary
Filters elements by row in smoothed P to reduce operator complexity.
Only supported if using the rootnode_solver. If None or an empty
dictionary, no dropping in P is done. If postfilter has key 'k',
then the largest 'k' entries are kept in each row. If postfilter
has key 'theta', all entries such that
:math:`P[i,j] < kwargs['theta']*max(abs(P[i,:]))`
are dropped. If postfilter['k'] and postfilter['theta'] are present,
then they are used with the union of their patterns.
Returns
-------
T : bsr_matrix
Smoothed prolongator
Notes
-----
Only 'diagonal' weighting is supported for the CGNR method, because
we are working with A^* A and not A.
When Cpt_params[0] == True, root-node style prolongation smoothing is used
to minimize the energy of columns of T. Essentially, an identity block is
maintained in T, corresponding to injection from the coarse-grid to the
fine-grid root-nodes. See [2011OlScTu]_ for more details, and see
util.utils.get_Cpt_params for the helper function to generate Cpt_params.
If Cpt_params[0] == False, the energy of columns of T are still
minimized, but without maintaining the identity block.
See [1999cMaBrVa]_ for more details on smoothed aggregation.
Examples
--------
>>> from pyamg.aggregation import energy_prolongation_smoother
>>> from pyamg.gallery import poisson
>>> from scipy.sparse import coo_matrix
>>> import numpy as np
>>> data = np.ones((6,))
>>> row = np.arange(0,6)
>>> col = np.kron([0,1],np.ones((3,)))
>>> T = coo_matrix((data,(row,col)),shape=(6,2)).tocsr()
>>> print T.todense()
[[ 1. 0.]
[ 1. 0.]
[ 1. 0.]
[ 0. 1.]
[ 0. 1.]
[ 0. 1.]]
>>> A = poisson((6,),format='csr')
>>> B = np.ones((2,1),dtype=float)
>>> P = energy_prolongation_smoother(A,T,A,B, None, (False,{}))
>>> print P.todense()
[[ 1. 0. ]
[ 1. 0. ]
[ 0.66666667 0.33333333]
[ 0.33333333 0.66666667]
[ 0. 1. ]
[ 0. 1. ]]
References
----------
.. [1999cMaBrVa] Jan Mandel, Marian Brezina, and Petr Vanek
"Energy Optimization of Algebraic Multigrid Bases"
Computing 62, 205-228, 1999
http://dx.doi.org/10.1007/s006070050022
.. [2011OlScTu] Olson, L. and Schroder, J. and Tuminaro, R.,
"A general interpolation strategy for algebraic
multigrid using energy minimization", SIAM Journal
on Scientific Computing (SISC), vol. 33, pp.
966--991, 2011.
"""
# Test Inputs
if maxiter < 0:
raise ValueError('maxiter must be > 0')
if tol > 1:
raise ValueError('tol must be <= 1')
if sparse.isspmatrix_csr(A):
A = A.tobsr(blocksize=(1, 1), copy=False)
elif sparse.isspmatrix_bsr(A):
pass
else:
raise TypeError("A must be csr_matrix or bsr_matrix")
if sparse.isspmatrix_csr(T):
T = T.tobsr(blocksize=(1, 1), copy=False)
elif sparse.isspmatrix_bsr(T):
pass
else:
raise TypeError("T must be csr_matrix or bsr_matrix")
if T.blocksize[0] != A.blocksize[0]:
raise ValueError("T row-blocksize should be the same as A blocksize")
if B.shape[0] != T.shape[1]:
raise ValueError("B is the candidates for the coarse grid. \
num_rows(b) = num_cols(T)")
if min(T.nnz, A.nnz) == 0:
return T
if not sparse.isspmatrix_csr(Atilde):
raise TypeError("Atilde must be csr_matrix")
if ('theta' in prefilter) and (prefilter['theta'] == 0):
prefilter.pop('theta', None)
if ('theta' in postfilter) and (postfilter['theta'] == 0):
postfilter.pop('theta', None)
# Preprocess Atilde, the strength matrix
if Atilde is None:
Atilde = sparse.csr_matrix((np.ones(len(A.indices)),
A.indices.copy(), A.indptr.copy()),
shape=(A.shape[0]/A.blocksize[0],
A.shape[1]/A.blocksize[1]))
# If Atilde has no nonzeros, then return T
if min(T.nnz, A.nnz) == 0:
return T
# Expand allowed sparsity pattern for P through multiplication by Atilde
if degree > 0:
# Construct Sparsity_Pattern by multiplying with Atilde
T.sort_indices()
shape = (int(T.shape[0]/T.blocksize[0]),
int(T.shape[1]/T.blocksize[1]))
Sparsity_Pattern = sparse.csr_matrix((np.ones(T.indices.shape),
T.indices, T.indptr),
shape=shape)
AtildeCopy = Atilde.copy()
for i in range(degree):
Sparsity_Pattern = AtildeCopy*Sparsity_Pattern
# Optional filtering of sparsity pattern before smoothing
if 'theta' in prefilter and 'k' in prefilter:
Sparsity_theta = filter_matrix_rows(Sparsity_Pattern,
prefilter['theta'])
Sparsity_Pattern = truncate_rows(Sparsity_Pattern, prefilter['k'])
# Union two sparsity patterns
Sparsity_Pattern += Sparsity_theta
elif 'k' in prefilter:
Sparsity_Pattern = truncate_rows(Sparsity_Pattern, prefilter['k'])
elif 'theta' in prefilter:
Sparsity_Pattern = filter_matrix_rows(Sparsity_Pattern,
prefilter['theta'])
elif len(prefilter) > 0:
raise ValueError("Unrecognized prefilter option")
# UnAmal returns a BSR matrix with 1's in the nonzero locations
Sparsity_Pattern = UnAmal(Sparsity_Pattern,
T.blocksize[0], T.blocksize[1])
Sparsity_Pattern.sort_indices()
else:
# If degree is 0, just copy T for the sparsity pattern
Sparsity_Pattern = T.copy()
if 'theta' in prefilter and 'k' in prefilter:
Sparsity_theta = filter_matrix_rows(Sparsity_Pattern,
prefilter['theta'])
Sparsity_Pattern = truncate_rows(Sparsity_Pattern, prefilter['k'])
# Union two sparsity patterns
Sparsity_Pattern += Sparsity_theta
elif 'k' in prefilter:
Sparsity_Pattern = truncate_rows(Sparsity_Pattern, prefilter['k'])
elif 'theta' in prefilter:
Sparsity_Pattern = filter_matrix_rows(Sparsity_Pattern,
prefilter['theta'])
elif len(prefilter) > 0:
raise ValueError("Unrecognized prefilter option")
Sparsity_Pattern.data[:] = 1.0
Sparsity_Pattern.sort_indices()
# If using root nodes, enforce identity at C-points
if Cpt_params[0]:
Sparsity_Pattern = Cpt_params[1]['I_F'] * Sparsity_Pattern
Sparsity_Pattern = Cpt_params[1]['P_I'] + Sparsity_Pattern
# Construct array of inv(Bi'Bi), where Bi is B restricted to row i's
# sparsity pattern in Sparsity Pattern. This array is used multiple times
# in Satisfy_Constraints(...).
BtBinv = compute_BtBinv(B, Sparsity_Pattern)
    # If using root nodes and B has more columns than A's blocksize, then
# T must be updated so that T*B = Bfine. Note, if this is a 'secondpass'
# after dropping entries in P, then we must re-enforce the constraints
if ((Cpt_params[0] and (B.shape[1] > A.blocksize[0])) or
('secondpass' in postfilter)):
T = filter_operator(T, Sparsity_Pattern, B, Bf, BtBinv)
# Ensure identity at C-pts
if Cpt_params[0]:
T = Cpt_params[1]['I_F']*T + Cpt_params[1]['P_I']
# Iteratively minimize the energy of T subject to the constraints of
# Sparsity_Pattern and maintaining T's effect on B, i.e. T*B =
# (T+Update)*B, i.e. Update*B = 0
if krylov == 'cg':
T = cg_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern,
maxiter, tol, weighting, Cpt_params)
elif krylov == 'cgnr':
T = cgnr_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern,
maxiter, tol, weighting, Cpt_params)
elif krylov == 'gmres':
T = gmres_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern,
maxiter, tol, weighting, Cpt_params)
T.eliminate_zeros()
# Filter entries in P, only in the rootnode case,
# i.e., Cpt_params[0] == True
if ((len(postfilter) == 0) or ('secondpass' in postfilter) or
(Cpt_params[0] is False)):
return T
else:
if 'theta' in postfilter and 'k' in postfilter:
T_theta = filter_matrix_rows(T, postfilter['theta'])
T_k = truncate_rows(T, postfilter['k'])
# Union two sparsity patterns
T_theta.data[:] = 1.0
T_k.data[:] = 1.0
T_filter = T_theta + T_k
T_filter.data[:] = 1.0
T_filter = T.multiply(T_filter)
elif 'k' in postfilter:
T_filter = truncate_rows(T, postfilter['k'])
elif 'theta' in postfilter:
T_filter = filter_matrix_rows(T, postfilter['theta'])
else:
raise ValueError("Unrecognized postfilter option")
# Re-smooth T_filter and re-fit the modes B into the span.
# Note, we set 'secondpass', because this is the second
# filtering pass
T = energy_prolongation_smoother(A, T_filter,
Atilde, B, Bf, Cpt_params,
krylov=krylov, maxiter=1,
tol=1e-8, degree=0,
weighting=weighting,
prefilter={},
postfilter={'secondpass': True})
return T | 0.000305 |
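The prefilter branches above union a strength-based ('theta') pattern with a largest-k pattern. Below is a rough, self-contained sketch of that union step; row_theta_filter and row_truncate are illustrative stand-ins, not pyamg's filter_matrix_rows / truncate_rows.
import numpy as np
from scipy import sparse

def row_theta_filter(S, theta):
    # zero entries smaller than theta times the largest magnitude in their row
    S = S.tocsr().copy()
    for i in range(S.shape[0]):
        row = S.data[S.indptr[i]:S.indptr[i + 1]]
        if row.size:
            row[np.abs(row) < theta * np.abs(row).max()] = 0.0
    S.eliminate_zeros()
    return S

def row_truncate(S, k):
    # keep (at least) the k largest-magnitude entries in each row
    S = S.tocsr().copy()
    for i in range(S.shape[0]):
        row = S.data[S.indptr[i]:S.indptr[i + 1]]
        if row.size > k:
            cutoff = np.sort(np.abs(row))[-k]
            row[np.abs(row) < cutoff] = 0.0
    S.eliminate_zeros()
    return S

S = sparse.random(6, 6, density=0.6, format='csr', random_state=0)
S_theta = row_theta_filter(S, 0.5)
S_k = row_truncate(S, 2)
S_theta.data[:] = 1.0
S_k.data[:] = 1.0
pattern = S_theta + S_k      # union of the two sparsity patterns
pattern.data[:] = 1.0
print(pattern.nnz)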
def p_user_add_link(self):
'''
user add link.
'''
        if not self.check_post_role()['ADD']:
            return False
post_data = self.get_post_data()
post_data['user_name'] = self.get_current_user()
cur_uid = tools.get_uudd(2)
while MLink.get_by_uid(cur_uid):
cur_uid = tools.get_uudd(2)
if MLink.create_link(cur_uid, post_data):
output = {
                'addinfo': 1,
}
else:
output = {
                'addinfo': 0,
}
return json.dump(output, self) | 0.003185 |
def restore_model(self, directory=None, file=None):
"""
Restore TensorFlow model. If no checkpoint file is given, the latest checkpoint is
restored. If no checkpoint directory is given, the model's default saver directory is
used (unless file specifies the entire path).
Args:
directory: Optional checkpoint directory.
file: Optional checkpoint file, or path if directory not given.
"""
self.model.restore(directory=directory, file=file) | 0.007722 |
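A short hedged usage sketch; `agent` stands for an instance of this class and the checkpoint paths are made up.
agent.restore_model(directory='./checkpoints')         # restore the latest checkpoint in the directory
agent.restore_model(file='./checkpoints/model-1000')   # restore one specific checkpoint file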
def process_orders(self, orderbook):
        ''' Default, constant orders processor. Override it for more
        sophisticated strategies '''
for stock, alloc in orderbook.iteritems():
self.logger.info('{}: Ordered {} {} stocks'.format(
self.datetime, stock, alloc))
if isinstance(alloc, int):
self.order(stock, alloc)
elif isinstance(alloc, float) and \
alloc >= -1 and alloc <= 1:
self.order_percent(stock, alloc)
else:
self.logger.warning(
'{}: invalid order for {}: {})'
.format(self.datetime, stock, alloc)) | 0.002874 |
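A hypothetical orderbook showing which branch each value type takes; `strategy` stands for an instance of the class above and the symbols are made up.
orderbook = {'AAPL': 100,      # int -> self.order('AAPL', 100)
             'GOOG': 0.25,     # float in [-1, 1] -> self.order_percent('GOOG', 0.25)
             'MSFT': 'oops'}   # anything else only triggers the warning branch
strategy.process_orders(orderbook)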
def _init_file(self):
"""
Initialize a new password file and set the reference password.
"""
self.keyring_key = self._get_new_password()
# set a reference password, used to check that the password provided
# matches for subsequent checks.
self.set_password('keyring-setting',
'password reference',
'password reference value')
self._write_config_value('keyring-setting',
'scheme',
self.scheme)
self._write_config_value('keyring-setting',
'version',
self.version) | 0.002786 |
def handle_special_cases(request, data, baseURI, meta):
"""Handle sepcial cases for returned values by the doAction function"""
if request.method == 'OPTIONS':
r = HttpResponse('')
return r
if data is None:
return gen404(request, baseURI, 'data')
if data.__class__.__name__ == 'PlugIt500':
return gen500(request, baseURI)
if data.__class__.__name__ == 'PlugItSpecialCode':
r = HttpResponse('')
r.status_code = data.code
return r
if data.__class__.__name__ == 'PlugItRedirect':
url = data.url
if not data.no_prefix:
url = baseURI + url
return HttpResponseRedirect(url)
if data.__class__.__name__ == 'PlugItFile':
response = HttpResponse(data.content, content_type=data.content_type)
response['Content-Disposition'] = data.content_disposition
return response
if data.__class__.__name__ == 'PlugItNoTemplate':
response = HttpResponse(data.content)
return response
if meta.get('json_only', None): # Just send the json back
# Return application/json if requested
if 'HTTP_ACCEPT' in request.META and request.META['HTTP_ACCEPT'].find('json') != -1:
return JsonResponse(data)
        # Return json data without an html content type, since json was not
        # explicitly requested
result = json.dumps(data)
return HttpResponse(result)
if meta.get('xml_only', None): # Just send the xml back
return HttpResponse(data['xml'], content_type='application/xml') | 0.001271 |
def remove_locations(node):
"""
Removes locations from the given AST tree completely
"""
def fix(node):
if 'lineno' in node._attributes and hasattr(node, 'lineno'):
del node.lineno
if 'col_offset' in node._attributes and hasattr(node, 'col_offset'):
del node.col_offset
for child in iter_child_nodes(node):
fix(child)
fix(node) | 0.002433 |
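A small usage check; the snippet assumes iter_child_nodes comes from the standard ast module, which remove_locations relies on.
import ast
from ast import iter_child_nodes  # dependency of remove_locations above

tree = ast.parse("x = 1 + 2")
remove_locations(tree)
# the lineno/col_offset attributes set by the parser are gone from every node
print(ast.dump(tree))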
def _apply_concretization_strategies(self, idx, strategies, action): # pylint: disable=unused-argument
"""
Applies concretization strategies on the index, until one of them succeeds.
"""
for s in strategies:
try:
idxes = s.concretize(self, idx)
except SimUnsatError:
idxes = None
if idxes:
return idxes
raise SimMemoryAddressError("Unable to concretize index %s" % idx) | 0.010081 |
def clear_published(self):
"""Removes the published status.
:raise: ``NoAccess`` -- ``Metadata.isRequired()`` is ``true`` or ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
metadata = Metadata(**settings.METADATA['published'])
if metadata.is_read_only() or metadata.is_required():
raise NoAccess()
self._my_map['published'] = False | 0.006579 |
def patches(destination, settings=None, traverse_bases=True,
filter=default_filter, recursive=True, use_decorators=True):
"""Decorator to create a patch for each member of a module or a class.
Parameters
----------
destination : object
Patch destination.
settings : gorilla.Settings
Settings.
traverse_bases : bool
If the object is a class, the base classes are also traversed.
filter : function
Attributes for which the function returns ``False`` are skipped. The
function needs to define two parameters: ``name``, the attribute name,
and ``obj``, the attribute value. If ``None``, no attribute is skipped.
recursive : bool
If ``True``, and a hit occurs due to an attribute at the destination
already existing with the given name, and both the member and the
target attributes are classes, then instead of creating a patch
directly with the member attribute value as is, a patch for each of its
own members is created with the target as new destination.
    use_decorators : bool
        Allows any modifier decorator to be taken into consideration, enabling
        more granular customizations.
Returns
-------
object
The decorated object.
Note
----
A 'target' differs from a 'destination' in that a target represents an
existing attribute at the destination about to be hit by a patch.
See Also
--------
:class:`Patch`, :func:`create_patches`.
"""
def decorator(wrapped):
settings_ = copy.deepcopy(settings)
patches = create_patches(
destination, wrapped, settings=settings_,
traverse_bases=traverse_bases, filter=filter, recursive=recursive,
use_decorators=use_decorators)
data = get_decorator_data(_get_base(wrapped), set_default=True)
data.patches.extend(patches)
return wrapped
return decorator | 0.000507 |
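A hedged usage sketch of the decorator, assuming gorilla's documented find_patches/apply helpers; the destination class Guinea is a stand-in defined on the spot.
import sys
import gorilla

class Guinea(object):            # stand-in destination
    pass

@gorilla.patches(Guinea)
class MyExtensions(object):
    def needle(self):
        return "patched"

# the decorator only registers the patches; they still need to be applied
for patch in gorilla.find_patches([sys.modules[__name__]]):
    gorilla.apply(patch)

print(Guinea().needle())         # "patched"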
def dump_stream(self, iterator, stream):
"""
Override because Pandas UDFs require a START_ARROW_STREAM before the Arrow stream is sent.
This should be sent after creating the first record batch so in case of an error, it can
be sent back to the JVM before the Arrow stream starts.
"""
def init_stream_yield_batches():
should_write_start_length = True
for series in iterator:
batch = self._create_batch(series)
if should_write_start_length:
write_int(SpecialLengths.START_ARROW_STREAM, stream)
should_write_start_length = False
yield batch
return ArrowStreamSerializer.dump_stream(self, init_stream_yield_batches(), stream) | 0.006313 |
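The essential pattern, written stand-alone: emit the stream-start marker only after the first batch exists, so an error while building it never leaves a half-started stream. Names below are illustrative, not Spark's.
import io

def dump_with_lazy_header(items, stream, header=b"START\n"):
    wrote_header = False
    for item in items:
        if not wrote_header:
            stream.write(header)     # written only once the first item is ready
            wrote_header = True
        stream.write(item)

buf = io.BytesIO()
dump_with_lazy_header([b"batch-1", b"batch-2"], buf)
print(buf.getvalue())                # b'START\nbatch-1batch-2'
dump_with_lazy_header([], io.BytesIO())   # empty iterator -> no header written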
def local_self_attention_layer(hparams, prefix):
"""Create self-attention layer based on hyperparameters."""
return transformer_layers.LocalSelfAttention(
num_heads=hparams.get(prefix + "num_heads"),
num_memory_heads=hparams.get(prefix + "num_memory_heads"),
radius=hparams.local_attention_radius,
key_value_size=hparams.d_kv,
shared_kv=hparams.get(prefix + "shared_kv", False),
attention_kwargs=attention_kwargs_from_hparams(hparams)) | 0.006316 |
def _encrypt(self, archive):
"""Encrypts the compressed archive using GPG.
If encryption fails for any reason, it should be logged by sos but not
cause execution to stop. The assumption is that the unencrypted archive
would still be of use to the user, and/or that the end user has another
means of securing the archive.
Returns the name of the encrypted archive, or raises an exception to
signal that encryption failed and the unencrypted archive name should
be used.
"""
arc_name = archive.replace("sosreport-", "secured-sosreport-")
arc_name += ".gpg"
enc_cmd = "gpg --batch -o %s " % arc_name
env = None
if self.enc_opts["key"]:
# need to assume a trusted key here to be able to encrypt the
# archive non-interactively
enc_cmd += "--trust-model always -e -r %s " % self.enc_opts["key"]
enc_cmd += archive
if self.enc_opts["password"]:
# prevent change of gpg options using a long password, but also
# prevent the addition of quote characters to the passphrase
passwd = "%s" % self.enc_opts["password"].replace('\'"', '')
env = {"sos_gpg": passwd}
enc_cmd += "-c --passphrase-fd 0 "
enc_cmd = "/bin/bash -c \"echo $sos_gpg | %s\"" % enc_cmd
enc_cmd += archive
r = sos_get_command_output(enc_cmd, timeout=0, env=env)
if r["status"] == 0:
return arc_name
elif r["status"] == 2:
if self.enc_opts["key"]:
msg = "Specified key not in keyring"
else:
msg = "Could not read passphrase"
else:
# TODO: report the actual error from gpg. Currently, we cannot as
# sos_get_command_output() does not capture stderr
msg = "gpg exited with code %s" % r["status"]
raise Exception(msg) | 0.001016 |
def copy_keys(source, destination, keys=None):
"""
Add keys in source to destination
Parameters
----------
source : dict
destination: dict
keys : None | iterable
The keys in source to be copied into destination. If
None, then `keys = destination.keys()`
"""
if keys is None:
keys = destination.keys()
for k in set(source) & set(keys):
destination[k] = source[k]
return destination | 0.002179 |
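Two quick checks of the behaviour described in the docstring:
src = {'a': 1, 'b': 2}
dst = {'a': 0, 'c': 3}
print(copy_keys(src, dst))                   # {'a': 1, 'c': 3}  (only keys already in dst)
print(copy_keys(src, {'x': 9}, keys=['b']))  # {'x': 9, 'b': 2}  (explicit key list)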
def from_email(self, value):
"""The email address of the sender
:param value: The email address of the sender
:type value: From, str, tuple
"""
if isinstance(value, str):
value = From(value, None)
if isinstance(value, tuple):
value = From(value[0], value[1])
self._from_email = value | 0.005495 |
def get_wrapped_stream(stream):
"""
Given a stream, wrap it in a `StreamWrapper` instance and return the wrapped stream.
:param stream: A stream instance to wrap
:returns: A new, wrapped stream
:rtype: :class:`StreamWrapper`
"""
if stream is None:
raise TypeError("must provide a stream to wrap")
encoding = getattr(stream, "encoding", None)
encoding = get_output_encoding(encoding)
return StreamWrapper(stream, encoding, "replace", line_buffering=True) | 0.003976 |
def num_buttons(self):
"""The number of buttons on a device with
the :attr:`~libinput.constant.DeviceCapability.TABLET_PAD` capability.
Buttons on a pad device are numbered sequentially, see
`Tablet pad button numbers`_ for details.
Returns:
int: The number of buttons supported by the device.
Raises:
AttributeError
"""
num = self._libinput.libinput_device_tablet_pad_get_num_buttons(
self._handle)
if num < 0:
raise AttributeError('This device is not a tablet pad device')
return num | 0.030769 |
def present(name,
user='root',
minute='*',
hour='*',
daymonth='*',
month='*',
dayweek='*',
comment=None,
commented=False,
identifier=False,
special=None):
'''
Verifies that the specified cron job is present for the specified user.
It is recommended to use `identifier`. Otherwise the cron job is installed
twice if you change the name.
For more advanced information about what exactly can be set in the cron
timing parameters, check your cron system's documentation. Most Unix-like
systems' cron documentation can be found via the crontab man page:
``man 5 crontab``.
name
The command that should be executed by the cron job.
user
The name of the user whose crontab needs to be modified, defaults to
the root user
minute
The information to be set into the minute section, this can be any
string supported by your cron system's the minute field. Default is
``*``
hour
The information to be set in the hour section. Default is ``*``
daymonth
The information to be set in the day of month section. Default is ``*``
month
The information to be set in the month section. Default is ``*``
dayweek
The information to be set in the day of week section. Default is ``*``
comment
User comment to be added on line previous the cron job
commented
The cron job is set commented (prefixed with ``#DISABLED#``).
Defaults to False.
.. versionadded:: 2016.3.0
identifier
Custom-defined identifier for tracking the cron line for future crontab
edits. This defaults to the state name
special
A special keyword to specify periodicity (eg. @reboot, @hourly...).
Quotes must be used, otherwise PyYAML will strip the '@' sign.
.. versionadded:: 2016.3.0
'''
name = name.strip()
if identifier is False:
identifier = name
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
if __opts__['test']:
status = _check_cron(user,
cmd=name,
minute=minute,
hour=hour,
daymonth=daymonth,
month=month,
dayweek=dayweek,
comment=comment,
commented=commented,
identifier=identifier,
special=special)
ret['result'] = None
if status == 'absent':
ret['comment'] = 'Cron {0} is set to be added'.format(name)
elif status == 'present':
ret['result'] = True
ret['comment'] = 'Cron {0} already present'.format(name)
elif status == 'update':
ret['comment'] = 'Cron {0} is set to be updated'.format(name)
return ret
if special is None:
data = __salt__['cron.set_job'](user=user,
minute=minute,
hour=hour,
daymonth=daymonth,
month=month,
dayweek=dayweek,
cmd=name,
comment=comment,
commented=commented,
identifier=identifier)
else:
data = __salt__['cron.set_special'](user=user,
special=special,
cmd=name,
comment=comment,
commented=commented,
identifier=identifier)
if data == 'present':
ret['comment'] = 'Cron {0} already present'.format(name)
return ret
if data == 'new':
ret['comment'] = 'Cron {0} added to {1}\'s crontab'.format(name, user)
ret['changes'] = {user: name}
return ret
if data == 'updated':
ret['comment'] = 'Cron {0} updated'.format(name)
ret['changes'] = {user: name}
return ret
ret['comment'] = ('Cron {0} for user {1} failed to commit with error \n{2}'
.format(name, user, data))
ret['result'] = False
return ret | 0.000216 |
def upload_read(infile, table):
"""
Reads a table from a MagIC upload (or downloaded) txt file, puts data in a
list of dictionaries
"""
delim = 'tab'
hold, magic_data, magic_record, magic_keys = [], [], {}, []
f = open(infile, "r")
#
# look for right table
#
line = f.readline()[:-1]
file_type = line.split('\t')[1]
if file_type == 'delimited':
file_type = line.split('\t')[2]
if delim == 'tab':
line = f.readline()[:-1].split('\t')
else:
f.close()
print("only tab delimitted files are supported now")
return
    while file_type != table:
        # skip the rest of this table's records until the block separator
        while line[0][0:5] != ">>>>>":
            line = f.readline()[:-1].split('\t')
        line = f.readline()[:-1]
        file_type = line.split('\t')[1]
        if file_type == 'delimited':
            file_type = line.split('\t')[2]
        line = f.readline()[:-1].split('\t')
    # the current line holds the column names of the requested table
    for key in line:
        magic_keys.append(key)
    for line in f.readlines():
        if line[0:5] == ">>>>>":
            break
        rec = line[:-1].split('\t')
        hold.append(rec)
for rec in hold:
magic_record = {}
if len(magic_keys) != len(rec):
print("Uneven record lengths detected: ", rec)
input("Return to continue.... ")
for k in range(len(magic_keys)):
magic_record[magic_keys[k]] = rec[k]
magic_data.append(magic_record)
f.close()
return magic_data | 0.000666 |
def unsubscribe(self, destination=None, id=None, headers=None, **keyword_headers):
"""
Unsubscribe from a destination by either id or the destination name.
:param str destination: the name of the topic or queue to unsubscribe from
:param str id: the unique identifier of the topic or queue to unsubscribe from
:param dict headers: a map of any additional headers the broker requires
:param keyword_headers: any additional headers the broker requires
"""
assert id is not None or destination is not None, "'id' or 'destination' is required"
headers = utils.merge_headers([headers, keyword_headers])
if id:
headers[HDR_ID] = id
if destination:
headers[HDR_DESTINATION] = destination
self.send_frame(CMD_UNSUBSCRIBE, headers) | 0.008304 |
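Hedged usage, assuming `conn` is an established connection from this client and the subscription was created with an explicit id.
conn.subscribe(destination='/queue/test', id='sub-0', ack='auto')
# consume messages here
conn.unsubscribe(id='sub-0')          # or: conn.unsubscribe(destination='/queue/test')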
def normalizeURL(url):
"""Normalize a URL, converting normalization failures to
DiscoveryFailure"""
try:
normalized = urinorm.urinorm(url)
except ValueError as why:
raise DiscoveryFailure('Normalizing identifier: %s' % (why, ), None)
else:
return urllib.parse.urldefrag(normalized)[0] | 0.003049 |
def _get_by_index(self, index, path=None):
"""Returns the 2-tuple (node, idx) where node is either a terminal leaf
node containing the value of that index in either it's left or right slot,
or is the most specific node on the path which would contain the index if
it were not pruned."""
left_size = 2 ** ((self.size-1).bit_length() - 1)
if index < 0 or self.size <= index:
return (None, self, index)
if index < left_size:
if self.prune is self.LEFT_NODE:
return (None, self, index)
if isinstance(self.left, numbers.Integral):
return (self.left, self, index)
if path is not None:
                path.append((self, index, self.LEFT_NODE))
return self.left._get_by_index(index, path)
else:
if self.prune is self.RIGHT_NODE:
return (None, self, index)
if isinstance(self.right, numbers.Integral):
return (self.right, self, index)
if path is not None:
                path.append((self, index, self.RIGHT_NODE))
return self.right._get_by_index(index-left_size, path) | 0.003339 |
def dict(self):
"""
        The python object used for rendering json.
        It is called dict to be consistent with the other modules,
        but it actually returns a list.
:return: the python object for rendering json
:rtype: list
"""
json_list = []
for step in self.steps:
json_list.append(step.dict)
return json_list | 0.005181 |
def _values(self):
"""Getter for series values (flattened)"""
if self.interpolate:
return [
val[0] for serie in self.series for val in serie.interpolated
]
else:
return super(Line, self)._values | 0.007407 |
def fit(self):
"""Fit each distribution to `data` and calculate an SSE.
WARNING: significant runtime. (~1min)
"""
# Defaults/anchors
best_sse = np.inf
best_param = (0.0, 1.0)
best_dist = scs.norm
# Compute the histogram of `x`. density=True gives a probability
# density function at each bin, normalized such that the integral over
# the range is 1.0
hist, bin_edges = np.histogram(self.x, bins=self.bins, density=True)
# The results of np.histogram will have len(bin_edges) = len(hist) + 1
# Find the midpoint at each bin to reduce the size of bin_edges by 1
bin_edges = (bin_edges + np.roll(bin_edges, -1))[:-1] / 2.0
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
sses = []
params = []
for dist in self.distributions:
dist = getattr(scs, dist)
try:
# The generic rv_continuous.fit() returns `mle_tuple`:
# 'MLEs for any shape parameters (if applicable),
# followed by those for location and scale.'
param = *shape, loc, scale = dist.fit(self.x)
pdf = dist.pdf(bin_edges, loc=loc, scale=scale, *shape)
sse = np.sum(np.power(hist - pdf, 2.0))
sses.append(sse)
params.append(param)
if best_sse > sse > 0.0:
best_dist = dist
best_param = param
best_sse = sse
best_pdf = pdf
except (NotImplementedError, AttributeError):
sses.append(np.nan)
params.append(np.nan)
self.best_dist = best_dist
self.best_param = best_param
self.best_sse = best_sse
self.best_pdf = best_pdf
self.sses = sses
self.params = params
self.hist = hist
self.bin_edges = bin_edges
return self | 0.000923 |
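A stripped-down version of the same histogram/SSE comparison for a single candidate distribution, run on synthetic data (the numbers below are made up).
import numpy as np
import scipy.stats as scs

rng = np.random.default_rng(0)
x = rng.normal(loc=2.0, scale=0.5, size=2000)

hist, bin_edges = np.histogram(x, bins=50, density=True)
centers = (bin_edges + np.roll(bin_edges, -1))[:-1] / 2.0   # bin midpoints

param = *shape, loc, scale = scs.norm.fit(x)
pdf = scs.norm.pdf(centers, loc=loc, scale=scale, *shape)
sse = np.sum((hist - pdf) ** 2.0)
print(param, sse)    # loc/scale near (2.0, 0.5); SSE is small for the true family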
def get_shelveset(self, shelveset_id, request_data=None):
"""GetShelveset.
Get a single deep shelveset.
:param str shelveset_id: Shelveset's unique ID
:param :class:`<TfvcShelvesetRequestData> <azure.devops.v5_0.tfvc.models.TfvcShelvesetRequestData>` request_data: includeDetails, includeWorkItems, maxChangeCount, and maxCommentLength
:rtype: :class:`<TfvcShelveset> <azure.devops.v5_0.tfvc.models.TfvcShelveset>`
"""
query_parameters = {}
if shelveset_id is not None:
query_parameters['shelvesetId'] = self._serialize.query('shelveset_id', shelveset_id, 'str')
if request_data is not None:
if request_data.name is not None:
query_parameters['requestData.name'] = request_data.name
if request_data.owner is not None:
query_parameters['requestData.owner'] = request_data.owner
if request_data.max_comment_length is not None:
query_parameters['requestData.maxCommentLength'] = request_data.max_comment_length
if request_data.max_change_count is not None:
query_parameters['requestData.maxChangeCount'] = request_data.max_change_count
if request_data.include_details is not None:
query_parameters['requestData.includeDetails'] = request_data.include_details
if request_data.include_work_items is not None:
query_parameters['requestData.includeWorkItems'] = request_data.include_work_items
if request_data.include_links is not None:
query_parameters['requestData.includeLinks'] = request_data.include_links
response = self._send(http_method='GET',
location_id='e36d44fb-e907-4b0a-b194-f83f1ed32ad3',
version='5.0',
query_parameters=query_parameters)
return self._deserialize('TfvcShelveset', response) | 0.005544 |
def match(tgt, functions=None, opts=None):
'''
Match based on the local data store on the minion
'''
if not opts:
opts = __opts__
if functions is None:
utils = salt.loader.utils(opts)
functions = salt.loader.minion_mods(opts, utils=utils)
comps = tgt.split(':')
if len(comps) < 2:
return False
val = functions['data.getval'](comps[0])
if val is None:
# The value is not defined
return False
if isinstance(val, list):
# We are matching a single component to a single list member
for member in val:
if fnmatch.fnmatch(six.text_type(member).lower(), comps[1].lower()):
return True
return False
if isinstance(val, dict):
if comps[1] in val:
return True
return False
return bool(fnmatch.fnmatch(
val,
comps[1],
)) | 0.002212 |
def get_port_channel_detail_input_request_type_get_next_request_last_aggregator_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_channel_detail = ET.Element("get_port_channel_detail")
config = get_port_channel_detail
input = ET.SubElement(get_port_channel_detail, "input")
request_type = ET.SubElement(input, "request-type")
get_next_request = ET.SubElement(request_type, "get-next-request")
last_aggregator_id = ET.SubElement(get_next_request, "last-aggregator-id")
last_aggregator_id.text = kwargs.pop('last_aggregator_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.005457 |
def _phiforce(self,R,phi=0.,t=0.):
"""
NAME:
_phiforce
PURPOSE:
evaluate the azimuthal force for this potential
INPUT:
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT:
the azimuthal force
HISTORY:
2011-10-19 - Written - Bovy (IAS)
"""
if R < self._rb:
return self._mphio*math.sin(self._m*phi-self._mphib)\
*self._rbp*(2.*self._r1p-self._rbp/R**self._p)
else:
return self._mphio*R**self._p*math.sin(self._m*phi-self._mphib) | 0.009509 |
def zset_score_pairs(response, **options):
"""
If ``withscores`` is specified in the options, return the response as
a list of (value, score) pairs
"""
if not response or not options['withscores']:
return response
score_cast_func = options.get('score_cast_func', float)
it = iter(response)
return list(zip(it, map(score_cast_func, it))) | 0.00266 |
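Example of the pairing on a flat WITHSCORES-style reply:
resp = ['apple', '1', 'banana', '2.5']
print(zset_score_pairs(resp, withscores=True))    # [('apple', 1.0), ('banana', 2.5)]
print(zset_score_pairs(resp, withscores=False))   # flat reply returned unchanged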
def _init_auth(self, config):
""" Init authentication
@dict: configuration of ldapcherry
"""
self.auth_mode = self._get_param('auth', 'auth.mode', config)
if self.auth_mode in ['and', 'or', 'none']:
pass
elif self.auth_mode == 'custom':
# load custom auth module
auth_module = self._get_param('auth', 'auth.module', config)
auth = __import__(auth_module, globals(), locals(), ['Auth'], 0)
self.auth = auth.Auth(config['auth'], cherrypy.log)
else:
raise WrongParamValue(
'auth.mode',
'auth',
['and', 'or', 'none', 'custom'],
)
self.roles_file = self._get_param('roles', 'roles.file', config)
cherrypy.log.error(
msg="loading roles file '%(file)s'" % {'file': self.roles_file},
severity=logging.DEBUG
)
self.roles = Roles(self.roles_file) | 0.00203 |
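Inferred from the dynamic import above, a custom backend is a module exposing an Auth class constructed with the auth config section and a logger; the method below is a hypothetical placeholder, since the real interface is defined by ldapcherry.
# file my_custom_auth.py, referenced in the config as auth.module = 'my_custom_auth'
class Auth(object):
    def __init__(self, config, logger):
        self.config = config
        self.logger = logger

    def check_credentials(self, username, password):   # hypothetical method name
        return username == 'admin' and password == 'secret'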
def stop(logfile, time_format):
"stop tracking for the active project"
def save_and_output(records):
records = server.stop(records)
write(records, logfile, time_format)
def output(r):
print "worked on %s" % colored(r[0], attrs=['bold'])
print " from %s" % colored(
server.date_to_txt(r[1][0], time_format), 'green')
print " to now, %s" % colored(
server.date_to_txt(r[1][1], time_format), 'green')
print " => %s elapsed" % colored(
time_elapsed(r[1][0], r[1][1]), 'red')
output(records[-1])
save_and_output(read(logfile, time_format)) | 0.012924 |
async def get(self):
"""Printing runtime statistics in JSON"""
context_data = self.get_context_data()
context_data.update(getattr(self.request.app, "stats", {}))
response = self.json_response(context_data)
return response | 0.007605 |
def _aggregate_func(self, aggregate):
"""
Return a suitable aggregate score function.
"""
funcs = {"sum": add, "min": min, "max": max}
func_name = aggregate.lower() if aggregate else 'sum'
try:
return funcs[func_name]
except KeyError:
raise TypeError("Unsupported aggregate: {}".format(aggregate)) | 0.005305 |
def freq_from_str(freq_str):
"""Obtain frequency ranges from input string, either as list or dynamic
notation.
Parameters
----------
freq_str : str
String with frequency ranges, either as a list:
e.g. [[1-3], [3-5], [5-8]];
or with a dynamic definition: (start, stop, width, step).
Returns
-------
list of tuple of float or None
Every tuple of float represents a frequency band. If input is invalid,
returns None.
"""
freq = []
as_list = freq_str[1:-1].replace(' ', '').split(',')
try:
if freq_str[0] == '[' and freq_str[-1] == ']':
for i in as_list:
one_band = i[1:-1].split('-')
one_band = float(one_band[0]), float(one_band[1])
freq.append(one_band)
elif freq_str[0] == '(' and freq_str[-1] == ')':
if len(as_list) == 4:
start = float(as_list[0])
stop = float(as_list[1])
halfwidth = float(as_list[2]) / 2
step = float(as_list[3])
centres = arange(start, stop, step)
for i in centres:
freq.append((i - halfwidth, i + halfwidth))
else:
return None
else:
return None
except:
return None
return freq | 0.004332 |
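Worked examples for both notations; the dynamic form expands (start, stop, width, step) into bands centred on arange(start, stop, step).
print(freq_from_str('[[1-3], [3-5], [5-8]]'))  # [(1.0, 3.0), (3.0, 5.0), (5.0, 8.0)]
print(freq_from_str('(10, 16, 2, 2)'))         # [(9.0, 11.0), (11.0, 13.0), (13.0, 15.0)]
print(freq_from_str('nonsense'))               # None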
def select_regexes(strings, regexes):
"""
select subset of strings matching a regex
treats strings as a set
"""
strings = set(strings)
select = set()
if isinstance(strings, collections.Iterable):
for r in regexes:
s = set(filter(re.compile('^' + r + '$').search, strings))
strings -= s
select |= s
return select
else:
raise ValueError("exclude should be iterable") | 0.002193 |
def reverseCommit(self):
"""
Reverse the document to the original state.
"""
print(self.after == self.before)
pos = self.qteWidget.textCursor().position()
self.qteWidget.setHtml(self.before)
self.placeCursor(pos) | 0.007463 |
def is_throttled(e):
"""
Determines whether the exception is due to API throttling.
:param e: Exception raised
:return: True if it's a throttling exception else False
"""
    return (hasattr(e, 'response') and
            e.response is not None and
            'Error' in e.response and
            e.response['Error']['Code'] in ['Throttling', 'RequestLimitExceeded',
                                            'ThrottlingException']) | 0.00578