id_within_dataset (int64, 46-2.71M) | snippet (string, lengths 63-481k) | tokens (sequence, lengths 20-15.6k) | language (string, 2 classes) | nl (string, lengths 1-32.4k) | is_duplicated (bool, 2 classes)
---|---|---|---|---|---
2,370,572 | def getExtn(fimg, extn=None):
"""
Returns the PyFITS extension corresponding to extension specified in
filename.
Defaults to returning the first extension with data or the primary
extension, if none have data. If a non-existent extension has been
specified, it raises a `KeyError` exception.
"""
# If no extension is provided, search for first extension
# in FITS file with data associated with it.
if extn is None:
# Set up default to point to PRIMARY extension.
_extn = fimg[0]
# then look for first extension with data.
for _e in fimg:
if _e.data is not None:
_extn = _e
break
else:
# An extension was provided, so parse it out...
if repr(extn).find(',') > 1:
if isinstance(extn, tuple):
# We have a tuple possibly created by parseExtn(), so
# turn it into a list for easier manipulation.
_extns = list(extn)
if '' in _extns:
_extns.remove('')
else:
_extns = extn.split(',')
# Two values given for extension:
# for example, 'sci,1' or 'dq,1'
try:
_extn = fimg[_extns[0], int(_extns[1])]
except KeyError:
_extn = None
for e in fimg:
hdr = e.header
if ('extname' in hdr and
hdr['extname'].lower() == _extns[0].lower() and
hdr['extver'] == int(_extns[1])):
_extn = e
break
elif repr(extn).find('/') > 1:
# We are working with GEIS group syntax
_indx = str(extn[:extn.find('/')])
_extn = fimg[int(_indx)]
elif isinstance(extn, string_types):
if extn.strip() == '':
_extn = None # force error since invalid name was provided
# Only one extension value specified...
elif extn.isdigit():
# We only have an extension number specified as a string...
_nextn = int(extn)
else:
# We only have EXTNAME specified...
_nextn = None
if extn.lower() == 'primary':
_nextn = 0
else:
i = 0
for hdu in fimg:
isimg = 'extname' in hdu.header
hdr = hdu.header
if isimg and extn.lower() == hdr['extname'].lower():
_nextn = i
break
i += 1
if _nextn is not None and _nextn < len(fimg):  # guard: EXTNAME may not have been found
_extn = fimg[_nextn]
else:
_extn = None
else:
# Only integer extension number given, or default of 0 is used.
if int(extn) < len(fimg):
_extn = fimg[int(extn)]
else:
_extn = None
if _extn is None:
raise KeyError('Extension %s not found' % extn)
return _extn | [tokens omitted: token-by-token duplicate of the snippet] | python | Returns the PyFITS extension corresponding to extension specified in
filename.
Defaults to returning the first extension with data or the primary
extension, if none have data. If a non-existent extension has been
specified, it raises a `KeyError` exception. | false |
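A minimal usage sketch for getExtn above. Hedged: astropy.io.fits stands in for the legacy PyFITS import, 'test.fits' is a hypothetical file, and the 'sci,1' call assumes the file actually contains a ('SCI', 1) extension; string_types comes from the snippet's own module imports.
from astropy.io import fits  # assumption: modern replacement for PyFITS

with fits.open('test.fits') as fimg:    # 'test.fits' is hypothetical
    hdu = getExtn(fimg)                 # first extension with data, else PRIMARY
    sci = getExtn(fimg, 'sci,1')        # EXTNAME,EXTVER pair, as in the docstring
    prim = getExtn(fimg, '0')           # extension number passed as a string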
2,057,399 | def get_fieldsets(self, request, obj=None):
"""
Fieldsets configuration
"""
return [
(None, {
'fields': ('type', 'namespace', 'app_title', 'object_name')
}),
(_('Generic'), {
'fields': (
'config.default_published', 'config.use_placeholder', 'config.use_abstract',
'config.set_author', 'config.use_related',
)
}),
(_('Layout'), {
'fields': (
'config.paginate_by', 'config.url_patterns', 'config.template_prefix',
'config.menu_structure', 'config.menu_empty_categories',
('config.default_image_full', 'config.default_image_thumbnail'),
),
'classes': ('collapse',)
}),
(_('Notifications'), {
'fields': (
'config.send_knock_create', 'config.send_knock_update'
),
'classes': ('collapse',)
}),
(_('Sitemap'), {
'fields': (
'config.sitemap_changefreq', 'config.sitemap_priority',
),
'classes': ('collapse',)
}),
(_('Meta'), {
'fields': (
'config.object_type',
)
}),
('Open Graph', {
'fields': (
'config.og_type', 'config.og_app_id', 'config.og_profile_id',
'config.og_publisher', 'config.og_author_url', 'config.og_author',
),
'description': _(
'You can provide plain strings, Post model attribute or method names'
)
}),
('Twitter', {
'fields': (
'config.twitter_type', 'config.twitter_site', 'config.twitter_author',
),
'description': _(
'You can provide plain strings, Post model attribute or method names'
)
}),
('Google+', {
'fields': (
'config.gplus_type', 'config.gplus_author',
),
'description': _(
'You can provide plain strings, Post model attribute or method names'
)
}),
] | [tokens omitted: token-by-token duplicate of the snippet] | python | Fieldsets configuration | false |
2,371,367 | def focusIn(self, event=None):
"""Select all text (if applicable) on taking focus"""
try:
# doScroll returns false if the call was ignored because the
# last call also came from this widget. That avoids unwanted
# scrolls and text selection when the focus moves in and out
# of the window.
if self.doScroll(event):
self.entry.selection_range(0, END) # select all text in widget
else:
# restore selection to what it was on the last FocusOut
if self.lastSelection:
self.entry.selection_range(*self.lastSelection)
except AttributeError:
pass | [tokens omitted: token-by-token duplicate of the snippet] | python | Select all text (if applicable) on taking focus | false |
2,666,031 | def WSDLUriToVersion(self, uri):
"""Return the WSDL version related to a WSDL namespace uri."""
value = self._wsdl_uri_mapping.get(uri)
if value is not None:
return value
raise ValueError(
'Unsupported SOAP envelope uri: %s' % uri
) | [tokens omitted: token-by-token duplicate of the snippet] | python | Return the WSDL version related to a WSDL namespace uri. | false |
2,446,300 | def __eq__(self, other):
""" Return true if another ItemList has all identical fields
:type other: ItemList
:param other: the other ItemList to compare to.
:rtype: Boolean
:returns: True if the ItemLists are identical, otherwise False
"""
return (self.url() == other.url() and
self.name() == other.name() and
super(ItemList, self).__eq__(other)) | [tokens omitted: token-by-token duplicate of the snippet] | python | Return true if another ItemList has all identical fields
:type other: ItemList
:param other: the other ItemList to compare to.
:rtype: Boolean
:returns: True if the ItemLists are identical, otherwise False | false |
2,204,524 | def _f_A20(self, r_a, r_s):
"""
equation A20 in Eliasdottir (2013)
:param r_a: r/Ra
:param r_s: r/Rs
:return:
"""
return r_a/(1+np.sqrt(1 + r_a**2)) - r_s/(1+np.sqrt(1 + r_s**2)) | [tokens omitted: token-by-token duplicate of the snippet] | python | equation A20 in Eliasdottir (2013)
:param r_a: r/Ra
:param r_s: r/Rs
:return: | false |
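Reconstructed directly from the return expression above, with x = r/R_a and y = r/R_s, the snippet computes the closed form
f_{A20}(x, y) = \frac{x}{1 + \sqrt{1 + x^2}} - \frac{y}{1 + \sqrt{1 + y^2}}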
2,381,035 | def parallel(processes, threads):
"""
execute jobs in processes using N threads
"""
pool = multithread(threads)
pool.map(run_process, processes)
pool.close()
pool.join() | [tokens omitted: token-by-token duplicate of the snippet] | python | execute jobs in processes using N threads | false |
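A hedged usage sketch for parallel above; multithread and run_process are helpers from the snippet's own module (not shown in this row), assumed to wrap a multiprocessing.pool.ThreadPool and shell execution respectively.
commands = ['sleep 1', 'echo done']  # hypothetical list of shell commands
parallel(commands, threads=2)        # maps run_process over both, 2 at a time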
1,897,766 | def filter_image_sep2d(image, fh, fv, impl='numpy', padding=None):
"""Filter an image with a separable filter.
Parameters
----------
image : 2D array-like
The image to be filtered. It must have a real (vs. complex) dtype.
fh, fv : 1D array-like
Horizontal (axis 0) and vertical (axis 1) filters. Their sizes
can be at most the image sizes in the respective axes.
impl : {'numpy', 'pyfftw'}, optional
FFT backend to use. The ``pyfftw`` backend requires the
``pyfftw`` package to be installed. It is usually significantly
faster than the NumPy backend.
padding : positive int, optional
Amount of zeros added to the left and right of the image in all
axes before FFT. This helps avoiding wraparound artifacts due to
large boundary values.
For ``None``, the padding is computed as ::
padding = min(max(len(fh), len(fv)) - 1, 64)
A padding of ``len(filt) - 1`` ensures that errors in FFT-based
convolutions are small. At the same time, the padding should not
be excessive to retain efficiency.
Returns
-------
filtered : 2D `numpy.ndarray`
The image filtered horizontally by ``fh`` and vertically by ``fv``.
It has the same shape as ``image``, and its dtype is
``np.result_type(image, fh, fv)``.
"""
# TODO: generalize for nD
impl, impl_in = str(impl).lower(), impl
if impl not in ('numpy', 'pyfftw'):
raise ValueError('`impl` {!r} not understood'
''.format(impl_in))
image = np.asarray(image)
if image.ndim != 2:
raise ValueError('`image` must be 2-dimensional, got image with '
'ndim={}'.format(image.ndim))
if image.size == 0:
raise ValueError('`image` cannot have size 0')
if not np.issubsctype(image.dtype, np.floating):
image = image.astype(float)
fh = np.asarray(fh).astype(image.dtype)
if fh.ndim != 1:
raise ValueError('`fh` must be one-dimensional')
elif fh.size == 0:
raise ValueError('`fh` cannot have size 0')
elif fh.size > image.shape[0]:
raise ValueError('`fh` can be at most `image.shape[0]`, got '
'{} > {}'.format(fh.size, image.shape[0]))
fv = np.asarray(fv).astype(image.dtype)
if fv.ndim != 1:
raise ValueError('`fv` must be one-dimensional')
elif fv.size == 0:
raise ValueError('`fv` cannot have size 0')
elif fv.size > image.shape[1]:
raise ValueError('`fv` can be at most `image.shape[1]`, got '
'{} > {}'.format(fv.size, image.shape[1]))
# Pad image with zeros
if padding is None:
padding = min(max(len(fh), len(fv)) - 1, 64)
if padding != 0:
image_padded = np.pad(image, padding, mode='constant')
else:
image_padded = image.copy() if impl == 'pyfftw' else image
# Prepare filters for the convolution
def prepare_for_fft(filt, n_new):
"""Return padded and shifted filter ready for FFT.
The filter is padded with zeros to the new size, and then shifted
such that the middle element of the old filter, i.e., the
one at index ``(len(filt) - 1) // 2`` ends up at index 0.
"""
mid = (len(filt) - 1) // 2
padded = np.zeros(n_new, dtype=filt.dtype)
padded[:len(filt) - mid] = filt[mid:]
padded[len(padded) - mid:] = filt[:mid]
return padded
fh = prepare_for_fft(fh, image_padded.shape[0])
fv = prepare_for_fft(fv, image_padded.shape[1])
# Perform the multiplication in Fourier space and apply inverse FFT
if impl == 'numpy':
image_ft = np.fft.rfftn(image_padded)
fh_ft = np.fft.fft(fh)
fv_ft = np.fft.rfft(fv)
image_ft *= fh_ft[:, None]
image_ft *= fv_ft[None, :]
# Important to specify the shape since `irfftn` cannot know the
# original shape
conv = np.fft.irfftn(image_ft, s=image_padded.shape)
if conv.dtype != image.dtype:
conv = conv.astype(image.dtype)
elif impl == 'pyfftw':
if not PYFFTW_AVAILABLE:
raise ValueError(
'`pyfftw` package is not available; you need to install it '
'to use the pyfftw backend')
import pyfftw
import multiprocessing
# Generate output arrays, for half-complex transform of image and
# vertical filter, and full FT of the horizontal filter
out_img_shape = (image_padded.shape[0], image_padded.shape[1] // 2 + 1)
out_img_dtype = np.result_type(image_padded, 1j)
out_img = np.empty(out_img_shape, out_img_dtype)
out_fh_shape = out_img_shape[0]
out_fh_dtype = np.result_type(fh, 1j)
fh_c = fh.astype(out_fh_dtype) # need to make this a C2C trafo
out_fh = np.empty(out_fh_shape, out_fh_dtype)
out_fv_shape = out_img_shape[1]
out_fv_dtype = np.result_type(fv, 1j)
out_fv = np.empty(out_fv_shape, out_fv_dtype)
# Perform the forward transforms of image and filters. We use
# the `FFTW_ESTIMATE` flag to not allow the planner to destroy
# the input.
plan = pyfftw.FFTW(image_padded, out_img, axes=(0, 1),
direction='FFTW_FORWARD',
flags=['FFTW_ESTIMATE'],
threads=multiprocessing.cpu_count())
plan(image_padded, out_img)
plan = pyfftw.FFTW(fh_c, out_fh, axes=(0,),
direction='FFTW_FORWARD',
flags=['FFTW_ESTIMATE'],
threads=multiprocessing.cpu_count())
plan(fh_c, out_fh)
plan = pyfftw.FFTW(fv, out_fv, axes=(0,),
direction='FFTW_FORWARD',
flags=['FFTW_ESTIMATE'],
threads=multiprocessing.cpu_count())
plan(fv, out_fv)
# Fourier space multiplication
out_img *= out_fh[:, None]
out_img *= out_fv[None, :]
# Inverse trafo
conv = image_padded # Overwrite
plan = pyfftw.FFTW(out_img.copy(), conv, axes=(0, 1),
direction='FFTW_BACKWARD',
flags=['FFTW_ESTIMATE'],
threads=multiprocessing.cpu_count())
plan(out_img, conv)
else:
raise ValueError('unsupported `impl` {!r}'.format(impl_in))
if padding:
return conv[padding:-padding, padding:-padding]
else:
return conv | [tokens omitted: token-by-token duplicate of the snippet] | python | Filter an image with a separable filter.
Parameters
----------
image : 2D array-like
The image to be filtered. It must have a real (vs. complex) dtype.
fh, fv : 1D array-like
Horizontal (axis 0) and vertical (axis 1) filters. Their sizes
can be at most the image sizes in the respective axes.
impl : {'numpy', 'pyfftw'}, optional
FFT backend to use. The ``pyfftw`` backend requires the
``pyfftw`` package to be installed. It is usually significantly
faster than the NumPy backend.
padding : positive int, optional
Amount of zeros added to the left and right of the image in all
axes before FFT. This helps avoiding wraparound artifacts due to
large boundary values.
For ``None``, the padding is computed as ::
padding = min(max(len(fh), len(fv)) - 1, 64)
A padding of ``len(filt) - 1`` ensures that errors in FFT-based
convolutions are small. At the same time, the padding should not
be excessive to retain efficiency.
Returns
-------
filtered : 2D `numpy.ndarray`
The image filtered horizontally by ``fh`` and vertically by ``fv``.
It has the same shape as ``image``, and its dtype is
``np.result_type(image, fh, fv)``. | false |
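A small usage sketch for the NumPy backend of filter_image_sep2d above, using a 3-tap binomial smoothing filter (filter values chosen purely for illustration):
import numpy as np

image = np.random.rand(64, 64)
fh = np.array([1.0, 2.0, 1.0]) / 4.0   # axis-0 (horizontal) filter
fv = np.array([1.0, 2.0, 1.0]) / 4.0   # axis-1 (vertical) filter
smoothed = filter_image_sep2d(image, fh, fv, impl='numpy')
assert smoothed.shape == image.shape   # per the docstring, output keeps the input shape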
1,842,272 | def synset_properties(synset: "wn.Synset", parameter: str):
"""
Accesses an NLTK WordNet Synset property by name, calling it when it is a method.
Note: This is for compatibility with NLTK 2.x
"""
return_type = SS_PARAMETERS_TYPE_MAP[parameter]
func = 'synset.' + parameter
return eval(func) if isinstance(eval(func), return_type) else eval(func)() | [tokens omitted: token-by-token duplicate of the snippet] | python | Accesses an NLTK WordNet Synset property by name, calling it when it is a method.
Note: This is for compatibility with NLTK 2.x | false |
2,119,547 | def _evaluate(self, viewset_results=None):
"""Evaluate query observer.
:param viewset_results: Objects returned by the viewset query
"""
if viewset_results is None:
viewset_results = self._viewset_results()
try:
observer = models.Observer.objects.get(id=self.id)
# Do not evaluate the observer if there are no subscribers
if observer.subscribers.count() == 0:
return (None, None, None)
# Update last evaluation time.
models.Observer.objects.filter(id=self.id).update(
last_evaluation=timezone.now()
)
# Log viewsets with too much output.
max_result = get_queryobserver_settings()['warnings']['max_result_length']
if len(viewset_results) > max_result:
self._warning(
"Observed viewset returns too many results",
results=len(viewset_results),
)
new_results = collections.OrderedDict()
for order, item in enumerate(viewset_results):
if not isinstance(item, dict):
raise ValueError(
"Observable views must return a dictionary or a list of dictionaries!"
)
item = {'order': order, 'data': item}
try:
new_results[str(item['data'][self._meta.primary_key])] = item
except KeyError:
raise KeyError(
"Observable view did not return primary key field '{}'!".format(
self._meta.primary_key
)
)
# Process difference between old results and new results.
added, changed = [], []
new_ids = list(new_results.keys())
removed_qs = observer.items.exclude(primary_key__in=new_results.keys())
removed = list(removed_qs.values('order', 'data'))
maybe_changed_qs = observer.items.filter(primary_key__in=new_results.keys())
with transaction.atomic():
# Removed items.
removed_qs.delete()
# Defer unique ordering constraint before processing order updates.
# NOTE: The name of the constraint is generated by Django ORM.
with connection.cursor() as cursor:
cursor.execute(
"SET CONSTRAINTS rest_framework_reactive_item_observer_id_order_9b8adde6_uniq DEFERRED"
)
# Changed items.
for item_id, old_order, old_data in maybe_changed_qs.values_list(
'primary_key', 'order', 'data'
):
new_item = new_results[item_id]
new_ids.remove(item_id)
if new_item['data'] != old_data:
changed.append(new_item)
observer.items.filter(primary_key=item_id).update(
data=new_item['data'], order=new_item['order']
)
elif new_item['order'] != old_order:
# TODO: If only order has changed, don't transmit
# full data (needs frontend support).
changed.append(new_item)
observer.items.filter(primary_key=item_id).update(
order=new_item['order']
)
# Added items.
for item_id in new_ids:
item = new_results[item_id]
added.append(item)
observer.items.create(
primary_key=item_id, order=item['order'], data=item['data']
)
return (added, changed, removed)
except models.Observer.DoesNotExist:
# Observer removed, ignore evaluation
return (None, None, None) | [tokens omitted: token-by-token duplicate of the snippet] | python | Evaluate query observer.
:param viewset_results: Objects returned by the viewset query | false |
2,417,059 | def get_wf(self,token_id):
"""
Returns the token object for the given token identifier
@type token_id: string
@param token_id: the token identifier
@rtype: L{Cwf}
@return: the token object
"""
wf_node = self.idx.get(token_id)
if wf_node is not None:
return Cwf(node=wf_node,type=self.type)
else:
for wf_node in self.__get_wf_nodes():
if self.type == 'NAF': label_id = 'id'
elif self.type == 'KAF': label_id = 'wid'
if wf_node.get(label_id) == token_id:
return Cwf(node=wf_node, type=self.type)
return None | [tokens omitted: token-by-token duplicate of the snippet] | python | Returns the token object for the given token identifier
@type token_id: string
@param token_id: the token identifier
@rtype: L{Cwf}
@return: the token object | false |
1,685,509 | def add_reporter(self, reporter):
"""Add a MetricReporter"""
with self._lock:
reporter.init(list(self.metrics.values()))
self._reporters.append(reporter) | [tokens omitted: token-by-token duplicate of the snippet] | python | Add a MetricReporter | false |
2,040,375 | def post(self, request, *args, **kwargs):
"""
Builds a dynamic form that targets only the field in question, and saves the modification.
"""
self.object_list = None
form = self.get_xeditable_form(self.get_xeditable_form_class())
if form.is_valid():
obj = self.get_update_object(form)
if obj is None:
data = json.dumps({
'status': 'error',
'message': "Object does not exist."
})
return HttpResponse(data, content_type="application/json", status=404)
return self.update_object(form, obj)
else:
data = json.dumps({
'status': 'error',
'message': "Invalid request",
'form_errors': form.errors,
})
return HttpResponse(data, content_type="application/json", status=400) | [tokens omitted: token-by-token duplicate of the snippet] | python | Builds a dynamic form that targets only the field in question, and saves the modification. | false |
2,183,322 | def update(self, bandit, payout):
'''
Update bandit trials and payouts for given bandit.
Parameters
----------
bandit : int
Bandit index
payout : float
Returns
-------
None
'''
self.choices.append(bandit)
self.pulls[bandit] += 1
self.wins[bandit] += payout
self.bandits.payouts[bandit] += payout | [tokens omitted: token-by-token duplicate of the snippet] | python | Update bandit trials and payouts for given bandit.
Parameters
----------
bandit : int
Bandit index
payout : float
Returns
-------
None | false |
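A hedged sketch of the pull loop that would drive update above; choose_bandit, pull, and solver are hypothetical names, since only the bookkeeping method is shown in this row.
for _ in range(1000):
    bandit = choose_bandit(solver)   # hypothetical selection policy
    payout = pull(bandit)            # hypothetical environment, returns a float
    solver.update(bandit, payout)    # records the choice, the pull, and the payout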
2,291,862 | def nice():
"""
Returns a randomly generated uuid v4 compliant slug which conforms to a set
of "nice" properties, at the cost of some entropy. Currently this means one
extra fixed bit (the first bit of the uuid is set to 0) which guarantees the
slug will begin with [A-Za-f]. For example such slugs don't require special
handling when used as command line parameters (whereas non-nice slugs may
start with `-` which can confuse command line tools).
Potentially other "nice" properties may be added in future to further
restrict the range of potential uuids that may be generated.
"""
rawBytes = bytearray(uuid.uuid4().bytes)
rawBytes[0] = rawBytes[0] & 0x7f # Ensure slug starts with [A-Za-f]
return _convert_bytes_to_slug(rawBytes) | [tokens omitted: token-by-token duplicate of the snippet] | python | Returns a randomly generated uuid v4 compliant slug which conforms to a set
of "nice" properties, at the cost of some entropy. Currently this means one
extra fixed bit (the first bit of the uuid is set to 0) which guarantees the
slug will begin with [A-Za-f]. For example such slugs don't require special
handling when used as command line parameters (whereas non-nice slugs may
start with `-` which can confuse command line tools).
Potentially other "nice" properties may be added in future to further
restrict the range of potential uuids that may be generated. | false |
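A quick usage check for nice above; the assertion encodes the documented [A-Za-f] first-character guarantee (base64url characters for 6-bit values 0-31), and the example slug is illustrative only.
import string

slug = nice()                                        # e.g. 'eWIgwMgxSfeXQad6NNDZFA' (illustrative)
assert slug[0] in string.ascii_uppercase + 'abcdef'  # [A-Za-f] guarantee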
2,361,973 | def _update_camera_pos(self):
""" Set the camera position and orientation"""
# transform will be updated several times; do not update camera
# transform until we are done.
ch_em = self.events.transform_change
with ch_em.blocker(self._update_transform):
tr = self.transform
tr.reset()
up, forward, right = self._get_dim_vectors()
# Create mapping so correct dim is up
pp1 = np.array([(0, 0, 0), (0, 0, -1), (1, 0, 0), (0, 1, 0)])
pp2 = np.array([(0, 0, 0), forward, right, up])
tr.set_mapping(pp1, pp2)
tr.translate(-self._actual_distance * forward)
self._rotate_tr()
tr.scale([1.0/a for a in self._flip_factors])
tr.translate(np.array(self.center)) | [tokens omitted: token-by-token duplicate of the snippet] | python | Set the camera position and orientation | false |
2,126,249 | def param_changed_to(self, key, to_value, from_value=None):
"""
Returns true if the given parameter, with name key, has transitioned to the given value.
"""
last_value = getattr(self.last_manifest, key)
current_value = self.current_manifest.get(key)
if from_value is not None:
return last_value == from_value and current_value == to_value
return last_value != to_value and current_value == to_value | [tokens omitted: token-by-token duplicate of the snippet] | python | Returns true if the given parameter, with name key, has transitioned to the given value. | false |
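An illustrative sketch of the transition semantics above; the receiver object (satchel) and the called actions are hypothetical names.
# Fires only on the edge: last value != 1 and current value == 1.
if satchel.param_changed_to('enabled', 1):
    enable_feature()                 # hypothetical one-time action
# Restrict the edge to a specific previous value:
if satchel.param_changed_to('mode', 'prod', from_value='staging'):
    promote()                        # only fires for the staging -> prod transition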
1,835,095 | def _set_bottomMargin(self, value):
"""
value will be an int or float.
Subclasses may override this method.
"""
diff = value - self.bottomMargin
self.moveBy((0, diff))
self.height += diff | [tokens omitted: token-by-token duplicate of the snippet] | python | value will be an int or float.
Subclasses may override this method. | false |
2,126,661 | def write_pbm(matrix, version, out, scale=1, border=None, plain=False):
"""\
Serializes the matrix as `PBM <http://netpbm.sourceforge.net/doc/pbm.html>`_
image.
:param matrix: The matrix to serialize.
:param int version: The (Micro) QR code version
:param out: Filename or a file-like object supporting to write binary data.
:param scale: Indicates the size of a single module (default: 1 which
corresponds to 1 x 1 pixel per module).
:param int border: Integer indicating the size of the quiet zone.
If set to ``None`` (default), the recommended border size
will be used (``4`` for QR Codes, ``2`` for a Micro QR Codes).
:param bool plain: Indicates if a P1 (ASCII encoding) image should be
created (default: False). By default a (binary) P4 image is created.
"""
row_iter = matrix_iter(matrix, version, scale, border)
width, height = get_symbol_size(version, scale=scale, border=border)
with writable(out, 'wb') as f:
write = f.write
write('{0}\n'
'# Created by {1}\n'
'{2} {3}\n'\
.format(('P4' if not plain else 'P1'), CREATOR, width, height).encode('ascii'))
if not plain:
for row in row_iter:
write(bytearray(_pack_bits_into_byte(row)))
else:
for row in row_iter:
write(b''.join(str(i).encode('ascii') for i in row))
write(b'\n') | [tokens omitted: token-by-token duplicate of the snippet] | python | \
Serializes the matrix as `PBM <http://netpbm.sourceforge.net/doc/pbm.html>`_
image.
:param matrix: The matrix to serialize.
:param int version: The (Micro) QR code version
:param out: Filename or a file-like object supporting to write binary data.
:param scale: Indicates the size of a single module (default: 1 which
corresponds to 1 x 1 pixel per module).
:param int border: Integer indicating the size of the quiet zone.
If set to ``None`` (default), the recommended border size
will be used (``4`` for QR Codes, ``2`` for a Micro QR Codes).
:param bool plain: Indicates if a P1 (ASCII encoding) image should be
created (default: False). By default a (binary) P4 image is created. | false |
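A hedged usage sketch for write_pbm above, assuming a make()-style factory object exposing .matrix and .version (both names are assumptions; only the serializer is shown in this row).
qr = make('Hello world')                                     # hypothetical QR factory
write_pbm(qr.matrix, qr.version, 'code.pbm', scale=4)        # binary P4 output
write_pbm(qr.matrix, qr.version, 'code.p1.pbm', plain=True)  # ASCII P1 output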
2,557,675 | def delete(self):
"""Remove the item from the infoblox server.
:rtype: bool
:raises: AssertionError
:raises: ValueError
:raises: infoblox.exceptions.ProtocolError
"""
if not self._ref:
raise ValueError('Object has no reference id for deletion')
if 'save' not in self._supports:
raise AssertionError('Can not save this object type')
response = self._session.delete(self._path)
if response.status_code == 200:
self._ref = None
self.clear()
return True
try:
error = response.json()
raise exceptions.ProtocolError(error['text'])
except ValueError:
raise exceptions.ProtocolError(response.content) | [tokens omitted: token-by-token duplicate of the snippet] | python | Remove the item from the infoblox server.
:rtype: bool
:raises: AssertionError
:raises: ValueError
:raises: infoblox.exceptions.ProtocolError | false |
2,093,654 | async def on_isupport_wallchops(self, value):
""" Support for messaging every opped member or higher on a channel. Replaced by STATUSMSG. """
for prefix, mode in self._nickname_prefixes.items():
if mode == 'o':
break
else:
prefix = '@'
self._status_message_prefixes.add(prefix) | [tokens omitted: token-by-token duplicate of the snippet] | python | Support for messaging every opped member or higher on a channel. Replaced by STATUSMSG. | false |
2,532,887 | def dispatch_hook(key, hooks, hook_data):
"""Dispatches a hook dictionary on a given piece of data."""
hooks = hooks or dict()
if key in hooks:
try:
return hooks.get(key).__call__(hook_data) or hook_data
except Exception, why:
warnings.warn(str(why))
return hook_data | [tokens omitted: token-by-token duplicate of the snippet] | python | Dispatches a hook dictionary on a given piece of data. | false |
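A small usage sketch for dispatch_hook above (note the snippet is Python 2, per the except Exception, why syntax); the hook function and payload here are illustrative.
def tag(data):
    data['hooked'] = True            # mutate and return, as hooks are expected to do
    return data

hooks = {'response': tag}
out = dispatch_hook('response', hooks, {'body': 'ok'})  # hook runs, returns mutated dict
out = dispatch_hook('missing', hooks, out)              # unknown key: data passes through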
2,517,833 | def ground_resolution(lat, level):
"""Gets ground res in meters / pixel"""
lat = TileSystem.clip(lat, TileSystem.LATITUDE_RANGE)
return cos(lat * pi / 180) * 2 * pi * TileSystem.EARTH_RADIUS / TileSystem.map_size(level) | [tokens omitted: token-by-token duplicate of the snippet] | python | Gets ground resolution in meters per pixel | false |
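Reconstructed from the return expression, this is the standard Web-Mercator ground-resolution formula:
\text{resolution}(\varphi, z) = \frac{\cos(\varphi \cdot \pi / 180) \cdot 2\pi \cdot R_{\text{earth}}}{\text{map\_size}(z)}
where map_size(z) is the map width in pixels at level z (256 * 2^z in the Bing tile system), so the resolution halves with each additional level.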
2,106,942 | def show_messages(self):
"""Show all messages."""
string = self.header
if self.static_message is not None:
string += self.static_message.to_html()
for message in self.dynamic_messages:
string += message.to_html()
string += self.footer
print(string)
self.setHtml(string) | [tokens omitted: token-by-token duplicate of the snippet] | python | Show all messages. | false |
2,596,206 | def textile(text, **kwargs):
"""
Applies Textile conversion to a string, and returns the HTML.
This is simply a pass-through to the ``textile`` template filter
included in ``django.contrib.markup``, which works around issues
PyTextile has with Unicode strings. If you're not using Django but
want to use Textile with ``MarkupFormatter``, you'll need to
supply your own Textile filter.
"""
from django.contrib.markup.templatetags.markup import textile
return textile(text) | [tokens omitted: token-by-token duplicate of the snippet] | python | Applies Textile conversion to a string, and returns the HTML.
This is simply a pass-through to the ``textile`` template filter
included in ``django.contrib.markup``, which works around issues
PyTextile has with Unicode strings. If you're not using Django but
want to use Textile with ``MarkupFormatter``, you'll need to
supply your own Textile filter. | false |
2,556,005 | def groups_names(self):
"""Names of all groups (get-only).
:getter: Returns names of all groups
:type: list of str
"""
return _ListProxy(self._get_group_name_by_num(i) for i in range(self.groups_count)) | [tokens omitted: token-by-token duplicate of the snippet] | python | Names of all groups (get-only).
:getter: Returns names of all groups
:type: list of str | false |
2,566,798 | def __init__(self):
"""Initialize the queue and spawn the main loop thread
Upon initialization, tasks stored in the database are immediately
scheduled.
_task_queue is a priority queue ordered using Python's heapq functionality.
Elements in _task_queue are tuples of the form (datetime, task) where
datetime is the scheduled run time and task is a dictionary as defined
in the above docstring for the Scheduler class.
For concurrency safety reasons, never write to _task_queue outside the
_loop() thread.
"""
self._task_queue = [] # Never write to this outside the _loop thread
self._pending_cancels = set()
self._executor = GIPCExecutor()
# Load previously scheduled tasks from database
now = datetime.datetime.now()
with get_app().app_context():
saved_schedule = Task.query.filter_by(active=True)
for task in saved_schedule:
new_task = {
'id': task.id,
'interval': task.interval,
'code': task.code
}
# Writing directly to the _task_queue is safe since we haven't started
# the _loop yet
self._task_queue.append((now, new_task))
# Make _task_queue a priority queue
heapify(self._task_queue)
# Spawn main loop and save writer for future communication
(read, write) = gipc.pipe()
self._main_thread = gevent.spawn(self._loop, read)
self._schedule_pipe = write
atexit.register(self._interrupt) | [tokens omitted: token-by-token duplicate of the snippet] | python | Initialize the queue and spawn the main loop thread
Upon initialization, tasks stored in the database are immediately
scheduled.
_task_queue is a priority queue ordered using Python's heapq functionality.
Elements in _task_queue are tuples of the form (datetime, task) where
datetime is the scheduled run time and task is a dictionary as defined
in the above docstring for the Scheduler class.
For concurrency safety reasons, never write to _task_queue outside the
_loop() thread. | false |
1,686,526 | def _next_image_partname(self, ext):
"""
The next available image partname, starting from
``/word/media/image1.{ext}`` where unused numbers are reused. The
partname is unique by number, without regard to the extension. *ext*
does not include the leading period.
"""
def image_partname(n):
return PackURI('/word/media/image%d.%s' % (n, ext))
used_numbers = [image_part.partname.idx for image_part in self]
for n in range(1, len(self)+1):
if n not in used_numbers:
return image_partname(n)
return image_partname(len(self)+1) | [tokens omitted: token-by-token duplicate of the snippet] | python | The next available image partname, starting from
``/word/media/image1.{ext}`` where unused numbers are reused. The
partname is unique by number, without regard to the extension. *ext*
does not include the leading period. | false |
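A standalone re-implementation of the gap-filling rule above, for illustration only (the function name and arguments are hypothetical; the real method iterates over its own image parts):
def next_partname(used_numbers, count, ext='png'):
    # Reuse the lowest free number in 1..count, else fall through to count+1.
    for n in range(1, count + 1):
        if n not in used_numbers:
            return '/word/media/image%d.%s' % (n, ext)
    return '/word/media/image%d.%s' % (count + 1, ext)

assert next_partname([1, 3], 2) == '/word/media/image2.png'   # gap at 2 is reused
assert next_partname([1, 2], 2) == '/word/media/image3.png'   # no gap: next number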
1,875,609 | def __updateNavButtons(self):
"""
Updates the navigation buttons that might be on the device screen.
"""
navButtons = None
for v in self.views:
if v.getId() == 'com.android.systemui:id/nav_buttons':
navButtons = v
break
if navButtons:
self.navBack = self.findViewById('com.android.systemui:id/back', navButtons)
self.navHome = self.findViewById('com.android.systemui:id/home', navButtons)
self.navRecentApps = self.findViewById('com.android.systemui:id/recent_apps', navButtons)
else:
if self.uiAutomatorHelper:
print >> sys.stderr, "WARNING: nav buttons not found. Perhaps the device has hardware buttons."
self.navBack = None
self.navHome = None
self.navRecentApps = None | [tokens omitted: token-by-token duplicate of the snippet] | python | Updates the navigation buttons that might be on the device screen. | false |
1,615,698 | def read_history(self, num=10, segment=0):
"""
Outputs the last `num` elements that were appended either by `append` or
`append_multiple`.
Returns
-------
out : list
"""
if num < 0:
num = 0
if segment < 0:
raise TypeError("segment must be >= 0")
return self._builder.read_history(num, segment) | [
"def",
"read_history",
"(",
"self",
",",
"num",
"=",
"10",
",",
"segment",
"=",
"0",
")",
":",
"if",
"num",
"<",
"0",
":",
"num",
"=",
"0",
"if",
"segment",
"<",
"0",
":",
"raise",
"TypeError",
"(",
"\"segment must be >= 0\"",
")",
"return",
"self",
".",
"_builder",
".",
"read_history",
"(",
"num",
",",
"segment",
")"
] | python | Outputs the last `num` elements that were appended either by `append` or
`append_multiple`.
Returns
-------
out : list | false |
2,102,874 | def to_html(self, show_mean=None, sortable=None, colorize=True, *args,
**kwargs):
"""Extend Pandas built in `to_html` method for rendering a DataFrame
and use it to render a ScoreMatrix."""
if show_mean is None:
show_mean = self.show_mean
if sortable is None:
sortable = self.sortable
df = self.copy()
if show_mean:
df.insert(0, 'Mean', None)
df.loc[:, 'Mean'] = ['%.3f' % self[m].mean() for m in self.models]
html = df.to_html(*args, **kwargs) # Pandas method
html, table_id = self.annotate(df, html, show_mean, colorize)
if sortable:
self.dynamify(table_id)
return html | [
"def",
"to_html",
"(",
"self",
",",
"show_mean",
"=",
"None",
",",
"sortable",
"=",
"None",
",",
"colorize",
"=",
"True",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"if",
"show_mean",
"is",
"None",
":",
"show_mean",
"=",
"self",
".",
"show_mean",
"if",
"sortable",
"is",
"None",
":",
"sortable",
"=",
"self",
".",
"sortable",
"df",
"=",
"self",
".",
"copy",
"(",
")",
"if",
"show_mean",
":",
"df",
".",
"insert",
"(",
"0",
",",
"'Mean'",
",",
"None",
")",
"df",
".",
"loc",
"[",
":",
",",
"'Mean'",
"]",
"=",
"[",
"'%.3f'",
"%",
"self",
"[",
"m",
"]",
".",
"mean",
"(",
")",
"for",
"m",
"in",
"self",
".",
"models",
"]",
"html",
"=",
"df",
".",
"to_html",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"html",
",",
"table_id",
"=",
"self",
".",
"annotate",
"(",
"df",
",",
"html",
",",
"show_mean",
",",
"colorize",
")",
"if",
"sortable",
":",
"self",
".",
"dynamify",
"(",
"table_id",
")",
"return",
"html"
] | python | Extend Pandas' built-in `to_html` method for rendering a DataFrame
and use it to render a ScoreMatrix. | false |
1,927,463 | def split(args):
"""
%prog split input.bed
Split suspicious scaffolds. Suspicious scaffolds are those that contain
chunks that map to more than one linkage group. The chunk size can be
modified through --chunk option.
"""
p = OptionParser(split.__doc__)
p.add_option("--chunk", default=4, type="int",
help="Split chunks of at least N markers")
p.add_option("--splitsingle", default=False, action="store_true",
help="Split breakpoint range right in the middle")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
inputbed, = args
bonus = 2
nchunk = opts.chunk
nbreaks = 0
penalty = -(nchunk * bonus - 1)
bed = Bed(inputbed)
for seqid, bb in bed.sub_beds():
markers = [Marker(x) for x in bb]
markers = compute_score(markers, bonus, penalty)
for mi, mj in pairwise(markers):
if mi.mlg == mj.mlg:
continue
assert mi.seqid == mj.seqid
start, end = mi.pos, mj.pos
if start > end:
start, end = end, start
if opts.splitsingle:
start = end = (start + end) / 2
print("\t".join(str(x) for x in (mi.seqid, start - 1, end)))
nbreaks += 1
logging.debug("A total of {} breakpoints inferred (--chunk={})".\
format(nbreaks, nchunk)) | [
"def",
"split",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"split",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--chunk\"",
",",
"default",
"=",
"4",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"Split chunks of at least N markers\"",
")",
"p",
".",
"add_option",
"(",
"\"--splitsingle\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Split breakpoint range right in the middle\"",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"inputbed",
",",
"=",
"args",
"bonus",
"=",
"2",
"nchunk",
"=",
"opts",
".",
"chunk",
"nbreaks",
"=",
"0",
"penalty",
"=",
"-",
"(",
"nchunk",
"*",
"bonus",
"-",
"1",
")",
"bed",
"=",
"Bed",
"(",
"inputbed",
")",
"for",
"seqid",
",",
"bb",
"in",
"bed",
".",
"sub_beds",
"(",
")",
":",
"markers",
"=",
"[",
"Marker",
"(",
"x",
")",
"for",
"x",
"in",
"bb",
"]",
"markers",
"=",
"compute_score",
"(",
"markers",
",",
"bonus",
",",
"penalty",
")",
"for",
"mi",
",",
"mj",
"in",
"pairwise",
"(",
"markers",
")",
":",
"if",
"mi",
".",
"mlg",
"==",
"mj",
".",
"mlg",
":",
"continue",
"assert",
"mi",
".",
"seqid",
"==",
"mj",
".",
"seqid",
"start",
",",
"end",
"=",
"mi",
".",
"pos",
",",
"mj",
".",
"pos",
"if",
"start",
">",
"end",
":",
"start",
",",
"end",
"=",
"end",
",",
"start",
"if",
"opts",
".",
"splitsingle",
":",
"start",
"=",
"end",
"=",
"(",
"start",
"+",
"end",
")",
"/",
"2",
"print",
"(",
"\"\\t\"",
".",
"join",
"(",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"mi",
".",
"seqid",
",",
"start",
"-",
"1",
",",
"end",
")",
")",
")",
"nbreaks",
"+=",
"1",
"logging",
".",
"debug",
"(",
"\"A total of {} breakpoints inferred (--chunk={})\"",
".",
"format",
"(",
"nbreaks",
",",
"nchunk",
")",
")"
] | python | %prog split input.bed
Split suspicious scaffolds. Suspicious scaffolds are those that contain
chunks that map to more than one linkage group. The chunk size can be
modified through --chunk option. | false |
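The snippet calls a `pairwise` helper that is not shown; a plausible implementation, matching the standard itertools recipe (an assumption about the project's actual helper):

    from itertools import tee

    def pairwise(iterable):
        # s -> (s0, s1), (s1, s2), (s2, s3), ...
        a, b = tee(iterable)
        next(b, None)
        return zip(a, b)

    assert list(pairwise([1, 2, 3])) == [(1, 2), (2, 3)]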
2,300,249 | def service(flavour):
r"""
Mark a class as implementing a Service
Each Service class must have a ``run`` method, which does not take any arguments.
This method is :py:meth:`~.ServiceRunner.adopt`\ ed after the daemon starts, unless
* the Service has been garbage collected, or
* the ServiceUnit has been :py:meth:`~.ServiceUnit.cancel`\ ed.
For each service instance, its :py:class:`~.ServiceUnit` is available at ``service_instance.__service_unit__``.
"""
def service_unit_decorator(raw_cls):
__new__ = raw_cls.__new__
def __new_service__(cls, *args, **kwargs):
if __new__ is object.__new__:
self = __new__(cls)
else:
self = __new__(cls, *args, **kwargs)
service_unit = ServiceUnit(self, flavour)
self.__service_unit__ = service_unit
return self
raw_cls.__new__ = __new_service__
if raw_cls.run.__doc__ is None:
raw_cls.run.__doc__ = "Service entry point"
return raw_cls
return service_unit_decorator | [
"def",
"service",
"(",
"flavour",
")",
":",
"def",
"service_unit_decorator",
"(",
"raw_cls",
")",
":",
"__new__",
"=",
"raw_cls",
".",
"__new__",
"def",
"__new_service__",
"(",
"cls",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"if",
"__new__",
"is",
"object",
".",
"__new__",
":",
"self",
"=",
"__new__",
"(",
"cls",
")",
"else",
":",
"self",
"=",
"__new__",
"(",
"cls",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
"service_unit",
"=",
"ServiceUnit",
"(",
"self",
",",
"flavour",
")",
"self",
".",
"__service_unit__",
"=",
"service_unit",
"return",
"self",
"raw_cls",
".",
"__new__",
"=",
"__new_service__",
"if",
"raw_cls",
".",
"run",
".",
"__doc__",
"is",
"None",
":",
"raw_cls",
".",
"run",
".",
"__doc__",
"=",
"\"Service entry point\"",
"return",
"raw_cls",
"return",
"service_unit_decorator"
] | python | r"""
Mark a class as implementing a Service
Each Service class must have a ``run`` method, which does not take any arguments.
This method is :py:meth:`~.ServiceRunner.adopt`\ ed after the daemon starts, unless
* the Service has been garbage collected, or
* the ServiceUnit has been :py:meth:`~.ServiceUnit.cancel`\ ed.
For each service instance, its :py:class:`~.ServiceUnit` is available at ``service_instance.__service_unit__``. | false |
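The decorator's core move is wrapping ``__new__`` so a side object is attached before ``__init__`` runs. A stripped-down, runnable sketch of that technique, with a stand-in payload instead of ``ServiceUnit``:

    def attach_unit(flavour):
        def decorator(cls):
            original_new = cls.__new__

            def __new_service__(klass, *args, **kwargs):
                # object.__new__ rejects extra arguments, hence the branch.
                if original_new is object.__new__:
                    self = original_new(klass)
                else:
                    self = original_new(klass, *args, **kwargs)
                self.__service_unit__ = ('unit-for', flavour)  # stand-in
                return self

            cls.__new__ = __new_service__
            return cls
        return decorator

    @attach_unit('thread')
    class Worker:
        def run(self):
            pass

    assert Worker().__service_unit__ == ('unit-for', 'thread')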
2,222,421 | def output_files(self):
"""Return a list of the output files produced by this link.
For `Link` sub-classes this will return the union
of all the output files of each internal `Link`.
That is to say this will include files produced by one
`Link` in a `Chain` and used as input to another `Link` in the `Chain`
"""
ret_list = []
for key, val in self.file_dict.items():
# For output files we only want files that were marked as output
if val & FileFlags.output_mask:
ret_list.append(key)
return ret_list | [
"def",
"output_files",
"(",
"self",
")",
":",
"ret_list",
"=",
"[",
"]",
"for",
"key",
",",
"val",
"in",
"self",
".",
"file_dict",
".",
"items",
"(",
")",
":",
"if",
"val",
"&",
"FileFlags",
".",
"output_mask",
":",
"ret_list",
".",
"append",
"(",
"key",
")",
"return",
"ret_list"
] | python | Return a list of the output files produced by this link.
For `Link` sub-classes this will return the union
of all the output files of each internal `Link`.
That is to say this will include files produced by one
`Link` in a `Chain` and used as input to another `Link` in the `Chain` | false |
2,256,407 | def __getitem__(self, jid):
'''Get a job object corresponding to that jid, or ``None`` if it
doesn't exist'''
results = self.client('get', jid)
if not results:
results = self.client('recur.get', jid)
if not results:
return None
return RecurringJob(self.client, **json.loads(results))
return Job(self.client, **json.loads(results)) | [
"def",
"__getitem__",
"(",
"self",
",",
"jid",
")",
":",
"results",
"=",
"self",
".",
"client",
"(",
"'get'",
",",
"jid",
")",
"if",
"not",
"results",
":",
"results",
"=",
"self",
".",
"client",
"(",
"'recur.get'",
",",
"jid",
")",
"if",
"not",
"results",
":",
"return",
"None",
"return",
"RecurringJob",
"(",
"self",
".",
"client",
",",
"**",
"json",
".",
"loads",
"(",
"results",
")",
")",
"return",
"Job",
"(",
"self",
".",
"client",
",",
"**",
"json",
".",
"loads",
"(",
"results",
")",
")"
] | python | Get a job object corresponding to that jid, or ``None`` if it
doesn't exist | false |
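The method is a try-primary-then-secondary lookup that also switches the constructed type. The lookup skeleton in isolation, with plain dicts standing in for the client calls:

    def lookup(primary, secondary, key):
        # Try the primary store first, then fall back to the secondary;
        # None means the key is absent from both.
        value = primary.get(key)
        if value is None:
            return secondary.get(key)
        return value

    jobs = {'j1': 'job'}
    recurring = {'r1': 'recurring-job'}
    assert lookup(jobs, recurring, 'r1') == 'recurring-job'
    assert lookup(jobs, recurring, 'missing') is None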
1,973,702 | def get_cached_translation(instance, language_code=None, related_name=None, use_fallback=False):
"""
Fetch a cached translation.
.. versionadded:: 1.2 Added the ``related_name`` parameter.
"""
if language_code is None:
language_code = instance.get_current_language()
translated_model = instance._parler_meta.get_model_by_related_name(related_name)
values = _get_cached_values(instance, translated_model, language_code, use_fallback)
if not values:
return None
try:
translation = translated_model(**values)
except TypeError:
# Some model field was removed, cache entry is no longer working.
return None
translation._state.adding = False
return translation | [
"def",
"get_cached_translation",
"(",
"instance",
",",
"language_code",
"=",
"None",
",",
"related_name",
"=",
"None",
",",
"use_fallback",
"=",
"False",
")",
":",
"if",
"language_code",
"is",
"None",
":",
"language_code",
"=",
"instance",
".",
"get_current_language",
"(",
")",
"translated_model",
"=",
"instance",
".",
"_parler_meta",
".",
"get_model_by_related_name",
"(",
"related_name",
")",
"values",
"=",
"_get_cached_values",
"(",
"instance",
",",
"translated_model",
",",
"language_code",
",",
"use_fallback",
")",
"if",
"not",
"values",
":",
"return",
"None",
"try",
":",
"translation",
"=",
"translated_model",
"(",
"**",
"values",
")",
"except",
"TypeError",
":",
"return",
"None",
"translation",
".",
"_state",
".",
"adding",
"=",
"False",
"return",
"translation"
] | python | Fetch a cached translation.
.. versionadded:: 1.2 Added the ``related_name`` parameter. | false |
1,851,236 | def generate_thumbnail_download_link_vimeo(video_id_from_shortcode):
"""Thumbnail URL generator for Vimeo videos."""
# Following the Vimeo API at https://developer.vimeo.com/api#video-request, we need to request the video's metadata and get the thumbnail from that. First, then, we'll get the metadata in JSON format, and then will parse it to find the thumbnail URL.
video_metadata = urlopen("https://vimeo.com/api/v2/video/" + str(video_id_from_shortcode) + ".json").read() # Download the video's metadata in JSON format.
video_metadata_parsed = json.loads(video_metadata.decode('utf-8')) # Parse the JSON
video_thumbnail_large_location = video_metadata_parsed[0]['thumbnail_large'] # Go into the JSON and get the URL of the thumbnail.
return video_thumbnail_large_location | [
"def",
"generate_thumbnail_download_link_vimeo",
"(",
"video_id_from_shortcode",
")",
":",
"video_metadata",
"=",
"urlopen",
"(",
"\"https://vimeo.com/api/v2/video/\"",
"+",
"str",
"(",
"video_id_from_shortcode",
")",
"+",
"\".json\"",
")",
".",
"read",
"(",
")",
"video_metadata_parsed",
"=",
"json",
".",
"loads",
"(",
"video_metadata",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"video_thumbnail_large_location",
"=",
"video_metadata_parsed",
"[",
"0",
"]",
"[",
"'thumbnail_large'",
"]",
"return",
"video_thumbnail_large_location"
] | python | Thumbnail URL generator for Vimeo videos. | false |
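A Python 3 version of the same lookup with explicit imports; it assumes the legacy Vimeo v2 simple API still responds as the snippet's comments describe:

    import json
    from urllib.request import urlopen

    def vimeo_thumbnail_url(video_id):
        # The v2 simple API returns a one-element JSON list of metadata dicts.
        url = "https://vimeo.com/api/v2/video/%s.json" % video_id
        with urlopen(url) as response:
            metadata = json.loads(response.read().decode('utf-8'))
        return metadata[0]['thumbnail_large']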
2,154,673 | def generate_component_id_namespace_overview(model, components):
"""
Tabulate which MIRIAM databases the component's identifier matches.
Parameters
----------
model : cobra.Model
A cobrapy metabolic model.
components : {"metabolites", "reactions", "genes"}
A string denoting `cobra.Model` components.
Returns
-------
pandas.DataFrame
The index of the table is given by the component identifiers. Each
column corresponds to one MIRIAM database and a Boolean entry
determines whether the annotation matches.
"""
patterns = {
"metabolites": METABOLITE_ANNOTATIONS,
"reactions": REACTION_ANNOTATIONS,
"genes": GENE_PRODUCT_ANNOTATIONS
}[components]
databases = list(patterns)
data = list()
index = list()
for elem in getattr(model, components):
index.append(elem.id)
data.append(tuple(patterns[db].match(elem.id) is not None
for db in databases))
df = pd.DataFrame(data, index=index, columns=databases)
if components != "genes":
# Clean up of the dataframe. Unfortunately the Biocyc patterns match
# broadly. Hence, whenever a Metabolite or Reaction ID matches to any
# DB pattern AND the Biocyc pattern we have to assume that this is a
# false positive.
# First determine all rows in which 'biocyc' and other entries are
# True simultaneously and use this Boolean series to create another
# column temporarily.
df['duplicate'] = df[df['biocyc']].sum(axis=1) >= 2
# Replace all nan values with False
df['duplicate'].fillna(False, inplace=True)
# Use the additional column to index the original dataframe to identify
# false positive biocyc hits and set them to False.
df.loc[df['duplicate'], 'biocyc'] = False
# Delete the additional column
del df['duplicate']
return df | [
"def",
"generate_component_id_namespace_overview",
"(",
"model",
",",
"components",
")",
":",
"patterns",
"=",
"{",
"\"metabolites\"",
":",
"METABOLITE_ANNOTATIONS",
",",
"\"reactions\"",
":",
"REACTION_ANNOTATIONS",
",",
"\"genes\"",
":",
"GENE_PRODUCT_ANNOTATIONS",
"}",
"[",
"components",
"]",
"databases",
"=",
"list",
"(",
"patterns",
")",
"data",
"=",
"list",
"(",
")",
"index",
"=",
"list",
"(",
")",
"for",
"elem",
"in",
"getattr",
"(",
"model",
",",
"components",
")",
":",
"index",
".",
"append",
"(",
"elem",
".",
"id",
")",
"data",
".",
"append",
"(",
"tuple",
"(",
"patterns",
"[",
"db",
"]",
".",
"match",
"(",
"elem",
".",
"id",
")",
"is",
"not",
"None",
"for",
"db",
"in",
"databases",
")",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
",",
"index",
"=",
"index",
",",
"columns",
"=",
"databases",
")",
"if",
"components",
"!=",
"\"genes\"",
":",
"df",
"[",
"'duplicate'",
"]",
"=",
"df",
"[",
"df",
"[",
"'biocyc'",
"]",
"]",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
">=",
"2",
"df",
"[",
"'duplicate'",
"]",
".",
"fillna",
"(",
"False",
",",
"inplace",
"=",
"True",
")",
"df",
".",
"loc",
"[",
"df",
"[",
"'duplicate'",
"]",
",",
"'biocyc'",
"]",
"=",
"False",
"del",
"df",
"[",
"'duplicate'",
"]",
"return",
"df"
] | python | Tabulate which MIRIAM databases the component's identifier matches.
Parameters
----------
model : cobra.Model
A cobrapy metabolic model.
components : {"metabolites", "reactions", "genes"}
A string denoting `cobra.Model` components.
Returns
-------
pandas.DataFrame
The index of the table is given by the component identifiers. Each
column corresponds to one MIRIAM database and a Boolean entry
determines whether the annotation matches. | false |
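A tiny reproduction of the Biocyc false-positive clean-up on fabricated data; the reindex replaces the original's temporary-column-and-fillna dance with the same effect:

    import pandas as pd

    df = pd.DataFrame(
        {'biocyc': [True, True, False], 'kegg': [True, False, False]},
        index=['m1', 'm2', 'm3'])

    # Rows where 'biocyc' matched alongside at least one other database
    # are treated as false positives of the overly broad Biocyc pattern.
    duplicate = (df[df['biocyc']].sum(axis=1) >= 2).reindex(
        df.index, fill_value=False)
    df.loc[duplicate, 'biocyc'] = False

    assert not df.loc['m1', 'biocyc']   # also matched kegg -> cleared
    assert df.loc['m2', 'biocyc']       # matched only biocyc -> kept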
2,170,009 | def __init__(self, configTreeModel, parent=None):
""" Constructor
"""
super(ConfigTreeView, self).__init__(treeModel=configTreeModel, parent=parent)
self.expanded.connect(configTreeModel.expand)
self.collapsed.connect(configTreeModel.collapse)
#configTreeModel.update.connect(self.update) # not necessary
#self.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectItems)
treeHeader = self.header()
treeHeader.resizeSection(ConfigTreeModel.COL_NODE_NAME, RIGHT_DOCK_WIDTH * 0.5)
treeHeader.resizeSection(ConfigTreeModel.COL_VALUE, RIGHT_DOCK_WIDTH * 0.5)
headerNames = self.model().horizontalHeaders
enabled = dict((name, True) for name in headerNames)
enabled[headerNames[ConfigTreeModel.COL_NODE_NAME]] = False # Name cannot be unchecked
enabled[headerNames[ConfigTreeModel.COL_VALUE]] = False # Value cannot be unchecked
checked = dict((name, False) for name in headerNames)
checked[headerNames[ConfigTreeModel.COL_NODE_NAME]] = True # Checked by default
checked[headerNames[ConfigTreeModel.COL_VALUE]] = True # Checked by default
self.addHeaderContextMenu(checked=checked, enabled=enabled, checkable={})
self.setRootIsDecorated(False)
self.setUniformRowHeights(True)
self.setItemDelegate(ConfigItemDelegate())
self.setEditTriggers(QtWidgets.QAbstractItemView.AllEditTriggers) | [
"def",
"__init__",
"(",
"self",
",",
"configTreeModel",
",",
"parent",
"=",
"None",
")",
":",
"super",
"(",
"ConfigTreeView",
",",
"self",
")",
".",
"__init__",
"(",
"treeModel",
"=",
"configTreeModel",
",",
"parent",
"=",
"parent",
")",
"self",
".",
"expanded",
".",
"connect",
"(",
"configTreeModel",
".",
"expand",
")",
"self",
".",
"collapsed",
".",
"connect",
"(",
"configTreeModel",
".",
"collapse",
")",
"treeHeader",
"=",
"self",
".",
"header",
"(",
")",
"treeHeader",
".",
"resizeSection",
"(",
"ConfigTreeModel",
".",
"COL_NODE_NAME",
",",
"RIGHT_DOCK_WIDTH",
"*",
"0.5",
")",
"treeHeader",
".",
"resizeSection",
"(",
"ConfigTreeModel",
".",
"COL_VALUE",
",",
"RIGHT_DOCK_WIDTH",
"*",
"0.5",
")",
"headerNames",
"=",
"self",
".",
"model",
"(",
")",
".",
"horizontalHeaders",
"enabled",
"=",
"dict",
"(",
"(",
"name",
",",
"True",
")",
"for",
"name",
"in",
"headerNames",
")",
"enabled",
"[",
"headerNames",
"[",
"ConfigTreeModel",
".",
"COL_NODE_NAME",
"]",
"]",
"=",
"False",
"enabled",
"[",
"headerNames",
"[",
"ConfigTreeModel",
".",
"COL_VALUE",
"]",
"]",
"=",
"False",
"checked",
"=",
"dict",
"(",
"(",
"name",
",",
"False",
")",
"for",
"name",
"in",
"headerNames",
")",
"checked",
"[",
"headerNames",
"[",
"ConfigTreeModel",
".",
"COL_NODE_NAME",
"]",
"]",
"=",
"True",
"checked",
"[",
"headerNames",
"[",
"ConfigTreeModel",
".",
"COL_VALUE",
"]",
"]",
"=",
"True",
"self",
".",
"addHeaderContextMenu",
"(",
"checked",
"=",
"checked",
",",
"enabled",
"=",
"enabled",
",",
"checkable",
"=",
"{",
"}",
")",
"self",
".",
"setRootIsDecorated",
"(",
"False",
")",
"self",
".",
"setUniformRowHeights",
"(",
"True",
")",
"self",
".",
"setItemDelegate",
"(",
"ConfigItemDelegate",
"(",
")",
")",
"self",
".",
"setEditTriggers",
"(",
"QtWidgets",
".",
"QAbstractItemView",
".",
"AllEditTriggers",
")"
] | python | Constructor | false |
2,672,284 | def enforce_required_fields(self, attrs):
"""
The `UniqueTogetherValidator` always forces an implied 'required'
state on the fields it applies to.
"""
if self.instance is not None:
return
missing = {
field_name: self.missing_message
for field_name in self.fields
if field_name not in attrs
}
if missing:
raise ValidationError(missing) | [
"def",
"enforce_required_fields",
"(",
"self",
",",
"attrs",
")",
":",
"if",
"self",
".",
"instance",
"is",
"not",
"None",
":",
"return",
"missing",
"=",
"{",
"field_name",
":",
"self",
".",
"missing_message",
"for",
"field_name",
"in",
"self",
".",
"fields",
"if",
"field_name",
"not",
"in",
"attrs",
"}",
"if",
"missing",
":",
"raise",
"ValidationError",
"(",
"missing",
")"
] | python | The `UniqueTogetherValidator` always forces an implied 'required'
state on the fields it applies to. | false |
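Reduced to its essentials, the validator's check is a dict comprehension over absent keys (field names and message below are made up):

    fields = ('event', 'date')
    missing_message = 'This field is required.'
    attrs = {'event': 'meetup'}

    missing = {name: missing_message for name in fields if name not in attrs}
    assert missing == {'date': 'This field is required.'}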
2,109,222 | def bb(self,*args,**kwargs):
"""
NAME:
bb
PURPOSE:
return Galactic latitude
INPUT:
t - (optional) time at which to get bb
obs=[X,Y,Z] - (optional) position of observer (in kpc)
(default=Object-wide default)
OR Orbit object that corresponds to the orbit
of the observer
Y is ignored and always assumed to be zero
ro= distance in kpc corresponding to R=1. (default=Object-wide default)
OUTPUT:
b(t)
HISTORY:
2011-02-23 - Written - Bovy (NYU)
"""
_check_roSet(self,kwargs,'bb')
lbd= self._lbd(*args,**kwargs)
return lbd[:,1] | [
"def",
"bb",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"_check_roSet",
"(",
"self",
",",
"kwargs",
",",
"'bb'",
")",
"lbd",
"=",
"self",
".",
"_lbd",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"return",
"lbd",
"[",
":",
",",
"1",
"]"
] | python | NAME:
bb
PURPOSE:
return Galactic latitude
INPUT:
t - (optional) time at which to get bb
obs=[X,Y,Z] - (optional) position of observer (in kpc)
(default=Object-wide default)
OR Orbit object that corresponds to the orbit
of the observer
Y is ignored and always assumed to be zero
ro= distance in kpc corresponding to R=1. (default=Object-wide default)
OUTPUT:
b(t)
HISTORY:
2011-02-23 - Written - Bovy (NYU) | false |
2,533,487 | def get_cats(self):
'''Get top keyword categories'''
start_url = 'http://top.taobao.com/index.php?from=tbsy'
rs = self.fetch(start_url)
if not rs: return None
soup = BeautifulSoup(rs.content, convertEntities=BeautifulSoup.HTML_ENTITIES, markupMassage=hexentityMassage)
cats = [{'id':'TR_%s'%li['id'].encode('utf-8').upper(), 'title':li.a.text.encode('utf-8').strip()} for li in soup.find('div', id='nav').findAll('li') if li['id']!='index']
threadPool = ThreadPool(len(cats) if len(cats)<=5 else 5)
for cat in cats:
threadPool.run(self.get_cats_thread, callback=None, cat=cat)
cats = threadPool.killAllWorkers(None)
return cats | [
"def",
"get_cats",
"(",
"self",
")",
":",
"start_url",
"=",
"'http://top.taobao.com/index.php?from=tbsy'",
"rs",
"=",
"self",
".",
"fetch",
"(",
"start_url",
")",
"if",
"not",
"rs",
":",
"return",
"None",
"soup",
"=",
"BeautifulSoup",
"(",
"rs",
".",
"content",
",",
"convertEntities",
"=",
"BeautifulSoup",
".",
"HTML_ENTITIES",
",",
"markupMassage",
"=",
"hexentityMassage",
")",
"cats",
"=",
"[",
"{",
"'id'",
":",
"'TR_%s'",
"%",
"li",
"[",
"'id'",
"]",
".",
"encode",
"(",
"'utf-8'",
")",
".",
"upper",
"(",
")",
",",
"'title'",
":",
"li",
".",
"a",
".",
"text",
".",
"encode",
"(",
"'utf-8'",
")",
".",
"strip",
"(",
")",
"}",
"for",
"li",
"in",
"soup",
".",
"find",
"(",
"'div'",
",",
"id",
"=",
"'nav'",
")",
".",
"findAll",
"(",
"'li'",
")",
"if",
"li",
"[",
"'id'",
"]",
"!=",
"'index'",
"]",
"threadPool",
"=",
"ThreadPool",
"(",
"len",
"(",
"cats",
")",
"if",
"len",
"(",
"cats",
")",
"<=",
"5",
"else",
"5",
")",
"for",
"cat",
"in",
"cats",
":",
"threadPool",
".",
"run",
"(",
"self",
".",
"get_cats_thread",
",",
"callback",
"=",
"None",
",",
"cat",
"=",
"cat",
")",
"cats",
"=",
"threadPool",
".",
"killAllWorkers",
"(",
"None",
")",
"return",
"cats"
] | python | Get top keyword categories | false |
2,527,578 | def convert(self, path, version, target):
"""Converts the specified source file to a new version number."""
source = self.comparer.get_representation(path)
lines = [ '# <fortpy version="{}"></fortpy>\n'.format(version) ]
for line in self.comparer.template.contents[version].preamble:
lines.append(line.write(source.preamble, source.version, source.stored) + "\n")
for line in self.comparer.template.contents[version].body:
for valueset in source.body:
lines.append(line.write(valueset, source.version, source.stored) + "\n")
with open(os.path.expanduser(target), 'w') as f:
f.writelines(lines) | [
"def",
"convert",
"(",
"self",
",",
"path",
",",
"version",
",",
"target",
")",
":",
"source",
"=",
"self",
".",
"comparer",
".",
"get_representation",
"(",
"path",
")",
"lines",
"=",
"[",
"'# <fortpy version=\"{}\"></fortpy>\\n'",
".",
"format",
"(",
"version",
")",
"]",
"for",
"line",
"in",
"self",
".",
"comparer",
".",
"template",
".",
"contents",
"[",
"version",
"]",
".",
"preamble",
":",
"lines",
".",
"append",
"(",
"line",
".",
"write",
"(",
"source",
".",
"preamble",
",",
"source",
".",
"version",
",",
"source",
".",
"stored",
")",
"+",
"\"\\n\"",
")",
"for",
"line",
"in",
"self",
".",
"comparer",
".",
"template",
".",
"contents",
"[",
"version",
"]",
".",
"body",
":",
"for",
"valueset",
"in",
"source",
".",
"body",
":",
"lines",
".",
"append",
"(",
"line",
".",
"write",
"(",
"valueset",
",",
"source",
".",
"version",
",",
"source",
".",
"stored",
")",
"+",
"\"\\n\"",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"target",
")",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"writelines",
"(",
"lines",
")"
] | python | Converts the specified source file to a new version number. | false |
2,429,176 | def percentile(self, percentile):
"""Get a percentile of all floats in the map. Since the sorting is
done in-place, the map is no longer safe to use after calling this
or median()"""
percentile = float(percentile)
if percentile != percentile or percentile < 0 or percentile > 100:
raise ValueError('Expected a 0 <= percentile <= 100')
result = c_float()
if not Gauged.map_percentile(self.ptr, percentile, byref(result)):
raise MemoryError
return result.value | [
"def",
"percentile",
"(",
"self",
",",
"percentile",
")",
":",
"percentile",
"=",
"float",
"(",
"percentile",
")",
"if",
"percentile",
"!=",
"percentile",
"or",
"percentile",
"<",
"0",
"or",
"percentile",
">",
"100",
":",
"raise",
"ValueError",
"(",
"'Expected a 0 <= percentile <= 100'",
")",
"result",
"=",
"c_float",
"(",
")",
"if",
"not",
"Gauged",
".",
"map_percentile",
"(",
"self",
".",
"ptr",
",",
"percentile",
",",
"byref",
"(",
"result",
")",
")",
":",
"raise",
"MemoryError",
"return",
"result",
".",
"value"
] | python | Get a percentile of all floats in the map. Since the sorting is
done in-place, the map is no longer safe to use after calling this
or median() | false |
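The guard ``percentile != percentile`` is the standard NaN test, since NaN is the only float unequal to itself. The validation logic in isolation:

    def validate_percentile(p):
        p = float(p)
        # NaN fails p == p, so one comparison also rejects NaN inputs.
        if p != p or p < 0 or p > 100:
            raise ValueError('Expected a 0 <= percentile <= 100')
        return p

    assert validate_percentile('99.9') == 99.9
    try:
        validate_percentile(float('nan'))
    except ValueError:
        pass  # rejected, as intended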
2,068,486 | def get_identities(self, item):
""" Return the identities from an item """
item = item['data']
for identity in ['creator']:
# Todo: questions has also involved and solved_by
if identity in item and item[identity]:
user = self.get_sh_identity(item[identity])
yield user
if 'answers_data' in item:
for answer in item['answers_data']:
user = self.get_sh_identity(answer[identity])
yield user | [
"def",
"get_identities",
"(",
"self",
",",
"item",
")",
":",
"item",
"=",
"item",
"[",
"'data'",
"]",
"for",
"identity",
"in",
"[",
"'creator'",
"]",
":",
"if",
"identity",
"in",
"item",
"and",
"item",
"[",
"identity",
"]",
":",
"user",
"=",
"self",
".",
"get_sh_identity",
"(",
"item",
"[",
"identity",
"]",
")",
"yield",
"user",
"if",
"'answers_data'",
"in",
"item",
":",
"for",
"answer",
"in",
"item",
"[",
"'answers_data'",
"]",
":",
"user",
"=",
"self",
".",
"get_sh_identity",
"(",
"answer",
"[",
"identity",
"]",
")",
"yield",
"user"
] | python | Return the identities from an item | false |
1,958,925 | def _GetDataStreams(self):
"""Retrieves the data streams.
Returns:
list[TSKDataStream]: data streams.
"""
if self._data_streams is None:
if self._file_system.IsHFS():
known_data_attribute_types = [
pytsk3.TSK_FS_ATTR_TYPE_HFS_DEFAULT,
pytsk3.TSK_FS_ATTR_TYPE_HFS_DATA]
elif self._file_system.IsNTFS():
known_data_attribute_types = [pytsk3.TSK_FS_ATTR_TYPE_NTFS_DATA]
else:
known_data_attribute_types = None
self._data_streams = []
tsk_fs_meta_type = getattr(
self._tsk_file.info.meta, 'type', pytsk3.TSK_FS_META_TYPE_UNDEF)
if not known_data_attribute_types:
if tsk_fs_meta_type == pytsk3.TSK_FS_META_TYPE_REG:
data_stream = TSKDataStream(self._file_system, None)
self._data_streams.append(data_stream)
else:
for tsk_attribute in self._tsk_file:
# NTFS allows directories to have data streams.
if (not self._file_system.IsNTFS() and
tsk_fs_meta_type != pytsk3.TSK_FS_META_TYPE_REG):
continue
if getattr(tsk_attribute, 'info', None) is None:
continue
attribute_type = getattr(tsk_attribute.info, 'type', None)
if attribute_type in known_data_attribute_types:
data_stream = TSKDataStream(self._file_system, tsk_attribute)
self._data_streams.append(data_stream)
return self._data_streams | [
"def",
"_GetDataStreams",
"(",
"self",
")",
":",
"if",
"self",
".",
"_data_streams",
"is",
"None",
":",
"if",
"self",
".",
"_file_system",
".",
"IsHFS",
"(",
")",
":",
"known_data_attribute_types",
"=",
"[",
"pytsk3",
".",
"TSK_FS_ATTR_TYPE_HFS_DEFAULT",
",",
"pytsk3",
".",
"TSK_FS_ATTR_TYPE_HFS_DATA",
"]",
"elif",
"self",
".",
"_file_system",
".",
"IsNTFS",
"(",
")",
":",
"known_data_attribute_types",
"=",
"[",
"pytsk3",
".",
"TSK_FS_ATTR_TYPE_NTFS_DATA",
"]",
"else",
":",
"known_data_attribute_types",
"=",
"None",
"self",
".",
"_data_streams",
"=",
"[",
"]",
"tsk_fs_meta_type",
"=",
"getattr",
"(",
"self",
".",
"_tsk_file",
".",
"info",
".",
"meta",
",",
"'type'",
",",
"pytsk3",
".",
"TSK_FS_META_TYPE_UNDEF",
")",
"if",
"not",
"known_data_attribute_types",
":",
"if",
"tsk_fs_meta_type",
"==",
"pytsk3",
".",
"TSK_FS_META_TYPE_REG",
":",
"data_stream",
"=",
"TSKDataStream",
"(",
"self",
".",
"_file_system",
",",
"None",
")",
"self",
".",
"_data_streams",
".",
"append",
"(",
"data_stream",
")",
"else",
":",
"for",
"tsk_attribute",
"in",
"self",
".",
"_tsk_file",
":",
"if",
"(",
"not",
"self",
".",
"_file_system",
".",
"IsNTFS",
"(",
")",
"and",
"tsk_fs_meta_type",
"!=",
"pytsk3",
".",
"TSK_FS_META_TYPE_REG",
")",
":",
"continue",
"if",
"getattr",
"(",
"tsk_attribute",
",",
"'info'",
",",
"None",
")",
"is",
"None",
":",
"continue",
"attribute_type",
"=",
"getattr",
"(",
"tsk_attribute",
".",
"info",
",",
"'type'",
",",
"None",
")",
"if",
"attribute_type",
"in",
"known_data_attribute_types",
":",
"data_stream",
"=",
"TSKDataStream",
"(",
"self",
".",
"_file_system",
",",
"tsk_attribute",
")",
"self",
".",
"_data_streams",
".",
"append",
"(",
"data_stream",
")",
"return",
"self",
".",
"_data_streams"
] | python | Retrieves the data streams.
Returns:
list[TSKDataStream]: data streams. | false |
1,633,765 | def __virtual__():
'''
Only load if defined in fileserver_backend and azure.storage is present
'''
if __virtualname__ not in __opts__['fileserver_backend']:
return False
if not HAS_AZURE:
return False
if 'azurefs' not in __opts__:
return False
if not _validate_config():
return False
return True | [
"def",
"__virtual__",
"(",
")",
":",
"if",
"__virtualname__",
"not",
"in",
"__opts__",
"[",
"'fileserver_backend'",
"]",
":",
"return",
"False",
"if",
"not",
"HAS_AZURE",
":",
"return",
"False",
"if",
"'azurefs'",
"not",
"in",
"__opts__",
":",
"return",
"False",
"if",
"not",
"_validate_config",
"(",
")",
":",
"return",
"False",
"return",
"True"
] | python | Only load if defined in fileserver_backend and azure.storage is present | false |
2,278,651 | def _set_pixel_and_convert_color(self, x, y, color):
"""set the pixel but convert the color before."""
if color is None:
return
color = self._convert_color_to_rrggbb(color)
self._set_pixel(x, y, color) | [
"def",
"_set_pixel_and_convert_color",
"(",
"self",
",",
"x",
",",
"y",
",",
"color",
")",
":",
"if",
"color",
"is",
"None",
":",
"return",
"color",
"=",
"self",
".",
"_convert_color_to_rrggbb",
"(",
"color",
")",
"self",
".",
"_set_pixel",
"(",
"x",
",",
"y",
",",
"color",
")"
] | python | set the pixel but convert the color before. | false |
2,331,334 | def wall_of_name(self):
'''
Appends identifiers for the different databases (such as Entrez id's)
and returns them. Uses the CrossRef class below.
'''
names = []
if self.standard_name:
names.append(self.standard_name)
if self.systematic_name:
names.append(self.systematic_name)
names.extend([xref.xrid for xref in self.crossref_set.all()])
for i in range(len(names)):
names[i] = re.sub(nonalpha, '', names[i])
names_string = ' '.join(names)
if self.standard_name:
names_string += ' ' + re.sub(num, '', self.standard_name)
return names_string | [
"def",
"wall_of_name",
"(",
"self",
")",
":",
"names",
"=",
"[",
"]",
"if",
"self",
".",
"standard_name",
":",
"names",
".",
"append",
"(",
"self",
".",
"standard_name",
")",
"if",
"self",
".",
"systematic_name",
":",
"names",
".",
"append",
"(",
"self",
".",
"systematic_name",
")",
"names",
".",
"extend",
"(",
"[",
"xref",
".",
"xrid",
"for",
"xref",
"in",
"self",
".",
"crossref_set",
".",
"all",
"(",
")",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"names",
")",
")",
":",
"names",
"[",
"i",
"]",
"=",
"re",
".",
"sub",
"(",
"nonalpha",
",",
"''",
",",
"names",
"[",
"i",
"]",
")",
"names_string",
"=",
"' '",
".",
"join",
"(",
"names",
")",
"if",
"self",
".",
"standard_name",
":",
"names_string",
"+=",
"' '",
"+",
"re",
".",
"sub",
"(",
"num",
",",
"''",
",",
"self",
".",
"standard_name",
")",
"return",
"names_string"
] | python | Appends identifiers for the different databases (such as Entrez id's)
and returns them. Uses the CrossRef class below. | false |
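A runnable sketch of the same scrubbing; the ``nonalpha`` and ``num`` patterns are assumptions, since the snippet's module-level definitions are not shown:

    import re

    nonalpha = re.compile(r'[^a-zA-Z0-9]')  # assumed definition
    num = re.compile(r'[0-9]')              # assumed definition

    def wall_of_name(standard_name, systematic_name, xrids):
        names = []
        if standard_name:
            names.append(standard_name)
        if systematic_name:
            names.append(systematic_name)
        names.extend(xrids)
        names = [re.sub(nonalpha, '', n) for n in names]
        names_string = ' '.join(names)
        if standard_name:
            # Also index the standard name with digits stripped.
            names_string += ' ' + re.sub(num, '', standard_name)
        return names_string

    assert wall_of_name('ABC1', 'Y-123', ['X:9']) == 'ABC1 Y123 X9 ABC'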
1,693,566 | def remove_from_labels(self, label):
"""
:calls: `DELETE /repos/:owner/:repo/issues/:number/labels/:name <http://developer.github.com/v3/issues/labels>`_
:param label: :class:`github.Label.Label` or string
:rtype: None
"""
assert isinstance(label, (github.Label.Label, str, unicode)), label
if isinstance(label, github.Label.Label):
label = label._identity
else:
label = urllib.quote(label)
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.issue_url + "/labels/" + label
) | [
"def",
"remove_from_labels",
"(",
"self",
",",
"label",
")",
":",
"assert",
"isinstance",
"(",
"label",
",",
"(",
"github",
".",
"Label",
".",
"Label",
",",
"str",
",",
"unicode",
")",
")",
",",
"label",
"if",
"isinstance",
"(",
"label",
",",
"github",
".",
"Label",
".",
"Label",
")",
":",
"label",
"=",
"label",
".",
"_identity",
"else",
":",
"label",
"=",
"urllib",
".",
"quote",
"(",
"label",
")",
"headers",
",",
"data",
"=",
"self",
".",
"_requester",
".",
"requestJsonAndCheck",
"(",
"\"DELETE\"",
",",
"self",
".",
"issue_url",
"+",
"\"/labels/\"",
"+",
"label",
")"
] | python | :calls: `DELETE /repos/:owner/:repo/issues/:number/labels/:name <http://developer.github.com/v3/issues/labels>`_
:param label: :class:`github.Label.Label` or string
:rtype: None | false |
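Quoting protects label names containing spaces or non-ASCII characters in the URL path. The Python 3 spelling of the ``urllib.quote`` call:

    from urllib.parse import quote

    label = 'help wanted'
    url = 'https://api.github.com/repos/o/r/issues/1/labels/' + quote(label)
    assert url.endswith('/labels/help%20wanted')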
2,264,074 | def _init_code_edit(self, backend):
"""
Initializes the code editor (setup modes, panels and colors).
"""
from pyqode.core import panels, modes
self.modes.append(_LinkHighlighter(self.document()))
self.background = self._formatter.color_scheme.background
self.foreground = self._formatter.color_scheme.foreground
self._reset_stylesheet()
self.setCenterOnScroll(False)
self.setMouseTracking(True)
self.setUndoRedoEnabled(False)
search_panel = panels.SearchAndReplacePanel()
self.panels.append(search_panel, search_panel.Position.TOP)
self.action_copy.setShortcut('Ctrl+Shift+C')
self.action_paste.setShortcut('Ctrl+Shift+V')
self.remove_action(self.action_undo, sub_menu=None)
self.remove_action(self.action_redo, sub_menu=None)
self.remove_action(self.action_cut, sub_menu=None)
self.remove_action(self.action_duplicate_line, sub_menu=None)
self.remove_action(self.action_indent)
self.remove_action(self.action_un_indent)
self.remove_action(self.action_goto_line)
self.remove_action(search_panel.menu.menuAction())
self.remove_menu(self._sub_menus['Advanced'])
self.add_action(search_panel.actionSearch, sub_menu=None)
self.modes.append(modes.ZoomMode())
self.backend.start(backend) | [
"def",
"_init_code_edit",
"(",
"self",
",",
"backend",
")",
":",
"from",
"pyqode",
".",
"core",
"import",
"panels",
",",
"modes",
"self",
".",
"modes",
".",
"append",
"(",
"_LinkHighlighter",
"(",
"self",
".",
"document",
"(",
")",
")",
")",
"self",
".",
"background",
"=",
"self",
".",
"_formatter",
".",
"color_scheme",
".",
"background",
"self",
".",
"foreground",
"=",
"self",
".",
"_formatter",
".",
"color_scheme",
".",
"foreground",
"self",
".",
"_reset_stylesheet",
"(",
")",
"self",
".",
"setCenterOnScroll",
"(",
"False",
")",
"self",
".",
"setMouseTracking",
"(",
"True",
")",
"self",
".",
"setUndoRedoEnabled",
"(",
"False",
")",
"search_panel",
"=",
"panels",
".",
"SearchAndReplacePanel",
"(",
")",
"self",
".",
"panels",
".",
"append",
"(",
"search_panel",
",",
"search_panel",
".",
"Position",
".",
"TOP",
")",
"self",
".",
"action_copy",
".",
"setShortcut",
"(",
"'Ctrl+Shift+C'",
")",
"self",
".",
"action_paste",
".",
"setShortcut",
"(",
"'Ctrl+Shift+V'",
")",
"self",
".",
"remove_action",
"(",
"self",
".",
"action_undo",
",",
"sub_menu",
"=",
"None",
")",
"self",
".",
"remove_action",
"(",
"self",
".",
"action_redo",
",",
"sub_menu",
"=",
"None",
")",
"self",
".",
"remove_action",
"(",
"self",
".",
"action_cut",
",",
"sub_menu",
"=",
"None",
")",
"self",
".",
"remove_action",
"(",
"self",
".",
"action_duplicate_line",
",",
"sub_menu",
"=",
"None",
")",
"self",
".",
"remove_action",
"(",
"self",
".",
"action_indent",
")",
"self",
".",
"remove_action",
"(",
"self",
".",
"action_un_indent",
")",
"self",
".",
"remove_action",
"(",
"self",
".",
"action_goto_line",
")",
"self",
".",
"remove_action",
"(",
"search_panel",
".",
"menu",
".",
"menuAction",
"(",
")",
")",
"self",
".",
"remove_menu",
"(",
"self",
".",
"_sub_menus",
"[",
"'Advanced'",
"]",
")",
"self",
".",
"add_action",
"(",
"search_panel",
".",
"actionSearch",
",",
"sub_menu",
"=",
"None",
")",
"self",
".",
"modes",
".",
"append",
"(",
"modes",
".",
"ZoomMode",
"(",
")",
")",
"self",
".",
"backend",
".",
"start",
"(",
"backend",
")"
] | python | Initializes the code editor (setup modes, panels and colors). | false |
2,053,243 | def display_db_info(self):
"""Displays some basic info about the GnuCash book"""
with self.open_book() as book:
default_currency = book.default_currency
print("Default currency is ", default_currency.mnemonic) | [
"def",
"display_db_info",
"(",
"self",
")",
":",
"with",
"self",
".",
"open_book",
"(",
")",
"as",
"book",
":",
"default_currency",
"=",
"book",
".",
"default_currency",
"print",
"(",
"\"Default currency is \"",
",",
"default_currency",
".",
"mnemonic",
")"
] | python | Displays some basic info about the GnuCash book | false |
2,079,124 | def __init__(self, ErpReqLineItems=None, *args, **kw_args):
"""Initialises a new 'ErpRequisition' instance.
@param ErpReqLineItems:
"""
self._ErpReqLineItems = []
self.ErpReqLineItems = [] if ErpReqLineItems is None else ErpReqLineItems
super(ErpRequisition, self).__init__(*args, **kw_args) | [
"def",
"__init__",
"(",
"self",
",",
"ErpReqLineItems",
"=",
"None",
",",
"*",
"args",
",",
"**",
"kw_args",
")",
":",
"self",
".",
"_ErpReqLineItems",
"=",
"[",
"]",
"self",
".",
"ErpReqLineItems",
"=",
"[",
"]",
"if",
"ErpReqLineItems",
"is",
"None",
"else",
"ErpReqLineItems",
"super",
"(",
"ErpRequisition",
",",
"self",
")",
".",
"__init__",
"(",
"*",
"args",
",",
"**",
"kw_args",
")"
] | python | Initialises a new 'ErpRequisition' instance.
@param ErpReqLineItems: | false |
1,771,444 | def emit(self, batch):
"""Submits batches to Thrift HTTP Server through Binary Protocol.
:type batch:
:class:`~opencensus.ext.jaeger.trace_exporter.gen.jaeger.Batch`
:param batch: Object to emit Jaeger spans.
"""
try:
self.client.submitBatches([batch])
# it will call http_transport.flush() and
# status code and message will be updated
code = self.http_transport.code
msg = self.http_transport.message
if code >= 300 or code < 200:
logging.error("Traces cannot be uploaded;\
HTTP status code: {}, message {}".format(code, msg))
except Exception as e: # pragma: NO COVER
logging.error(getattr(e, 'message', e))
finally:
if self.http_transport.isOpen():
self.http_transport.close() | [
"def",
"emit",
"(",
"self",
",",
"batch",
")",
":",
"try",
":",
"self",
".",
"client",
".",
"submitBatches",
"(",
"[",
"batch",
"]",
")",
"code",
"=",
"self",
".",
"http_transport",
".",
"code",
"msg",
"=",
"self",
".",
"http_transport",
".",
"message",
"if",
"code",
">=",
"300",
"or",
"code",
"<",
"200",
":",
"logging",
".",
"error",
"(",
"\"Traces cannot be uploaded;\\\n HTTP status code: {}, message {}\"",
".",
"format",
"(",
"code",
",",
"msg",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"error",
"(",
"getattr",
"(",
"e",
",",
"'message'",
",",
"e",
")",
")",
"finally",
":",
"if",
"self",
".",
"http_transport",
".",
"isOpen",
"(",
")",
":",
"self",
".",
"http_transport",
".",
"close",
"(",
")"
] | python | Submits batches to Thrift HTTP Server through Binary Protocol.
:type batch:
:class:`~opencensus.ext.jaeger.trace_exporter.gen.jaeger.Batch`
:param batch: Object to emit Jaeger spans. | false |
1,954,141 | def _set_text(self, value):
""" set text at current working_index. Return whether it changed. """
working_index = self.working_index
working_lines = self._working_lines
original_value = working_lines[working_index]
working_lines[working_index] = value
# Return True when this text has been changed.
if len(value) != len(original_value):
# For Python 2, it seems that when two strings have a different
# length and one is a prefix of the other, Python still scans
# character by character to see whether the strings are different.
# (Some benchmarking showed significant differences for big
# documents. >100,000 of lines.)
return True
elif value != original_value:
return True
return False | [
"def",
"_set_text",
"(",
"self",
",",
"value",
")",
":",
"working_index",
"=",
"self",
".",
"working_index",
"working_lines",
"=",
"self",
".",
"_working_lines",
"original_value",
"=",
"working_lines",
"[",
"working_index",
"]",
"working_lines",
"[",
"working_index",
"]",
"=",
"value",
"if",
"len",
"(",
"value",
")",
"!=",
"len",
"(",
"original_value",
")",
":",
"return",
"True",
"elif",
"value",
"!=",
"original_value",
":",
"return",
"True",
"return",
"False"
] | python | set text at current working_index. Return whether it changed. | false |
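The early length comparison is a micro-optimization: length checks are O(1), while equality on equal-length strings may scan every character. The change-detection in isolation:

    def text_changed(old, new):
        # O(1) length test first; full comparison only on equal lengths.
        if len(new) != len(old):
            return True
        return new != old

    assert text_changed('abc', 'abcd') is True
    assert text_changed('abc', 'abd') is True
    assert text_changed('abc', 'abc') is False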
1,901,596 | def create_authz_decision_query(self, destination, action,
evidence=None, resource=None, subject=None,
message_id=0, consent=None, extensions=None,
sign=None, sign_alg=None, digest_alg=None, **kwargs):
""" Creates an authz decision query.
:param destination: The IdP endpoint
:param action: The action you want to perform (has to be at least one)
:param evidence: Why you should be able to perform the action
:param resource: The resource you want to perform the action on
:param subject: Who wants to do the thing
:param message_id: Message identifier
:param consent: If the principal gave her consent to this request
:param extensions: Possible request extensions
:param sign: Whether the request should be signed or not.
:return: AuthzDecisionQuery instance
"""
return self._message(AuthzDecisionQuery, destination, message_id,
consent, extensions, sign, action=action,
evidence=evidence, resource=resource,
subject=subject, sign_alg=sign_alg,
digest_alg=digest_alg, **kwargs) | [
"def",
"create_authz_decision_query",
"(",
"self",
",",
"destination",
",",
"action",
",",
"evidence",
"=",
"None",
",",
"resource",
"=",
"None",
",",
"subject",
"=",
"None",
",",
"message_id",
"=",
"0",
",",
"consent",
"=",
"None",
",",
"extensions",
"=",
"None",
",",
"sign",
"=",
"None",
",",
"sign_alg",
"=",
"None",
",",
"digest_alg",
"=",
"None",
",",
"**",
"kwargs",
")",
":",
"return",
"self",
".",
"_message",
"(",
"AuthzDecisionQuery",
",",
"destination",
",",
"message_id",
",",
"consent",
",",
"extensions",
",",
"sign",
",",
"action",
"=",
"action",
",",
"evidence",
"=",
"evidence",
",",
"resource",
"=",
"resource",
",",
"subject",
"=",
"subject",
",",
"sign_alg",
"=",
"sign_alg",
",",
"digest_alg",
"=",
"digest_alg",
",",
"**",
"kwargs",
")"
] | python | Creates an authz decision query.
:param destination: The IdP endpoint
:param action: The action you want to perform (has to be at least one)
:param evidence: Why you should be able to perform the action
:param resource: The resource you want to perform the action on
:param subject: Who wants to do the thing
:param message_id: Message identifier
:param consent: If the principal gave her consent to this request
:param extensions: Possible request extensions
:param sign: Whether the request should be signed or not.
:return: AuthzDecisionQuery instance | false |
2,419,123 | def regressfile(filename):
"""
Run all stories in filename 'filename' in python 2 and 3.
Rewrite stories if appropriate.
"""
_storybook().in_filename(filename).with_params(
**{"python version": "2.7.14"}
).ordered_by_name().play()
_storybook().with_params(**{"python version": "3.7.0"}).in_filename(
filename
).ordered_by_name().play() | [
"def",
"regressfile",
"(",
"filename",
")",
":",
"_storybook",
"(",
")",
".",
"in_filename",
"(",
"filename",
")",
".",
"with_params",
"(",
"**",
"{",
"\"python version\"",
":",
"\"2.7.14\"",
"}",
")",
".",
"ordered_by_name",
"(",
")",
".",
"play",
"(",
")",
"_storybook",
"(",
")",
".",
"with_params",
"(",
"**",
"{",
"\"python version\"",
":",
"\"3.7.0\"",
"}",
")",
".",
"in_filename",
"(",
"filename",
")",
".",
"ordered_by_name",
"(",
")",
".",
"play",
"(",
")"
] | python | Run all stories in filename 'filename' in python 2 and 3.
Rewrite stories if appropriate. | false |
2,371,203 | def __str__(self):
"""Return readable description of parameter"""
s = "<" + self.__class__.__name__ + " " + self.name + " " + self.type
s = s + " " + self.mode + " " + repr(self.value)
if self.choice is not None:
schoice = list(map(self.toString, self.choice))
s = s + " |" + "|".join(schoice) + "|"
else:
s = s + " " + repr(self.min) + " " + repr(self.max)
s = s + ' "' + self.prompt + '">'
return s | [
"def",
"__str__",
"(",
"self",
")",
":",
"s",
"=",
"\"<\"",
"+",
"self",
".",
"__class__",
".",
"__name__",
"+",
"\" \"",
"+",
"self",
".",
"name",
"+",
"\" \"",
"+",
"self",
".",
"type",
"s",
"=",
"s",
"+",
"\" \"",
"+",
"self",
".",
"mode",
"+",
"\" \"",
"+",
"repr",
"(",
"self",
".",
"value",
")",
"if",
"self",
".",
"choice",
"is",
"not",
"None",
":",
"schoice",
"=",
"list",
"(",
"map",
"(",
"self",
".",
"toString",
",",
"self",
".",
"choice",
")",
")",
"s",
"=",
"s",
"+",
"\" |\"",
"+",
"\"|\"",
".",
"join",
"(",
"schoice",
")",
"+",
"\"|\"",
"else",
":",
"s",
"=",
"s",
"+",
"\" \"",
"+",
"repr",
"(",
"self",
".",
"min",
")",
"+",
"\" \"",
"+",
"repr",
"(",
"self",
".",
"max",
")",
"s",
"=",
"s",
"+",
"' \"'",
"+",
"self",
".",
"prompt",
"+",
"'\">'",
"return",
"s"
] | python | Return readable description of parameter | false |
2,637,022 | def advance_to_checkpoint(self, checkpoint):
"""
Advance to the specified checkpoint, passing all preceding checkpoints including the specified checkpoint.
"""
if checkpoint in self._checkpoints:
for cp in self._checkpoints:
self.insert(cp)
if cp == checkpoint:
return cp
else:
raise InvalidCheckpointError(checkpoint) | [
"def",
"advance_to_checkpoint",
"(",
"self",
",",
"checkpoint",
")",
":",
"if",
"checkpoint",
"in",
"self",
".",
"_checkpoints",
":",
"for",
"cp",
"in",
"self",
".",
"_checkpoints",
":",
"self",
".",
"insert",
"(",
"cp",
")",
"if",
"cp",
"==",
"checkpoint",
":",
"return",
"cp",
"else",
":",
"raise",
"InvalidCheckpointError",
"(",
"checkpoint",
")"
] | python | Advance to the specified checkpoint, passing all preceding checkpoints including the specified checkpoint. | false |
2,465,949 | def combine_focus_with_next(self):
"""Combine the focus edit widget with the one below."""
below, ignore = self.get_next(self.focus)
if below is None:
# already at bottom
return
focus = self.lines[self.focus]
focus.set_edit_text(focus.edit_text + below.edit_text)
del self.lines[self.focus + 1] | [
"def",
"combine_focus_with_next",
"(",
"self",
")",
":",
"below",
",",
"ignore",
"=",
"self",
".",
"get_next",
"(",
"self",
".",
"focus",
")",
"if",
"below",
"is",
"None",
":",
"return",
"focus",
"=",
"self",
".",
"lines",
"[",
"self",
".",
"focus",
"]",
"focus",
".",
"set_edit_text",
"(",
"focus",
".",
"edit_text",
"+",
"below",
".",
"edit_text",
")",
"del",
"self",
".",
"lines",
"[",
"self",
".",
"focus",
"+",
"1",
"]"
] | python | Combine the focus edit widget with the one below. | false |
2,251,187 | def unshare(self, plotters, keys=None, auto_update=False, draw=None):
"""
Close the sharing connection of this plotter with others
This method undoes the sharing connections made by the :meth:`share`
method and releases the given `plotters` again, such that the
formatoptions in this plotter may be updated again to values different
from this one.
Parameters
----------
plotters: list of :class:`Plotter` instances or a :class:`Plotter`
The plotters to release
keys: string or iterable of strings
The formatoptions to unshare, or group names of formatoptions to
unshare all formatoptions of that group (see the
:attr:`fmt_groups` property). If None, all formatoptions of this
plotter are unshared.
%(InteractiveBase.start_update.parameters.draw)s
%(InteractiveBase.update.parameters.auto_update)s
See Also
--------
share, unshare_me"""
auto_update = auto_update or not self.no_auto_update
if isinstance(plotters, Plotter):
plotters = [plotters]
keys = self._set_sharing_keys(keys)
for plotter in plotters:
plotter.unshare_me(keys, auto_update=auto_update, draw=draw,
update_other=False)
self.update(force=keys, auto_update=auto_update, draw=draw) | [
"def",
"unshare",
"(",
"self",
",",
"plotters",
",",
"keys",
"=",
"None",
",",
"auto_update",
"=",
"False",
",",
"draw",
"=",
"None",
")",
":",
"auto_update",
"=",
"auto_update",
"or",
"not",
"self",
".",
"no_auto_update",
"if",
"isinstance",
"(",
"plotters",
",",
"Plotter",
")",
":",
"plotters",
"=",
"[",
"plotters",
"]",
"keys",
"=",
"self",
".",
"_set_sharing_keys",
"(",
"keys",
")",
"for",
"plotter",
"in",
"plotters",
":",
"plotter",
".",
"unshare_me",
"(",
"keys",
",",
"auto_update",
"=",
"auto_update",
",",
"draw",
"=",
"draw",
",",
"update_other",
"=",
"False",
")",
"self",
".",
"update",
"(",
"force",
"=",
"keys",
",",
"auto_update",
"=",
"auto_update",
",",
"draw",
"=",
"draw",
")"
] | python | Close the sharing connection of this plotter with others
This method undoes the sharing connections made by the :meth:`share`
method and releases the given `plotters` again, such that the
formatoptions in this plotter may be updated again to values different
from this one.
Parameters
----------
plotters: list of :class:`Plotter` instances or a :class:`Plotter`
The plotters to release
keys: string or iterable of strings
The formatoptions to unshare, or group names of formatoptions to
unshare all formatoptions of that group (see the
:attr:`fmt_groups` property). If None, all formatoptions of this
plotter are unshared.
%(InteractiveBase.start_update.parameters.draw)s
%(InteractiveBase.update.parameters.auto_update)s
See Also
--------
share, unshare_me | false |
2,205,584 | def load_config(filename):
""" Read config file specified by `filename`
Parameters
----------
filename : str
Description of filename
"""
package_path = ding0.__path__[0]
FILE = path.join(package_path, 'config', filename)
try:
cfg.read(FILE)
global _loaded
_loaded = True
except:
logger.exception("configfile not found.") | [
"def",
"load_config",
"(",
"filename",
")",
":",
"package_path",
"=",
"ding0",
".",
"__path__",
"[",
"0",
"]",
"FILE",
"=",
"path",
".",
"join",
"(",
"package_path",
",",
"'config'",
",",
"filename",
")",
"try",
":",
"cfg",
".",
"read",
"(",
"FILE",
")",
"global",
"_loaded",
"_loaded",
"=",
"True",
"except",
":",
"logger",
".",
"exception",
"(",
"\"configfile not found.\"",
")"
] | python | Read config file specified by `filename`
Parameters
----------
filename : str
Name of the config file, read from the ``config`` directory of the ding0 package | false |
2,186,113 | def __init__(self, *args, **kwargs):
"""
Adds a __dict__ member to this dictionary
"""
super(AttributeMap, self).__init__(*args, **kwargs)
self.__dict__ = self | [
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"super",
"(",
"AttributeMap",
",",
"self",
")",
".",
"__init__",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"self",
".",
"__dict__",
"=",
"self"
] | python | Adds a __dict__ member to this dictionary | false |
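Assigning ``self.__dict__ = self`` aliases attribute access to key access in both directions. A self-contained copy of the trick:

    class AttributeMap(dict):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.__dict__ = self  # attribute access now aliases key access

    m = AttributeMap(host='localhost', port=8080)
    assert m.host == 'localhost'
    m.port = 9090                 # attribute writes update the dict too
    assert m['port'] == 9090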
1,636,314 | def fcontext_add_policy(name, filetype=None, sel_type=None, sel_user=None, sel_level=None):
'''
.. versionadded:: 2019.2.0
Adds the SELinux policy for a given filespec and other optional parameters.
Returns the result of the call to semanage.
Note that you don't have to remove an entry before setting a new
one for a given filespec and filetype, as adding one with semanage
automatically overwrites a previously configured SELinux context.
name
filespec of the file or directory. Regex syntax is allowed.
filetype
The SELinux filetype specification. Use one of [a, f, d, c, b,
s, l, p]. See also ``man semanage-fcontext``. Defaults to 'a'
(all files).
sel_type
SELinux context type. There are many.
sel_user
SELinux user. Use ``semanage login -l`` to determine which ones
are available to you.
sel_level
The MLS range of the SELinux context.
CLI Example:
.. code-block:: bash
salt '*' selinux.fcontext_add_policy my-policy
'''
return _fcontext_add_or_delete_policy('add', name, filetype, sel_type, sel_user, sel_level) | [
"def",
"fcontext_add_policy",
"(",
"name",
",",
"filetype",
"=",
"None",
",",
"sel_type",
"=",
"None",
",",
"sel_user",
"=",
"None",
",",
"sel_level",
"=",
"None",
")",
":",
"return",
"_fcontext_add_or_delete_policy",
"(",
"'add'",
",",
"name",
",",
"filetype",
",",
"sel_type",
",",
"sel_user",
",",
"sel_level",
")"
] | python | .. versionadded:: 2019.2.0
Adds the SELinux policy for a given filespec and other optional parameters.
Returns the result of the call to semanage.
Note that you don't have to remove an entry before setting a new
one for a given filespec and filetype, as adding one with semanage
automatically overwrites a previously configured SELinux context.
name
filespec of the file or directory. Regex syntax is allowed.
filetype
The SELinux filetype specification. Use one of [a, f, d, c, b,
s, l, p]. See also ``man semanage-fcontext``. Defaults to 'a'
(all files).
sel_type
SELinux context type. There are many.
sel_user
SELinux user. Use ``semanage login -l`` to determine which ones
are available to you.
sel_level
The MLS range of the SELinux context.
CLI Example:
.. code-block:: bash
salt '*' selinux.fcontext_add_policy my-policy | false |
1,997,400 | def derive_fernet_key(input_key):
"""Derive a 32-bit b64-encoded Fernet key from arbitrary input key."""
hkdf = HKDF(
algorithm=hashes.SHA256(),
length=32,
salt=salt,
info=info,
backend=backend,
)
return base64.urlsafe_b64encode(hkdf.derive(force_bytes(input_key))) | [
"def",
"derive_fernet_key",
"(",
"input_key",
")",
":",
"hkdf",
"=",
"HKDF",
"(",
"algorithm",
"=",
"hashes",
".",
"SHA256",
"(",
")",
",",
"length",
"=",
"32",
",",
"salt",
"=",
"salt",
",",
"info",
"=",
"info",
",",
"backend",
"=",
"backend",
",",
")",
"return",
"base64",
".",
"urlsafe_b64encode",
"(",
"hkdf",
".",
"derive",
"(",
"force_bytes",
"(",
"input_key",
")",
")",
")"
] | python | Derive a 32-byte b64-encoded Fernet key from arbitrary input key. | false |
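A complete, runnable version with illustrative stand-ins for the snippet's module-level ``salt`` and ``info`` (real code must fix its own values), taking bytes directly instead of Django's ``force_bytes``; recent ``cryptography`` releases no longer need the ``backend`` argument:

    import base64
    from cryptography.fernet import Fernet
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.kdf.hkdf import HKDF

    SALT = b'example-salt'   # illustrative only; fix per application
    INFO = b'fernet-key'     # illustrative only; fix per application

    def derive_fernet_key(input_key: bytes) -> bytes:
        # HKDF-SHA256 stretches arbitrary key material to the 32 bytes
        # Fernet requires, then urlsafe-base64 encodes it.
        hkdf = HKDF(algorithm=hashes.SHA256(), length=32, salt=SALT, info=INFO)
        return base64.urlsafe_b64encode(hkdf.derive(input_key))

    f = Fernet(derive_fernet_key(b'arbitrary secret'))
    assert f.decrypt(f.encrypt(b'payload')) == b'payload'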
2,447,449 | def extend(self, xs: Union['List[T]', typing.List[T]]) -> 'List[T]': # type: ignore
"""doufo.List.extend
Args:
`self`
`xs` (`Union['List[T]', typing.List[T]]`): Another List object or Typing.List
Returns:
extented `List` (`List[T]`)
"""
return type(self)(self.unbox() + List(xs).unbox()) | [
"def",
"extend",
"(",
"self",
",",
"xs",
":",
"Union",
"[",
"'List[T]'",
",",
"typing",
".",
"List",
"[",
"T",
"]",
"]",
")",
"->",
"'List[T]'",
":",
"return",
"type",
"(",
"self",
")",
"(",
"self",
".",
"unbox",
"(",
")",
"+",
"List",
"(",
"xs",
")",
".",
"unbox",
"(",
")",
")"
] | python | doufo.List.extend
Args:
`self`
`xs` (`Union['List[T]', typing.List[T]]`): Another List object or Typing.List
Returns:
extented `List` (`List[T]`) | false |
1,850,567 | def __init__(self, screen, signal=None, jitter=6, **kwargs):
"""
:param screen: The Screen being used for the Scene.
:param signal: The renderer to use as the 'signal' in the white noise.
:param jitter: The amount that the signal will jump when there is noise.
Also see the common keyword arguments in :py:obj:`.Effect`.
"""
super(RandomNoise, self).__init__(screen, **kwargs)
self._signal = signal
self._strength = 0.0
self._step = 0.0
self._jitter = jitter | [
"def",
"__init__",
"(",
"self",
",",
"screen",
",",
"signal",
"=",
"None",
",",
"jitter",
"=",
"6",
",",
"**",
"kwargs",
")",
":",
"super",
"(",
"RandomNoise",
",",
"self",
")",
".",
"__init__",
"(",
"screen",
",",
"**",
"kwargs",
")",
"self",
".",
"_signal",
"=",
"signal",
"self",
".",
"_strength",
"=",
"0.0",
"self",
".",
"_step",
"=",
"0.0",
"self",
".",
"_jitter",
"=",
"jitter"
] | python | :param screen: The Screen being used for the Scene.
:param signal: The renderer to use as the 'signal' in the white noise.
:param jitter: The amount that the signal will jump when there is noise.
Also see the common keyword arguments in :py:obj:`.Effect`. | false |
2,245,210 | def generate_clk_from_csv(input_f, # type: TextIO
keys, # type: Tuple[AnyStr, AnyStr]
schema, # type: Schema
validate=True, # type: bool
header=True, # type: Union[bool, AnyStr]
progress_bar=True # type: bool
):
# type: (...) -> List[str]
""" Generate Bloom filters from CSV file, then serialise them.
This function also computes and outputs the Hamming weight
(a.k.a popcount -- the number of bits set to high) of the
generated Bloom filters.
:param input_f: A file-like object of csv data to hash.
:param keys: A tuple of two lists of secret keys.
:param schema: Schema specifying the record formats and
hashing settings.
:param validate: Set to `False` to disable validation of
data against the schema. Note that this will silence
warnings whose aim is to keep the hashes consistent between
data sources; this may affect linkage accuracy.
:param header: Set to `False` if the CSV file does not have
a header. Set to `'ignore'` if the CSV file does have a
header but it should not be checked against the schema.
:param bool progress_bar: Set to `False` to disable the progress
bar.
:return: A list of serialized Bloom filters and a list of
corresponding popcounts.
"""
if header not in {False, True, 'ignore'}:
raise ValueError("header must be False, True or 'ignore' but is {}."
.format(header))
log.info("Hashing data")
# Read from CSV file
reader = unicode_reader(input_f)
if header:
column_names = next(reader)
if header != 'ignore':
validate_header(schema.fields, column_names)
start_time = time.time()
# Read the lines in CSV file and add it to PII
pii_data = []
for line in reader:
pii_data.append(tuple(element.strip() for element in line))
validate_row_lengths(schema.fields, pii_data)
if progress_bar:
stats = OnlineMeanVariance()
with tqdm(desc="generating CLKs", total=len(pii_data), unit='clk', unit_scale=True,
postfix={'mean': stats.mean(), 'std': stats.std()}) as pbar:
def callback(tics, clk_stats):
stats.update(clk_stats)
pbar.set_postfix(mean=stats.mean(), std=stats.std(), refresh=False)
pbar.update(tics)
results = generate_clks(pii_data,
schema,
keys,
validate=validate,
callback=callback)
else:
results = generate_clks(pii_data,
schema,
keys,
validate=validate)
log.info("Hashing took {:.2f} seconds".format(time.time() - start_time))
return results | [
"def",
"generate_clk_from_csv",
"(",
"input_f",
",",
"keys",
",",
"schema",
",",
"validate",
"=",
"True",
",",
"header",
"=",
"True",
",",
"progress_bar",
"=",
"True",
")",
":",
"if",
"header",
"not",
"in",
"{",
"False",
",",
"True",
",",
"'ignore'",
"}",
":",
"raise",
"ValueError",
"(",
"\"header must be False, True or 'ignore' but is {}.\"",
".",
"format",
"(",
"header",
")",
")",
"log",
".",
"info",
"(",
"\"Hashing data\"",
")",
"reader",
"=",
"unicode_reader",
"(",
"input_f",
")",
"if",
"header",
":",
"column_names",
"=",
"next",
"(",
"reader",
")",
"if",
"header",
"!=",
"'ignore'",
":",
"validate_header",
"(",
"schema",
".",
"fields",
",",
"column_names",
")",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"pii_data",
"=",
"[",
"]",
"for",
"line",
"in",
"reader",
":",
"pii_data",
".",
"append",
"(",
"tuple",
"(",
"element",
".",
"strip",
"(",
")",
"for",
"element",
"in",
"line",
")",
")",
"validate_row_lengths",
"(",
"schema",
".",
"fields",
",",
"pii_data",
")",
"if",
"progress_bar",
":",
"stats",
"=",
"OnlineMeanVariance",
"(",
")",
"with",
"tqdm",
"(",
"desc",
"=",
"\"generating CLKs\"",
",",
"total",
"=",
"len",
"(",
"pii_data",
")",
",",
"unit",
"=",
"'clk'",
",",
"unit_scale",
"=",
"True",
",",
"postfix",
"=",
"{",
"'mean'",
":",
"stats",
".",
"mean",
"(",
")",
",",
"'std'",
":",
"stats",
".",
"std",
"(",
")",
"}",
")",
"as",
"pbar",
":",
"def",
"callback",
"(",
"tics",
",",
"clk_stats",
")",
":",
"stats",
".",
"update",
"(",
"clk_stats",
")",
"pbar",
".",
"set_postfix",
"(",
"mean",
"=",
"stats",
".",
"mean",
"(",
")",
",",
"std",
"=",
"stats",
".",
"std",
"(",
")",
",",
"refresh",
"=",
"False",
")",
"pbar",
".",
"update",
"(",
"tics",
")",
"results",
"=",
"generate_clks",
"(",
"pii_data",
",",
"schema",
",",
"keys",
",",
"validate",
"=",
"validate",
",",
"callback",
"=",
"callback",
")",
"else",
":",
"results",
"=",
"generate_clks",
"(",
"pii_data",
",",
"schema",
",",
"keys",
",",
"validate",
"=",
"validate",
")",
"log",
".",
"info",
"(",
"\"Hashing took {:.2f} seconds\"",
".",
"format",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
")",
")",
"return",
"results"
] | python | Generate Bloom filters from CSV file, then serialise them.
This function also computes and outputs the Hamming weight
(a.k.a popcount -- the number of bits set to high) of the
generated Bloom filters.
:param input_f: A file-like object of csv data to hash.
:param keys: A tuple of two lists of secret keys.
:param schema: Schema specifying the record formats and
hashing settings.
:param validate: Set to `False` to disable validation of
data against the schema. Note that this will silence
warnings whose aim is to keep the hashes consistent between
data sources; this may affect linkage accuracy.
:param header: Set to `False` if the CSV file does not have
a header. Set to `'ignore'` if the CSV file does have a
header but it should not be checked against the schema.
:param bool progress_bar: Set to `False` to disable the progress
bar.
:return: A list of serialized Bloom filters and a list of
corresponding popcounts. | false |
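A usage sketch in the same style; the two-key tuple is illustrative and ``schema`` is assumed to be a ``Schema`` instance matching the CSV columns:

.. code-block:: python

    import io

    csv_data = io.StringIO('name,dob\nAlice,1970-01-01\n')
    # `schema` is assumed to be a Schema describing the two columns above.
    clks = generate_clk_from_csv(csv_data, ('key one', 'key two'), schema,
                                 header=True, progress_bar=False)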
2,199,204 | def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, DeviceGroupManipulation):
return False
return self.__dict__ == other.__dict__ | [
"def",
"__eq__",
"(",
"self",
",",
"other",
")",
":",
"if",
"not",
"isinstance",
"(",
"other",
",",
"DeviceGroupManipulation",
")",
":",
"return",
"False",
"return",
"self",
".",
"__dict__",
"==",
"other",
".",
"__dict__"
] | python | Returns true if both objects are equal | false |
2,453,488 | def list_folder_content(self, folder, name=None, entity_type=None,
content_type=None, page_size=DEFAULT_PAGE_SIZE,
page=None, ordering=None):
'''List files and folders (not recursively) contained in the folder.
This function does not retrieve all results, pages have
to be manually retrieved by the caller.
Args:
folder (str): The UUID of the requested folder.
name (str): Optional filter on entity name.
entity_type (str): Optional filter on entity type.
Admitted values: ['file', 'folder'].
content_type (str): Optional filter on entity content type (only
files are returned).
page_size (int): Number of elements per page.
page (int): Number of the page.
ordering (str): Indicate on which fields to sort the result. Prepend
'-' to invert order. Multiple values can be provided.
Ordering is supported on: ['name', 'created_on', 'modified_on'].
Example: 'ordering=name,created_on'
Returns:
A dictionary of the results::
{
u'count': 1,
u'next': None,
u'previous': None,
u'results': [{u'content_type': u'plain/text',
u'created_by': u'303447',
u'created_on': u'2017-03-13T10:17:01.688472Z',
u'description': u'',
u'entity_type': u'file',
u'modified_by': u'303447',
u'modified_on': u'2017-03-13T10:17:01.688632Z',
u'name': u'file_1',
u'parent': u'eac11058-4ae0-4ea9-ada8-d3ea23887509',
u'uuid': u'0e17eaac-cb00-4336-b9d7-657026844281'}]
}
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
'''
if not is_valid_uuid(folder):
raise StorageArgumentException(
'Invalid UUID for folder: {0}'.format(folder))
params = self._prep_params(locals())
del params['folder'] # not a query parameter
return self._authenticated_request \
.to_endpoint('folder/{}/children/'.format(folder)) \
.with_params(params) \
.return_body() \
.get() | [
"def",
"list_folder_content",
"(",
"self",
",",
"folder",
",",
"name",
"=",
"None",
",",
"entity_type",
"=",
"None",
",",
"content_type",
"=",
"None",
",",
"page_size",
"=",
"DEFAULT_PAGE_SIZE",
",",
"page",
"=",
"None",
",",
"ordering",
"=",
"None",
")",
":",
"if",
"not",
"is_valid_uuid",
"(",
"folder",
")",
":",
"raise",
"StorageArgumentException",
"(",
"'Invalid UUID for folder: {0}'",
".",
"format",
"(",
"folder",
")",
")",
"params",
"=",
"self",
".",
"_prep_params",
"(",
"locals",
"(",
")",
")",
"del",
"params",
"[",
"'folder'",
"]",
"return",
"self",
".",
"_authenticated_request",
".",
"to_endpoint",
"(",
"'folder/{}/children/'",
".",
"format",
"(",
"folder",
")",
")",
".",
"with_params",
"(",
"params",
")",
".",
"return_body",
"(",
")",
".",
"get",
"(",
")"
] | python | List files and folders (not recursively) contained in the folder.
This function does not retrieve all results, pages have
to be manually retrieved by the caller.
Args:
folder (str): The UUID of the requested folder.
name (str): Optional filter on entity name.
entity_type (str): Optional filter on entity type.
Admitted values: ['file', 'folder'].
content_type (str): Optional filter on entity content type (only
files are returned).
page_size (int): Number of elements per page.
page (int): Number of the page.
ordering (str): Indicate on which fields to sort the result. Prepend
'-' to invert order. Multiple values can be provided.
Ordering is supported on: ['name', 'created_on', 'modified_on'].
Example: 'ordering=name,created_on'
Returns:
A dictionary of the results::
{
u'count': 1,
u'next': None,
u'previous': None,
u'results': [{u'content_type': u'plain/text',
u'created_by': u'303447',
u'created_on': u'2017-03-13T10:17:01.688472Z',
u'description': u'',
u'entity_type': u'file',
u'modified_by': u'303447',
u'modified_on': u'2017-03-13T10:17:01.688632Z',
u'name': u'file_1',
u'parent': u'eac11058-4ae0-4ea9-ada8-d3ea23887509',
u'uuid': u'0e17eaac-cb00-4336-b9d7-657026844281'}]
}
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes | false |
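A usage sketch against a client exposing this method; the UUID is illustrative and the field accesses follow the docstring's example payload:

.. code-block:: python

    page = client.list_folder_content(
        'eac11058-4ae0-4ea9-ada8-d3ea23887509',
        entity_type='file',
        page_size=50,
        ordering='-created_on')
    for entry in page['results']:
        print(entry['name'], entry['content_type'])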
1,969,604 | def MeshPoints(*inputobj, **options):
"""
Build a point ``Actor`` for a list of points.
:param float r: point radius.
:param c: color name, number, or list of [R,G,B] colors of same length as plist.
:type c: int, str, list
:param float alpha: transparency in range [0,1].
"""
r = options.pop("r", 5)
c = options.pop("c", "gray")
alpha = options.pop("alpha", 1)
mesh, u = _inputsort(inputobj)
plist = mesh.coordinates()
if u:
u_values = np.array([u(p) for p in plist])
if len(plist[0]) == 2: # coords are 2d.. not good..
plist = np.insert(plist, 2, 0, axis=1) # make it 3d
if len(plist[0]) == 1: # coords are 1d.. not good..
plist = np.insert(plist, 1, 0, axis=1) # make it 3d
plist = np.insert(plist, 2, 0, axis=1)
actor = shapes.Points(plist, r=r, c=c, alpha=alpha)
actor.mesh = mesh
if u:
actor.u = u
if len(u_values.shape) == 2:
if u_values.shape[1] in [2, 3]: # u_values is 2D or 3D
actor.u_values = u_values
dispsizes = utils.mag(u_values)
else: # u_values is 1D
dispsizes = u_values
actor.addPointScalars(dispsizes, "u_values")
return actor | [
"def",
"MeshPoints",
"(",
"*",
"inputobj",
",",
"**",
"options",
")",
":",
"r",
"=",
"options",
".",
"pop",
"(",
"\"r\"",
",",
"5",
")",
"c",
"=",
"options",
".",
"pop",
"(",
"\"c\"",
",",
"\"gray\"",
")",
"alpha",
"=",
"options",
".",
"pop",
"(",
"\"alpha\"",
",",
"1",
")",
"mesh",
",",
"u",
"=",
"_inputsort",
"(",
"inputobj",
")",
"plist",
"=",
"mesh",
".",
"coordinates",
"(",
")",
"if",
"u",
":",
"u_values",
"=",
"np",
".",
"array",
"(",
"[",
"u",
"(",
"p",
")",
"for",
"p",
"in",
"plist",
"]",
")",
"if",
"len",
"(",
"plist",
"[",
"0",
"]",
")",
"==",
"2",
":",
"plist",
"=",
"np",
".",
"insert",
"(",
"plist",
",",
"2",
",",
"0",
",",
"axis",
"=",
"1",
")",
"if",
"len",
"(",
"plist",
"[",
"0",
"]",
")",
"==",
"1",
":",
"plist",
"=",
"np",
".",
"insert",
"(",
"plist",
",",
"1",
",",
"0",
",",
"axis",
"=",
"1",
")",
"plist",
"=",
"np",
".",
"insert",
"(",
"plist",
",",
"2",
",",
"0",
",",
"axis",
"=",
"1",
")",
"actor",
"=",
"shapes",
".",
"Points",
"(",
"plist",
",",
"r",
"=",
"r",
",",
"c",
"=",
"c",
",",
"alpha",
"=",
"alpha",
")",
"actor",
".",
"mesh",
"=",
"mesh",
"if",
"u",
":",
"actor",
".",
"u",
"=",
"u",
"if",
"len",
"(",
"u_values",
".",
"shape",
")",
"==",
"2",
":",
"if",
"u_values",
".",
"shape",
"[",
"1",
"]",
"in",
"[",
"2",
",",
"3",
"]",
":",
"actor",
".",
"u_values",
"=",
"u_values",
"dispsizes",
"=",
"utils",
".",
"mag",
"(",
"u_values",
")",
"else",
":",
"dispsizes",
"=",
"u_values",
"actor",
".",
"addPointScalars",
"(",
"dispsizes",
",",
"\"u_values\"",
")",
"return",
"actor"
] | python | Build a point ``Actor`` for a list of points.
:param float r: point radius.
:param c: color name, number, or list of [R,G,B] colors of same length as plist.
:type c: int, str, list
:param float alpha: transparency in range [0,1]. | false |
2,609,832 | def _namematcher(regex):
"""Checks if a target name matches with an input regular expression."""
matcher = re_compile(regex)
def match(target):
target_name = getattr(target, '__name__', '')
result = matcher.match(target_name)
return result
return match | [
"def",
"_namematcher",
"(",
"regex",
")",
":",
"matcher",
"=",
"re_compile",
"(",
"regex",
")",
"def",
"match",
"(",
"target",
")",
":",
"target_name",
"=",
"getattr",
"(",
"target",
",",
"'__name__'",
",",
"''",
")",
"result",
"=",
"matcher",
".",
"match",
"(",
"target_name",
")",
"return",
"result",
"return",
"match"
] | python | Checks if a target name matches with an input regular expression. | false |
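A short demonstration, assuming ``re_compile`` is ``re.compile`` imported under that alias in the original module:

.. code-block:: python

    from re import compile as re_compile

    is_test = _namematcher(r'test_')

    def test_addition(): pass
    def helper(): pass

    print(bool(is_test(test_addition)))  # True
    print(bool(is_test(helper)))         # False
    print(bool(is_test(object())))       # False: no __name__, so '' is matched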
2,706,134 | def send(self, event_name, *args, **kwargs):
"""
Method,
Calls all callbacks registered for `event_name`. The arguments given
are passed to each callback.
:param event_name: The event name to call the callbacks for.
:param args: The positional arguments passed to the callbacks.
:param kwargs: The keyword arguments passed to the callbacks.
Example:
>>> callbacks = Callbacks()
>>> @callbacks.register("my_event")
... def hello(your_name):
... print("Hello %s, how are you today." % your_name)
...
>>> callbacks.call("my_event", "Wessie")
Hello Wessie, how are you today.
"""
for callback in self.callbacks[event_name]:
# Handle errors (and maybe return values)
callback(*args, **kwargs) | [
"def",
"send",
"(",
"self",
",",
"event_name",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"for",
"callback",
"in",
"self",
".",
"callbacks",
"[",
"event_name",
"]",
":",
"callback",
"(",
"*",
"args",
",",
"**",
"kwargs",
")"
] | python | Method,
Calls all callbacks registered for `event_name`. The arguments given
are passed to each callback.
:param event_name: The event name to call the callbacks for.
:param args: The positional arguments passed to the callbacks.
:param kwargs: The keyword arguments passed to the callbacks.
Example:
>>> callbacks = Callbacks()
>>> @callbacks.register("my_event")
... def hello(your_name):
... print("Hello %s, how are you today." % your_name)
...
>>> callbacks.call("my_event", "Wessie")
Hello Wessie, how are you today. | false |
2,010,472 | def output_str(f):
"""Create a function that always return instances of `str`.
This decorator is useful when the returned string is to be used
with libraries that do not support ̀`unicode` in Python 2, but work
fine with Python 3 `str` objects.
"""
if six.PY2:
#@functools.wraps(f)
def new_f(*args, **kwargs):
return f(*args, **kwargs).encode("utf-8")
else:
new_f = f
return new_f | [
"def",
"output_str",
"(",
"f",
")",
":",
"if",
"six",
".",
"PY2",
":",
"def",
"new_f",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"return",
"f",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
".",
"encode",
"(",
"\"utf-8\"",
")",
"else",
":",
"new_f",
"=",
"f",
"return",
"new_f"
] | python | Create a function that always returns instances of `str`.
This decorator is useful when the returned string is to be used
with libraries that do not support `unicode` in Python 2, but work
fine with Python 3 `str` objects. | false |
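A usage sketch (``six`` is assumed to be imported in the module); under Python 2 the wrapper encodes the return value to a UTF-8 ``str``, under Python 3 it returns the function unchanged:

.. code-block:: python

    @output_str
    def greet(name):
        return u'Hello, %s' % name

    value = greet('world')
    print(type(value))  # Py2: <type 'str'> (UTF-8 bytes); Py3: <class 'str'>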
1,758,341 | def dump_t_coords(dataset_dir, data_dir, dataset, root=None, compress=True):
"""dump vtkjs texture coordinates"""
if root is None:
root = {}
tcoords = dataset.GetPointData().GetTCoords()
if tcoords:
dumped_array = dump_data_array(dataset_dir, data_dir, tcoords, {}, compress)
root['pointData']['activeTCoords'] = len(root['pointData']['arrays'])
root['pointData']['arrays'].append({'data': dumped_array}) | [
"def",
"dump_t_coords",
"(",
"dataset_dir",
",",
"data_dir",
",",
"dataset",
",",
"root",
"=",
"None",
",",
"compress",
"=",
"True",
")",
":",
"if",
"root",
"is",
"None",
":",
"root",
"=",
"{",
"}",
"tcoords",
"=",
"dataset",
".",
"GetPointData",
"(",
")",
".",
"GetTCoords",
"(",
")",
"if",
"tcoords",
":",
"dumped_array",
"=",
"dump_data_array",
"(",
"dataset_dir",
",",
"data_dir",
",",
"tcoords",
",",
"{",
"}",
",",
"compress",
")",
"root",
"[",
"'pointData'",
"]",
"[",
"'activeTCoords'",
"]",
"=",
"len",
"(",
"root",
"[",
"'pointData'",
"]",
"[",
"'arrays'",
"]",
")",
"root",
"[",
"'pointData'",
"]",
"[",
"'arrays'",
"]",
".",
"append",
"(",
"{",
"'data'",
":",
"dumped_array",
"}",
")"
] | python | dump vtkjs texture coordinates | false |
1,834,829 | def apply_array_pars(arr_par_file="arr_pars.csv"):
""" a function to apply array-based multipler parameters. Used to implement
the parameterization constructed by PstFromFlopyModel during a forward run
Parameters
----------
arr_par_file : str
path to csv file detailing parameter array multipliers
Note
----
"arr_pars.csv" - is written by PstFromFlopy
the function should be added to the forward_run.py script but can be called on any correctly formatted csv
"""
df = pd.read_csv(arr_par_file)
# for fname in df.model_file:
# try:
# os.remove(fname)
# except:
# print("error removing mult array:{0}".format(fname))
if 'pp_file' in df.columns:
for pp_file,fac_file,mlt_file in zip(df.pp_file,df.fac_file,df.mlt_file):
if pd.isnull(pp_file):
continue
pyemu.geostats.fac2real(pp_file=pp_file,factors_file=fac_file,
out_file=mlt_file,lower_lim=1.0e-10)
for model_file in df.model_file.unique():
# find all mults that need to be applied to this array
df_mf = df.loc[df.model_file==model_file,:]
results = []
org_file = df_mf.org_file.unique()
if org_file.shape[0] != 1:
raise Exception("wrong number of org_files for {0}".
format(model_file))
org_arr = np.loadtxt(org_file[0])
for mlt in df_mf.mlt_file:
org_arr *= np.loadtxt(mlt)
if "upper_bound" in df.columns:
ub_vals = df_mf.upper_bound.value_counts().dropna().to_dict()
if len(ub_vals) == 0:
pass
elif len(ub_vals) > 1:
raise Exception("different upper bound values for {0}".format(org_file))
else:
ub = list(ub_vals.keys())[0]
org_arr[org_arr>ub] = ub
if "lower_bound" in df.columns:
lb_vals = df_mf.lower_bound.value_counts().dropna().to_dict()
if len(lb_vals) == 0:
pass
elif len(lb_vals) > 1:
raise Exception("different lower bound values for {0}".format(org_file))
else:
lb = list(lb_vals.keys())[0]
org_arr[org_arr < lb] = lb
np.savetxt(model_file,np.atleast_2d(org_arr),fmt="%15.6E",delimiter='') | [
"def",
"apply_array_pars",
"(",
"arr_par_file",
"=",
"\"arr_pars.csv\"",
")",
":",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"arr_par_file",
")",
"if",
"'pp_file'",
"in",
"df",
".",
"columns",
":",
"for",
"pp_file",
",",
"fac_file",
",",
"mlt_file",
"in",
"zip",
"(",
"df",
".",
"pp_file",
",",
"df",
".",
"fac_file",
",",
"df",
".",
"mlt_file",
")",
":",
"if",
"pd",
".",
"isnull",
"(",
"pp_file",
")",
":",
"continue",
"pyemu",
".",
"geostats",
".",
"fac2real",
"(",
"pp_file",
"=",
"pp_file",
",",
"factors_file",
"=",
"fac_file",
",",
"out_file",
"=",
"mlt_file",
",",
"lower_lim",
"=",
"1.0e-10",
")",
"for",
"model_file",
"in",
"df",
".",
"model_file",
".",
"unique",
"(",
")",
":",
"df_mf",
"=",
"df",
".",
"loc",
"[",
"df",
".",
"model_file",
"==",
"model_file",
",",
":",
"]",
"results",
"=",
"[",
"]",
"org_file",
"=",
"df_mf",
".",
"org_file",
".",
"unique",
"(",
")",
"if",
"org_file",
".",
"shape",
"[",
"0",
"]",
"!=",
"1",
":",
"raise",
"Exception",
"(",
"\"wrong number of org_files for {0}\"",
".",
"format",
"(",
"model_file",
")",
")",
"org_arr",
"=",
"np",
".",
"loadtxt",
"(",
"org_file",
"[",
"0",
"]",
")",
"for",
"mlt",
"in",
"df_mf",
".",
"mlt_file",
":",
"org_arr",
"*=",
"np",
".",
"loadtxt",
"(",
"mlt",
")",
"if",
"\"upper_bound\"",
"in",
"df",
".",
"columns",
":",
"ub_vals",
"=",
"df_mf",
".",
"upper_bound",
".",
"value_counts",
"(",
")",
".",
"dropna",
"(",
")",
".",
"to_dict",
"(",
")",
"if",
"len",
"(",
"ub_vals",
")",
"==",
"0",
":",
"pass",
"elif",
"len",
"(",
"ub_vals",
")",
">",
"1",
":",
"raise",
"Exception",
"(",
"\"different upper bound values for {0}\"",
".",
"format",
"(",
"org_file",
")",
")",
"else",
":",
"ub",
"=",
"list",
"(",
"ub_vals",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"org_arr",
"[",
"org_arr",
">",
"ub",
"]",
"=",
"ub",
"if",
"\"lower_bound\"",
"in",
"df",
".",
"columns",
":",
"lb_vals",
"=",
"df_mf",
".",
"lower_bound",
".",
"value_counts",
"(",
")",
".",
"dropna",
"(",
")",
".",
"to_dict",
"(",
")",
"if",
"len",
"(",
"lb_vals",
")",
"==",
"0",
":",
"pass",
"elif",
"len",
"(",
"lb_vals",
")",
">",
"1",
":",
"raise",
"Exception",
"(",
"\"different lower bound values for {0}\"",
".",
"format",
"(",
"org_file",
")",
")",
"else",
":",
"lb",
"=",
"list",
"(",
"lb_vals",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"org_arr",
"[",
"org_arr",
"<",
"lb",
"]",
"=",
"lb",
"np",
".",
"savetxt",
"(",
"model_file",
",",
"np",
".",
"atleast_2d",
"(",
"org_arr",
")",
",",
"fmt",
"=",
"\"%15.6E\"",
",",
"delimiter",
"=",
"''",
")"
] | python | a function to apply array-based multiplier parameters. Used to implement
the parameterization constructed by PstFromFlopyModel during a forward run
Parameters
----------
arr_par_file : str
path to csv file detailing parameter array multipliers
Note
----
"arr_pars.csv" - is written by PstFromFlopy
the function should be added to the forward_run.py script but can be called on any correctly formatted csv | false |
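A sketch of the csv the function consumes; the column names come from the code above, while the file names and bounds are illustrative only:

.. code-block:: python

    import pandas as pd

    pd.DataFrame({
        'model_file':  ['hk_layer1.ref'],       # illustrative path
        'org_file':    ['org/hk_layer1.ref'],   # illustrative path
        'mlt_file':    ['mult/hk_layer1.ref'],  # illustrative path
        'upper_bound': [100.0],
        'lower_bound': [1.0e-6],
    }).to_csv('arr_pars.csv', index=False)

    apply_array_pars('arr_pars.csv')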
1,908,958 | def persist(self, name, project=None, drop_model=False, **kwargs):
"""
Persist the execution into a new model.
:param name: model name
:param project: name of the project
:param drop_model: drop model before creation
"""
return super(ODPSModelExpr, self).persist(name, project=project, drop_model=drop_model, **kwargs) | [
"def",
"persist",
"(",
"self",
",",
"name",
",",
"project",
"=",
"None",
",",
"drop_model",
"=",
"False",
",",
"**",
"kwargs",
")",
":",
"return",
"super",
"(",
"ODPSModelExpr",
",",
"self",
")",
".",
"persist",
"(",
"name",
",",
"project",
"=",
"project",
",",
"drop_model",
"=",
"drop_model",
",",
"**",
"kwargs",
")"
] | python | Persist the execution into a new model.
:param name: model name
:param project: name of the project
:param drop_model: drop model before creation | false |
2,053,150 | def transactions(self) -> TransactionsAggregate:
""" Transactions aggregate """
if not self.__transactions_aggregate:
self.__transactions_aggregate = TransactionsAggregate(self.book)
return self.__transactions_aggregate | [
"def",
"transactions",
"(",
"self",
")",
"->",
"TransactionsAggregate",
":",
"if",
"not",
"self",
".",
"__transactions_aggregate",
":",
"self",
".",
"__transactions_aggregate",
"=",
"TransactionsAggregate",
"(",
"self",
".",
"book",
")",
"return",
"self",
".",
"__transactions_aggregate"
] | python | Transactions aggregate | false |
2,269,221 | def read_matlab_features(array_paths, number_of_nodes, dimensionality):
"""
Returns a sparse feature matrix as calculated by a Matlab routine.
"""
# Read the data array
file_row_gen = get_file_row_generator(array_paths[0], "\t")
data = list()
append_data = data.append
for file_row in file_row_gen:
append_data(float(file_row[0]))
# Read the row array
file_row_gen = get_file_row_generator(array_paths[1], "\t")
row = list()
append_row = row.append
for file_row in file_row_gen:
append_row(int(float(file_row[0])))
# Read the column array
file_row_gen = get_file_row_generator(array_paths[2], "\t")
col = list()
append_col = col.append
for file_row in file_row_gen:
append_col(int(float(file_row[0])))
data = np.array(data).astype(np.float64)
row = np.array(row).astype(np.int64) - 1 # Due to Matlab numbering
col = np.array(col).astype(np.int64) - 1 # Due to Matlab numbering
print(np.max(row), np.min(row))
print(np.max(col), np.min(col))
# centroids_new = sparse.coo_matrix((data, (row, col)), shape=(number_of_nodes + 1, k))
features = spsp.coo_matrix((data, (row, col)), shape=(number_of_nodes, dimensionality))
return features | [
"def",
"read_matlab_features",
"(",
"array_paths",
",",
"number_of_nodes",
",",
"dimensionality",
")",
":",
"file_row_gen",
"=",
"get_file_row_generator",
"(",
"array_paths",
"[",
"0",
"]",
",",
"\"\\t\"",
")",
"data",
"=",
"list",
"(",
")",
"append_data",
"=",
"data",
".",
"append",
"for",
"file_row",
"in",
"file_row_gen",
":",
"append_data",
"(",
"float",
"(",
"file_row",
"[",
"0",
"]",
")",
")",
"file_row_gen",
"=",
"get_file_row_generator",
"(",
"array_paths",
"[",
"1",
"]",
",",
"\"\\t\"",
")",
"row",
"=",
"list",
"(",
")",
"append_row",
"=",
"row",
".",
"append",
"for",
"file_row",
"in",
"file_row_gen",
":",
"append_row",
"(",
"int",
"(",
"float",
"(",
"file_row",
"[",
"0",
"]",
")",
")",
")",
"file_row_gen",
"=",
"get_file_row_generator",
"(",
"array_paths",
"[",
"2",
"]",
",",
"\"\\t\"",
")",
"col",
"=",
"list",
"(",
")",
"append_col",
"=",
"col",
".",
"append",
"for",
"file_row",
"in",
"file_row_gen",
":",
"append_col",
"(",
"int",
"(",
"float",
"(",
"file_row",
"[",
"0",
"]",
")",
")",
")",
"data",
"=",
"np",
".",
"array",
"(",
"data",
")",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"row",
"=",
"np",
".",
"array",
"(",
"row",
")",
".",
"astype",
"(",
"np",
".",
"int64",
")",
"-",
"1",
"col",
"=",
"np",
".",
"array",
"(",
"col",
")",
".",
"astype",
"(",
"np",
".",
"int64",
")",
"-",
"1",
"print",
"(",
"np",
".",
"max",
"(",
"row",
")",
",",
"np",
".",
"min",
"(",
"row",
")",
")",
"print",
"(",
"np",
".",
"max",
"(",
"col",
")",
",",
"np",
".",
"min",
"(",
"col",
")",
")",
"features",
"=",
"spsp",
".",
"coo_matrix",
"(",
"(",
"data",
",",
"(",
"row",
",",
"col",
")",
")",
",",
"shape",
"=",
"(",
"number_of_nodes",
",",
"dimensionality",
")",
")",
"return",
"features"
] | python | Returns a sparse feature matrix as calculated by a Matlab routine. | false |
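A usage sketch; the three paths are assumed to point at single-column, tab-delimited text files holding the values, the 1-based row indices and the 1-based column indices, in that order:

.. code-block:: python

    features = read_matlab_features(
        ['values.txt', 'rows.txt', 'cols.txt'],  # illustrative file names
        number_of_nodes=100,
        dimensionality=50)
    print(features.shape)  # (100, 50) scipy COO matrix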
2,272,805 | def __init__(self, *args, **kw):
"""A tiny helper to work around the dunderscores around ``name`` during instantiation.
The value must always be retrieved as ``inst.__name__``, but may be assigned using the shorthand.
"""
# Re-map ``name`` to ``__name__`` in the keyword arguments, if present.
if 'name' in kw:
kw['__name__'] = kw.pop('name')
# Process arguments upstream.
super().__init__(*args, **kw) | [
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kw",
")",
":",
"if",
"'name'",
"in",
"kw",
":",
"kw",
"[",
"'__name__'",
"]",
"=",
"kw",
".",
"pop",
"(",
"'name'",
")",
"super",
"(",
")",
".",
"__init__",
"(",
"*",
"args",
",",
"**",
"kw",
")"
] | python | A tiny helper to work around the dunderscores around ``name`` during instantiation.
The value must always be retrieved as ``inst.__name__``, but may be assigned using the shorthand. | false |
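A minimal self-contained sketch of the same remapping; the ``dict`` parent class is illustrative:

.. code-block:: python

    class Named(dict):
        def __init__(self, *args, **kw):
            # Re-map ``name`` to ``__name__``, then defer upstream.
            if 'name' in kw:
                kw['__name__'] = kw.pop('name')
            super().__init__(*args, **kw)

    field = Named(name='field1')
    print(field['__name__'])  # 'field1'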
1,956,976 | def focus_property(self, prop, direction):
"""does a walk in the given direction and focuses the
first message tree that matches the given property"""
newpos = self.get_selected_mid()
newpos = direction(newpos)
while newpos is not None:
MT = self._tree[newpos]
if prop(MT):
newpos = self._sanitize_position((newpos,))
self.body.set_focus(newpos)
break
newpos = direction(newpos) | [
"def",
"focus_property",
"(",
"self",
",",
"prop",
",",
"direction",
")",
":",
"newpos",
"=",
"self",
".",
"get_selected_mid",
"(",
")",
"newpos",
"=",
"direction",
"(",
"newpos",
")",
"while",
"newpos",
"is",
"not",
"None",
":",
"MT",
"=",
"self",
".",
"_tree",
"[",
"newpos",
"]",
"if",
"prop",
"(",
"MT",
")",
":",
"newpos",
"=",
"self",
".",
"_sanitize_position",
"(",
"(",
"newpos",
",",
")",
")",
"self",
".",
"body",
".",
"set_focus",
"(",
"newpos",
")",
"break",
"newpos",
"=",
"direction",
"(",
"newpos",
")"
] | python | does a walk in the given direction and focuses the
first message tree that matches the given property | false |
2,705,440 | def dateint_week_by_dateint(dateint, first_day='Monday'):
"""Return a dateint range of the week the given dateint belongs to.
Arguments
---------
dateint : int
An integer object depicting a specific calendar day; e.g. 20161225.
first_day : str, default 'Monday'
The first day of the week.
Returns
-------
iterable
An iterable of dateint representing all days of the week the given
dateint belongs to.
"""
weekday_ix = dateint_to_weekday(dateint, first_day)
first_day_dateint = shift_dateint(dateint, -weekday_ix)
last_day_dateint = shift_dateint(first_day_dateint, 6)
return dateint_range(first_day_dateint, last_day_dateint) | [
"def",
"dateint_week_by_dateint",
"(",
"dateint",
",",
"first_day",
"=",
"'Monday'",
")",
":",
"weekday_ix",
"=",
"dateint_to_weekday",
"(",
"dateint",
",",
"first_day",
")",
"first_day_dateint",
"=",
"shift_dateint",
"(",
"dateint",
",",
"-",
"weekday_ix",
")",
"last_day_dateint",
"=",
"shift_dateint",
"(",
"first_day_dateint",
",",
"6",
")",
"return",
"dateint_range",
"(",
"first_day_dateint",
",",
"last_day_dateint",
")"
] | python | Return a dateint range of the week the given dateint belongs to.
Arguments
---------
dateint : int
An integer object depicting a specific calendar day; e.g. 20161225.
first_day : str, default 'Monday'
The first day of the week.
Returns
-------
iterable
An iterable of dateint representing all days of the week the given
dateint belongs to. | false |
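A usage sketch, assuming the helpers ``dateint_to_weekday``, ``shift_dateint`` and ``dateint_range`` from the same module are in scope; 2016-12-25 fell on a Sunday, so with Monday-first weeks the range runs from Monday the 19th through Sunday the 25th:

.. code-block:: python

    week = dateint_week_by_dateint(20161225)
    print(list(week))  # [20161219, 20161220, ..., 20161225]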
2,699,290 | def yaml_loc_join(l, n):
'''
YAML loader to join paths
The keywords come directly from :func:`util.locations.get_locations`.
See there!
:returns:
A `path separator` (``/``) joined string |yaml_loader_returns|
.. seealso:: |yaml_loader_seealso|
'''
from photon.util.locations import get_locations
locations = get_locations()
s = l.construct_sequence(n)
for num, seq in enumerate(s):
if seq in locations:
s[num] = '%s' % (locations[seq])
return _path.join(*s) | [
"def",
"yaml_loc_join",
"(",
"l",
",",
"n",
")",
":",
"from",
"photon",
".",
"util",
".",
"locations",
"import",
"get_locations",
"locations",
"=",
"get_locations",
"(",
")",
"s",
"=",
"l",
".",
"construct_sequence",
"(",
"n",
")",
"for",
"num",
",",
"seq",
"in",
"enumerate",
"(",
"s",
")",
":",
"if",
"seq",
"in",
"locations",
":",
"s",
"[",
"num",
"]",
"=",
"'%s'",
"%",
"(",
"locations",
"[",
"seq",
"]",
")",
"return",
"_path",
".",
"join",
"(",
"*",
"s",
")"
] | python | YAML loader to join paths
The keywords come directly from :func:`util.locations.get_locations`.
See there!
:returns:
A `path separator` (``/``) joined string |yaml_loader_returns|
.. seealso:: |yaml_loader_seealso| | false |
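A hedged registration sketch; the ``!loc_join`` tag name and the ``conf_dir`` location keyword are assumptions, not taken from the original package:

.. code-block:: python

    import yaml

    yaml.add_constructor('!loc_join', yaml_loc_join, Loader=yaml.SafeLoader)
    cfg = yaml.safe_load("path: !loc_join [conf_dir, 'photon.cfg']")
    # cfg['path'] is the joined path, with 'conf_dir' expanded if it is
    # a known location keyword.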
2,005,477 | def __unicode__(self):
"""Return the unicode representation of the object."""
children = list(self.children)
if 'raw' in children:
children.remove('raw')
values = [getattr(self, child) for child in children]
return u' '.join([unicode(val) for val in values if val is not None]) | [
"def",
"__unicode__",
"(",
"self",
")",
":",
"children",
"=",
"list",
"(",
"self",
".",
"children",
")",
"if",
"'raw'",
"in",
"children",
":",
"children",
".",
"remove",
"(",
"'raw'",
")",
"values",
"=",
"[",
"getattr",
"(",
"self",
",",
"child",
")",
"for",
"child",
"in",
"children",
"]",
"return",
"u' '",
".",
"join",
"(",
"[",
"unicode",
"(",
"val",
")",
"for",
"val",
"in",
"values",
"if",
"val",
"is",
"not",
"None",
"]",
")"
] | python | Return the unicode representation of the object. | false |
2,150,453 | def aeration_data(DO_column, dirpath):
"""Extract the data from folder containing tab delimited
files of aeration data. The file must be the original tab delimited file.
All text strings below the header must be removed from these files.
The file names must be the air flow rates with units of micromoles/s.
An example file name would be "300.xls" where 300 is the flow rate in
micromoles/s. The function opens a file dialog for the user to select
the directory containing the data.
:param DO_column: Index of the column that contains the dissolved oxygen concentration data.
:type DO_column: int
:param dirpath: Path to the directory containing aeration data you want to analyze
:type dirpath: string
:return: collection of
* **filepaths** (*string list*) - All file paths in the directory sorted by flow rate
* **airflows** (*numpy.array*) - Sorted array of air flow rates with units of micromole/s
* **DO_data** (*numpy.array list*) - Sorted list of Numpy arrays. Thus each of the numpy data arrays can have different lengths to accommodate short and long experiments
* **time_data** (*numpy.array list*) - Sorted list of Numpy arrays containing the times with units of seconds
"""
#return the list of files in the directory
filenames = os.listdir(dirpath)
#extract the flowrates from the filenames and apply units
airflows = ((np.array([i.split('.', 1)[0] for i in filenames])).astype(np.float32))
#sort airflows and filenames so that they are in ascending order of flow rates
idx = np.argsort(airflows)
airflows = (np.array(airflows)[idx])*u.umole/u.s
filenames = np.array(filenames)[idx]
filepaths = [os.path.join(dirpath, i) for i in filenames]
#DO_data is a list of numpy arrays. Thus each of the numpy data arrays can have different lengths to accommodate short and long experiments
# cycle through all of the files and extract the column of data with oxygen concentrations and the times
DO_data=[column_of_data(i,0,DO_column,-1,'mg/L') for i in filepaths]
time_data=[(column_of_time(i,0,-1)).to(u.s) for i in filepaths]
aeration_collection = collections.namedtuple('aeration_results','filepaths airflows DO_data time_data')
aeration_results = aeration_collection(filepaths, airflows, DO_data, time_data)
return aeration_results | [
"def",
"aeration_data",
"(",
"DO_column",
",",
"dirpath",
")",
":",
"filenames",
"=",
"os",
".",
"listdir",
"(",
"dirpath",
")",
"airflows",
"=",
"(",
"(",
"np",
".",
"array",
"(",
"[",
"i",
".",
"split",
"(",
"'.'",
",",
"1",
")",
"[",
"0",
"]",
"for",
"i",
"in",
"filenames",
"]",
")",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
")",
"idx",
"=",
"np",
".",
"argsort",
"(",
"airflows",
")",
"airflows",
"=",
"(",
"np",
".",
"array",
"(",
"airflows",
")",
"[",
"idx",
"]",
")",
"*",
"u",
".",
"umole",
"/",
"u",
".",
"s",
"filenames",
"=",
"np",
".",
"array",
"(",
"filenames",
")",
"[",
"idx",
"]",
"filepaths",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"dirpath",
",",
"i",
")",
"for",
"i",
"in",
"filenames",
"]",
"DO_data",
"=",
"[",
"column_of_data",
"(",
"i",
",",
"0",
",",
"DO_column",
",",
"-",
"1",
",",
"'mg/L'",
")",
"for",
"i",
"in",
"filepaths",
"]",
"time_data",
"=",
"[",
"(",
"column_of_time",
"(",
"i",
",",
"0",
",",
"-",
"1",
")",
")",
".",
"to",
"(",
"u",
".",
"s",
")",
"for",
"i",
"in",
"filepaths",
"]",
"aeration_collection",
"=",
"collections",
".",
"namedtuple",
"(",
"'aeration_results'",
",",
"'filepaths airflows DO_data time_data'",
")",
"aeration_results",
"=",
"aeration_collection",
"(",
"filepaths",
",",
"airflows",
",",
"DO_data",
",",
"time_data",
")",
"return",
"aeration_results"
] | python | Extract the data from folder containing tab delimited
files of aeration data. The file must be the original tab delimited file.
All text strings below the header must be removed from these files.
The file names must be the air flow rates with units of micromoles/s.
An example file name would be "300.xls" where 300 is the flow rate in
micromoles/s. The function opens a file dialog for the user to select
the directory containing the data.
:param DO_column: Index of the column that contains the dissolved oxygen concentration data.
:type DO_column: int
:param dirpath: Path to the directory containing aeration data you want to analyze
:type dirpath: string
:return: collection of
* **filepaths** (*string list*) - All file paths in the directory sorted by flow rate
* **airflows** (*numpy.array*) - Sorted array of air flow rates with units of micromole/s
* **DO_data** (*numpy.array list*) - Sorted list of Numpy arrays. Thus each of the numpy data arrays can have different lengths to accommodate short and long experiments
* **time_data** (*numpy.array list*) - Sorted list of Numpy arrays containing the times with units of seconds | false |
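A usage sketch; the directory is assumed to hold tab-delimited files named by their air flow rate, e.g. ``300.xls`` and ``600.xls``:

.. code-block:: python

    results = aeration_data(DO_column=2, dirpath='./aeration_runs')  # illustrative path
    print(results.airflows)        # sorted flow rates with umol/s units
    print(results.DO_data[0][:5])  # first DO readings of the lowest-flow run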
1,668,668 | def setup_plugins(extra_plugin_dir=None):
"""Loads any additional plugins."""
if os.path.isdir(PLUGINS_DIR):
load_plugins([PLUGINS_DIR])
if extra_plugin_dir:
load_plugins(extra_plugin_dir) | [
"def",
"setup_plugins",
"(",
"extra_plugin_dir",
"=",
"None",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"PLUGINS_DIR",
")",
":",
"load_plugins",
"(",
"[",
"PLUGINS_DIR",
"]",
")",
"if",
"extra_plugin_dir",
":",
"load_plugins",
"(",
"extra_plugin_dir",
")"
] | python | Loads any additional plugins. | false |
2,197,867 | def ingest(self, token, endpoint=None, timeout=None, compress=None):
"""Obtain a datapoint and event ingest client."""
from . import ingest
if ingest.sf_pbuf:
client = ingest.ProtoBufSignalFxIngestClient
else:
_logger.warn('Protocol Buffers not installed properly; '
'falling back to JSON.')
client = ingest.JsonSignalFxIngestClient
compress = compress if compress is not None else self._compress
return client(
token=token,
endpoint=endpoint or self._ingest_endpoint,
timeout=timeout or self._timeout,
compress=compress) | [
"def",
"ingest",
"(",
"self",
",",
"token",
",",
"endpoint",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"compress",
"=",
"None",
")",
":",
"from",
".",
"import",
"ingest",
"if",
"ingest",
".",
"sf_pbuf",
":",
"client",
"=",
"ingest",
".",
"ProtoBufSignalFxIngestClient",
"else",
":",
"_logger",
".",
"warn",
"(",
"'Protocol Buffers not installed properly; '",
"'falling back to JSON.'",
")",
"client",
"=",
"ingest",
".",
"JsonSignalFxIngestClient",
"compress",
"=",
"compress",
"if",
"compress",
"is",
"not",
"None",
"else",
"self",
".",
"_compress",
"return",
"client",
"(",
"token",
"=",
"token",
",",
"endpoint",
"=",
"endpoint",
"or",
"self",
".",
"_ingest_endpoint",
",",
"timeout",
"=",
"timeout",
"or",
"self",
".",
"_timeout",
",",
"compress",
"=",
"compress",
")"
] | python | Obtain a datapoint and event ingest client. | false |
1,623,294 | def tab_name_editor(self):
"""Trigger the tab name editor."""
index = self.tabwidget.currentIndex()
self.tabwidget.tabBar().tab_name_editor.edit_tab(index) | [
"def",
"tab_name_editor",
"(",
"self",
")",
":",
"index",
"=",
"self",
".",
"tabwidget",
".",
"currentIndex",
"(",
")",
"self",
".",
"tabwidget",
".",
"tabBar",
"(",
")",
".",
"tab_name_editor",
".",
"edit_tab",
"(",
"index",
")"
] | python | Trigger the tab name editor. | false |
2,006,930 | def generate_output_events(self, source, key, val, line='2', hr=True,
show_name=False, colorize=True):
"""
The function for generating CLI output RDAP events results.
Args:
source (:obj:`str`): The parent key 'network' or 'objects'
(required).
key (:obj:`str`): The event key 'events' or 'events_actor'
(required).
val (:obj:`dict`): The event dictionary (required).
line (:obj:`str`): The line number (0-4). Determines indentation.
Defaults to '2'.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
"""
output = generate_output(
line=line,
short=HR_RDAP[source][key]['_short'] if hr else key,
name=HR_RDAP[source][key]['_name'] if (hr and show_name) else None,
is_parent=False if (val is None or
len(val) == 0) else True,
value='None' if (val is None or
len(val) == 0) else None,
colorize=colorize
)
if val is not None:
count = 0
for item in val:
try:
action = item['action']
except KeyError:
action = None
try:
timestamp = item['timestamp']
except KeyError:
timestamp = None
try:
actor = item['actor']
except KeyError:
actor = None
if count > 0:
output += generate_output(
line=str(int(line)+1),
is_parent=True,
colorize=colorize
)
output += generate_output(
line=str(int(line)+1),
short=HR_RDAP_COMMON[key]['action'][
'_short'] if hr else 'action',
name=HR_RDAP_COMMON[key]['action'][
'_name'] if (hr and show_name) else None,
value=action,
colorize=colorize
)
output += generate_output(
line=str(int(line)+1),
short=HR_RDAP_COMMON[key]['timestamp'][
'_short'] if hr else 'timestamp',
name=HR_RDAP_COMMON[key]['timestamp'][
'_name'] if (hr and show_name) else None,
value=timestamp,
colorize=colorize
)
output += generate_output(
line=str(int(line)+1),
short=HR_RDAP_COMMON[key]['actor'][
'_short'] if hr else 'actor',
name=HR_RDAP_COMMON[key]['actor'][
'_name'] if (hr and show_name) else None,
value=actor,
colorize=colorize
)
count += 1
return output | [
"def",
"generate_output_events",
"(",
"self",
",",
"source",
",",
"key",
",",
"val",
",",
"line",
"=",
"'2'",
",",
"hr",
"=",
"True",
",",
"show_name",
"=",
"False",
",",
"colorize",
"=",
"True",
")",
":",
"output",
"=",
"generate_output",
"(",
"line",
"=",
"line",
",",
"short",
"=",
"HR_RDAP",
"[",
"source",
"]",
"[",
"key",
"]",
"[",
"'_short'",
"]",
"if",
"hr",
"else",
"key",
",",
"name",
"=",
"HR_RDAP",
"[",
"source",
"]",
"[",
"key",
"]",
"[",
"'_name'",
"]",
"if",
"(",
"hr",
"and",
"show_name",
")",
"else",
"None",
",",
"is_parent",
"=",
"False",
"if",
"(",
"val",
"is",
"None",
"or",
"len",
"(",
"val",
")",
"==",
"0",
")",
"else",
"True",
",",
"value",
"=",
"'None'",
"if",
"(",
"val",
"is",
"None",
"or",
"len",
"(",
"val",
")",
"==",
"0",
")",
"else",
"None",
",",
"colorize",
"=",
"colorize",
")",
"if",
"val",
"is",
"not",
"None",
":",
"count",
"=",
"0",
"for",
"item",
"in",
"val",
":",
"try",
":",
"action",
"=",
"item",
"[",
"'action'",
"]",
"except",
"KeyError",
":",
"action",
"=",
"None",
"try",
":",
"timestamp",
"=",
"item",
"[",
"'timestamp'",
"]",
"except",
"KeyError",
":",
"timestamp",
"=",
"None",
"try",
":",
"actor",
"=",
"item",
"[",
"'actor'",
"]",
"except",
"KeyError",
":",
"actor",
"=",
"None",
"if",
"count",
">",
"0",
":",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"str",
"(",
"int",
"(",
"line",
")",
"+",
"1",
")",
",",
"is_parent",
"=",
"True",
",",
"colorize",
"=",
"colorize",
")",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"str",
"(",
"int",
"(",
"line",
")",
"+",
"1",
")",
",",
"short",
"=",
"HR_RDAP_COMMON",
"[",
"key",
"]",
"[",
"'action'",
"]",
"[",
"'_short'",
"]",
"if",
"hr",
"else",
"'action'",
",",
"name",
"=",
"HR_RDAP_COMMON",
"[",
"key",
"]",
"[",
"'action'",
"]",
"[",
"'_name'",
"]",
"if",
"(",
"hr",
"and",
"show_name",
")",
"else",
"None",
",",
"value",
"=",
"action",
",",
"colorize",
"=",
"colorize",
")",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"str",
"(",
"int",
"(",
"line",
")",
"+",
"1",
")",
",",
"short",
"=",
"HR_RDAP_COMMON",
"[",
"key",
"]",
"[",
"'timestamp'",
"]",
"[",
"'_short'",
"]",
"if",
"hr",
"else",
"'timestamp'",
",",
"name",
"=",
"HR_RDAP_COMMON",
"[",
"key",
"]",
"[",
"'timestamp'",
"]",
"[",
"'_name'",
"]",
"if",
"(",
"hr",
"and",
"show_name",
")",
"else",
"None",
",",
"value",
"=",
"timestamp",
",",
"colorize",
"=",
"colorize",
")",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"str",
"(",
"int",
"(",
"line",
")",
"+",
"1",
")",
",",
"short",
"=",
"HR_RDAP_COMMON",
"[",
"key",
"]",
"[",
"'actor'",
"]",
"[",
"'_short'",
"]",
"if",
"hr",
"else",
"'actor'",
",",
"name",
"=",
"HR_RDAP_COMMON",
"[",
"key",
"]",
"[",
"'actor'",
"]",
"[",
"'_name'",
"]",
"if",
"(",
"hr",
"and",
"show_name",
")",
"else",
"None",
",",
"value",
"=",
"actor",
",",
"colorize",
"=",
"colorize",
")",
"count",
"+=",
"1",
"return",
"output"
] | python | The function for generating CLI output RDAP events results.
Args:
source (:obj:`str`): The parent key 'network' or 'objects'
(required).
key (:obj:`str`): The event key 'events' or 'events_actor'
(required).
val (:obj:`dict`): The event dictionary (required).
line (:obj:`str`): The line number (0-4). Determines indentation.
Defaults to '2'.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output. | false |
1,636,761 | def _getgrnam(name, root=None):
'''
Alternative implementation for getgrnam that uses only /etc/group
'''
root = root or '/'
passwd = os.path.join(root, 'etc/group')
with salt.utils.files.fopen(passwd) as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
comps = line.strip().split(':')
if len(comps) < 4:
log.debug('Ignoring group line: %s', line)
continue
if comps[0] == name:
# Generate a getpwnam compatible output
comps[2] = int(comps[2])
comps[3] = comps[3].split(',') if comps[3] else []
return grp.struct_group(comps)
raise KeyError('getgrnam(): name not found: {}'.format(name)) | [
"def",
"_getgrnam",
"(",
"name",
",",
"root",
"=",
"None",
")",
":",
"root",
"=",
"root",
"or",
"'/'",
"passwd",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"'etc/group'",
")",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"passwd",
")",
"as",
"fp_",
":",
"for",
"line",
"in",
"fp_",
":",
"line",
"=",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_unicode",
"(",
"line",
")",
"comps",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"':'",
")",
"if",
"len",
"(",
"comps",
")",
"<",
"4",
":",
"log",
".",
"debug",
"(",
"'Ignoring group line: %s'",
",",
"line",
")",
"continue",
"if",
"comps",
"[",
"0",
"]",
"==",
"name",
":",
"comps",
"[",
"2",
"]",
"=",
"int",
"(",
"comps",
"[",
"2",
"]",
")",
"comps",
"[",
"3",
"]",
"=",
"comps",
"[",
"3",
"]",
".",
"split",
"(",
"','",
")",
"if",
"comps",
"[",
"3",
"]",
"else",
"[",
"]",
"return",
"grp",
".",
"struct_group",
"(",
"comps",
")",
"raise",
"KeyError",
"(",
"'getgrnam(): name not found: {}'",
".",
"format",
"(",
"name",
")",
")"
] | python | Alternative implementation for getgrnam that uses only /etc/group | false |
1,764,949 | def register_callback_query_handler(self, callback, *custom_filters, state=None, run_task=None, **kwargs):
"""
Register handler for callback query
Example:
.. code-block:: python3
dp.register_callback_query_handler(some_callback_handler, lambda callback_query: True)
:param callback:
:param state:
:param custom_filters:
:param run_task: run callback in task (no wait results)
:param kwargs:
"""
filters_set = self.filters_factory.resolve(self.callback_query_handlers,
*custom_filters,
state=state,
**kwargs)
self.callback_query_handlers.register(self._wrap_async_task(callback, run_task), filters_set) | [
"def",
"register_callback_query_handler",
"(",
"self",
",",
"callback",
",",
"*",
"custom_filters",
",",
"state",
"=",
"None",
",",
"run_task",
"=",
"None",
",",
"**",
"kwargs",
")",
":",
"filters_set",
"=",
"self",
".",
"filters_factory",
".",
"resolve",
"(",
"self",
".",
"callback_query_handlers",
",",
"*",
"custom_filters",
",",
"state",
"=",
"state",
",",
"**",
"kwargs",
")",
"self",
".",
"callback_query_handlers",
".",
"register",
"(",
"self",
".",
"_wrap_async_task",
"(",
"callback",
",",
"run_task",
")",
",",
"filters_set",
")"
] | python | Register handler for callback query
Example:
.. code-block:: python3
dp.register_callback_query_handler(some_callback_handler, lambda callback_query: True)
:param callback:
:param state:
:param custom_filters:
:param run_task: run callback in task (no wait results)
:param kwargs: | false |
2,557,037 | def sign_more(self, bucket, cos_path, expired):
"""多次签名(针对上传文件,创建目录, 获取文件目录属性, 拉取目录列表)
:param bucket: bucket名称
:param cos_path: 要操作的cos路径, 以'/'开始
:param expired: 签名过期时间, UNIX时间戳, 如想让签名在30秒后过期, 即可将expired设成当前时间加上30秒
:return: 签名字符串
"""
return self.app_sign(bucket, cos_path, expired) | [
"def",
"sign_more",
"(",
"self",
",",
"bucket",
",",
"cos_path",
",",
"expired",
")",
":",
"return",
"self",
".",
"app_sign",
"(",
"bucket",
",",
"cos_path",
",",
"expired",
")"
] | python | Multi-purpose signature (for uploading files, creating directories, getting file/directory attributes, and listing directory contents).
:param bucket: bucket name
:param cos_path: the COS path to operate on, starting with '/'
:param expired: signature expiry time as a UNIX timestamp; e.g. to have the signature expire in 30 seconds, set expired to the current time plus 30 seconds
:return: the signature string | false |
1,633,484 | def delete_ipv4_range(start_addr=None, end_addr=None, **api_opts):
'''
Delete ip range.
CLI Example:
.. code-block:: bash
salt-call infoblox.delete_ipv4_range start_addr=123.123.122.12
'''
r = get_ipv4_range(start_addr, end_addr, **api_opts)
if r:
return delete_object(r['_ref'], **api_opts)
else:
return True | [
"def",
"delete_ipv4_range",
"(",
"start_addr",
"=",
"None",
",",
"end_addr",
"=",
"None",
",",
"**",
"api_opts",
")",
":",
"r",
"=",
"get_ipv4_range",
"(",
"start_addr",
",",
"end_addr",
",",
"**",
"api_opts",
")",
"if",
"r",
":",
"return",
"delete_object",
"(",
"r",
"[",
"'_ref'",
"]",
",",
"**",
"api_opts",
")",
"else",
":",
"return",
"True"
] | python | Delete ip range.
CLI Example:
.. code-block:: bash
salt-call infoblox.delete_ipv4_range start_addr=123.123.122.12 | false |
2,689,527 | def run(self, host=None, port=None, debug=None, **options):
"""
Start the AgoraApp expecting the provided config to have at least REDIS and PORT fields.
"""
tasks = options.get('tasks', [])
for task in tasks:
if task is not None and hasattr(task, '__call__'):
_batch_tasks.append(task)
thread = Thread(target=self.batch_work)
thread.start()
try:
super(AgoraApp, self).run(host='0.0.0.0', port=self.config['PORT'], debug=True, use_reloader=False)
except Exception, e:
print e.message
self._stop_event.set()
if thread.isAlive():
thread.join() | [
"def",
"run",
"(",
"self",
",",
"host",
"=",
"None",
",",
"port",
"=",
"None",
",",
"debug",
"=",
"None",
",",
"**",
"options",
")",
":",
"tasks",
"=",
"options",
".",
"get",
"(",
"'tasks'",
",",
"[",
"]",
")",
"for",
"task",
"in",
"tasks",
":",
"if",
"task",
"is",
"not",
"None",
"and",
"hasattr",
"(",
"task",
",",
"'__call__'",
")",
":",
"_batch_tasks",
".",
"append",
"(",
"task",
")",
"thread",
"=",
"Thread",
"(",
"target",
"=",
"self",
".",
"batch_work",
")",
"thread",
".",
"start",
"(",
")",
"try",
":",
"super",
"(",
"AgoraApp",
",",
"self",
")",
".",
"run",
"(",
"host",
"=",
"'0.0.0.0'",
",",
"port",
"=",
"self",
".",
"config",
"[",
"'PORT'",
"]",
",",
"debug",
"=",
"True",
",",
"use_reloader",
"=",
"False",
")",
"except",
"Exception",
",",
"e",
":",
"print",
"e",
".",
"message",
"self",
".",
"_stop_event",
".",
"set",
"(",
")",
"if",
"thread",
".",
"isAlive",
"(",
")",
":",
"thread",
".",
"join",
"(",
")"
] | python | Start the AgoraApp expecting the provided config to have at least REDIS and PORT fields. | false |
2,185,849 | def pad_array(v, idx):
"""Expand lists in multidimensional arrays to pad unset values."""
i_v, i_s = idx[0]
if len(idx) > 1:
# Append missing subarrays
v.extend([[] for _ in range(len(v), i_v - i_s + 1)])
# Pad elements
for e in v:
pad_array(e, idx[1:])
else:
v.extend([None for _ in range(len(v), i_v - i_s + 1)]) | [
"def",
"pad_array",
"(",
"v",
",",
"idx",
")",
":",
"i_v",
",",
"i_s",
"=",
"idx",
"[",
"0",
"]",
"if",
"len",
"(",
"idx",
")",
">",
"1",
":",
"v",
".",
"extend",
"(",
"[",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"v",
")",
",",
"i_v",
"-",
"i_s",
"+",
"1",
")",
"]",
")",
"for",
"e",
"in",
"v",
":",
"pad_array",
"(",
"e",
",",
"idx",
"[",
"1",
":",
"]",
")",
"else",
":",
"v",
".",
"extend",
"(",
"[",
"None",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"v",
")",
",",
"i_v",
"-",
"i_s",
"+",
"1",
")",
"]",
")"
] | python | Expand lists in multidimensional arrays to pad unset values. | false |
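A self-contained trace: each ``idx`` entry is an ``(index, start)`` pair per dimension, and the nested lists grow until position ``index - start`` exists on every level:

.. code-block:: python

    v = [[1], [2, 3]]
    pad_array(v, [(3, 0), (2, 0)])
    print(v)
    # [[1, None, None], [2, 3, None],
    #  [None, None, None], [None, None, None]]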
2,198,880 | def load(self, image=None):
'''load an image, either an actual path on the filesystem or a uri.
Parameters
==========
image: the image path or uri to load (e.g., docker://ubuntu)
'''
from spython.image import Image
from spython.instance import Instance
self.simage = Image(image)
if image is not None:
if image.startswith('instance://'):
self.simage = Instance(image)
bot.info(self.simage) | [
"def",
"load",
"(",
"self",
",",
"image",
"=",
"None",
")",
":",
"from",
"spython",
".",
"image",
"import",
"Image",
"from",
"spython",
".",
"instance",
"import",
"Instance",
"self",
".",
"simage",
"=",
"Image",
"(",
"image",
")",
"if",
"image",
"is",
"not",
"None",
":",
"if",
"image",
".",
"startswith",
"(",
"'instance://'",
")",
":",
"self",
".",
"simage",
"=",
"Instance",
"(",
"image",
")",
"bot",
".",
"info",
"(",
"self",
".",
"simage",
")"
] | python | load an image, either an actual path on the filesystem or a uri.
Parameters
==========
image: the image path or uri to load (e.g., docker://ubuntu) | false |
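A usage sketch; per the branch above, an ``instance://`` URI is wrapped as an ``Instance`` while any other value is wrapped as an ``Image`` (the ``client`` object is assumed):

.. code-block:: python

    client.load('docker://ubuntu')         # becomes a spython Image
    client.load('instance://my_instance')  # becomes a spython Instance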
1,844,940 | def _insert_compressed(
collection_name, docs, check_keys, continue_on_error, opts, ctx):
"""Internal compressed unacknowledged insert message helper."""
op_insert, max_bson_size = _insert(
collection_name, docs, check_keys, continue_on_error, opts)
rid, msg = _compress(2002, op_insert, ctx)
return rid, msg, max_bson_size | [
"def",
"_insert_compressed",
"(",
"collection_name",
",",
"docs",
",",
"check_keys",
",",
"continue_on_error",
",",
"opts",
",",
"ctx",
")",
":",
"op_insert",
",",
"max_bson_size",
"=",
"_insert",
"(",
"collection_name",
",",
"docs",
",",
"check_keys",
",",
"continue_on_error",
",",
"opts",
")",
"rid",
",",
"msg",
"=",
"_compress",
"(",
"2002",
",",
"op_insert",
",",
"ctx",
")",
"return",
"rid",
",",
"msg",
",",
"max_bson_size"
] | python | Internal compressed unacknowledged insert message helper. | false |
2,325,689 | def make_hex_texture(grid_size = 2, resolution=1):
"""Makes a texture consisting on a grid of hexagons.
Args:
grid_size (int): the number of hexagons along each dimension of the grid
resolution (int): the number of midpoints along the line of each hexagon
Returns:
A texture.
"""
grid_x, grid_y = np.meshgrid(
np.arange(grid_size),
np.arange(grid_size)
)
ROOT_3_OVER_2 = np.sqrt(3) / 2
ONE_HALF = 0.5
grid_x = (grid_x * np.sqrt(3) + (grid_y % 2) * ROOT_3_OVER_2).flatten()
grid_y = grid_y.flatten() * 1.5
grid_points = grid_x.shape[0]
x_offsets = np.interp(np.arange(4 * resolution),
np.arange(4) * resolution, [
ROOT_3_OVER_2,
0.,
-ROOT_3_OVER_2,
-ROOT_3_OVER_2,
])
y_offsets = np.interp(np.arange(4 * resolution),
np.arange(4) * resolution, [
-ONE_HALF,
-1.,
-ONE_HALF,
ONE_HALF
])
tmx = 4 * resolution
x_t = np.tile(grid_x, (tmx, 1)) + x_offsets.reshape((tmx, 1))
y_t = np.tile(grid_y, (tmx, 1)) + y_offsets.reshape((tmx, 1))
x_t = np.vstack([x_t, np.tile(np.nan, (1, grid_x.size))])
y_t = np.vstack([y_t, np.tile(np.nan, (1, grid_y.size))])
return fit_texture((x_t.flatten('F'), y_t.flatten('F'))) | [
"def",
"make_hex_texture",
"(",
"grid_size",
"=",
"2",
",",
"resolution",
"=",
"1",
")",
":",
"grid_x",
",",
"grid_y",
"=",
"np",
".",
"meshgrid",
"(",
"np",
".",
"arange",
"(",
"grid_size",
")",
",",
"np",
".",
"arange",
"(",
"grid_size",
")",
")",
"ROOT_3_OVER_2",
"=",
"np",
".",
"sqrt",
"(",
"3",
")",
"/",
"2",
"ONE_HALF",
"=",
"0.5",
"grid_x",
"=",
"(",
"grid_x",
"*",
"np",
".",
"sqrt",
"(",
"3",
")",
"+",
"(",
"grid_y",
"%",
"2",
")",
"*",
"ROOT_3_OVER_2",
")",
".",
"flatten",
"(",
")",
"grid_y",
"=",
"grid_y",
".",
"flatten",
"(",
")",
"*",
"1.5",
"grid_points",
"=",
"grid_x",
".",
"shape",
"[",
"0",
"]",
"x_offsets",
"=",
"np",
".",
"interp",
"(",
"np",
".",
"arange",
"(",
"4",
"*",
"resolution",
")",
",",
"np",
".",
"arange",
"(",
"4",
")",
"*",
"resolution",
",",
"[",
"ROOT_3_OVER_2",
",",
"0.",
",",
"-",
"ROOT_3_OVER_2",
",",
"-",
"ROOT_3_OVER_2",
",",
"]",
")",
"y_offsets",
"=",
"np",
".",
"interp",
"(",
"np",
".",
"arange",
"(",
"4",
"*",
"resolution",
")",
",",
"np",
".",
"arange",
"(",
"4",
")",
"*",
"resolution",
",",
"[",
"-",
"ONE_HALF",
",",
"-",
"1.",
",",
"-",
"ONE_HALF",
",",
"ONE_HALF",
"]",
")",
"tmx",
"=",
"4",
"*",
"resolution",
"x_t",
"=",
"np",
".",
"tile",
"(",
"grid_x",
",",
"(",
"tmx",
",",
"1",
")",
")",
"+",
"x_offsets",
".",
"reshape",
"(",
"(",
"tmx",
",",
"1",
")",
")",
"y_t",
"=",
"np",
".",
"tile",
"(",
"grid_y",
",",
"(",
"tmx",
",",
"1",
")",
")",
"+",
"y_offsets",
".",
"reshape",
"(",
"(",
"tmx",
",",
"1",
")",
")",
"x_t",
"=",
"np",
".",
"vstack",
"(",
"[",
"x_t",
",",
"np",
".",
"tile",
"(",
"np",
".",
"nan",
",",
"(",
"1",
",",
"grid_x",
".",
"size",
")",
")",
"]",
")",
"y_t",
"=",
"np",
".",
"vstack",
"(",
"[",
"y_t",
",",
"np",
".",
"tile",
"(",
"np",
".",
"nan",
",",
"(",
"1",
",",
"grid_y",
".",
"size",
")",
")",
"]",
")",
"return",
"fit_texture",
"(",
"(",
"x_t",
".",
"flatten",
"(",
"'F'",
")",
",",
"y_t",
".",
"flatten",
"(",
"'F'",
")",
")",
")"
] | python | Makes a texture consisting of a grid of hexagons.
Args:
grid_size (int): the number of hexagons along each dimension of the grid
resolution (int): the number of midpoints along the line of each hexagon
Returns:
A texture. | false |
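A plotting sketch, assuming ``fit_texture`` (from the same module) returns the rescaled ``(x, y)`` coordinate pair; the ``np.nan`` separators break the polyline between hexagons:

.. code-block:: python

    import matplotlib.pyplot as plt

    x, y = make_hex_texture(grid_size=4, resolution=8)
    plt.plot(x, y, linewidth=0.5)
    plt.gca().set_aspect('equal')
    plt.show()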
2,095,369 | def from_single(cls, meta: ProgramDescription, source: str):
"""Initialize a single glsl string containing all shaders"""
instance = cls(meta)
instance.vertex_source = ShaderSource(
VERTEX_SHADER,
meta.path or meta.vertex_shader,
source
)
if GEOMETRY_SHADER in source:
instance.geometry_source = ShaderSource(
GEOMETRY_SHADER,
meta.path or meta.geometry_shader,
source,
)
if FRAGMENT_SHADER in source:
instance.fragment_source = ShaderSource(
FRAGMENT_SHADER,
meta.path or meta.fragment_shader,
source,
)
if TESS_CONTROL_SHADER in source:
instance.tess_control_source = ShaderSource(
TESS_CONTROL_SHADER,
meta.path or meta.tess_control_shader,
source,
)
if TESS_EVALUATION_SHADER in source:
instance.tess_evaluation_source = ShaderSource(
TESS_EVALUATION_SHADER,
meta.path or meta.tess_evaluation_shader,
source,
)
return instance | [
"def",
"from_single",
"(",
"cls",
",",
"meta",
":",
"ProgramDescription",
",",
"source",
":",
"str",
")",
":",
"instance",
"=",
"cls",
"(",
"meta",
")",
"instance",
".",
"vertex_source",
"=",
"ShaderSource",
"(",
"VERTEX_SHADER",
",",
"meta",
".",
"path",
"or",
"meta",
".",
"vertex_shader",
",",
"source",
")",
"if",
"GEOMETRY_SHADER",
"in",
"source",
":",
"instance",
".",
"geometry_source",
"=",
"ShaderSource",
"(",
"GEOMETRY_SHADER",
",",
"meta",
".",
"path",
"or",
"meta",
".",
"geometry_shader",
",",
"source",
",",
")",
"if",
"FRAGMENT_SHADER",
"in",
"source",
":",
"instance",
".",
"fragment_source",
"=",
"ShaderSource",
"(",
"FRAGMENT_SHADER",
",",
"meta",
".",
"path",
"or",
"meta",
".",
"fragment_shader",
",",
"source",
",",
")",
"if",
"TESS_CONTROL_SHADER",
"in",
"source",
":",
"instance",
".",
"tess_control_source",
"=",
"ShaderSource",
"(",
"TESS_CONTROL_SHADER",
",",
"meta",
".",
"path",
"or",
"meta",
".",
"tess_control_shader",
",",
"source",
",",
")",
"if",
"TESS_EVALUATION_SHADER",
"in",
"source",
":",
"instance",
".",
"tess_evaluation_source",
"=",
"ShaderSource",
"(",
"TESS_EVALUATION_SHADER",
",",
"meta",
".",
"path",
"or",
"meta",
".",
"tess_evaluation_shader",
",",
"source",
",",
")",
"return",
"instance"
] | python | Initialize from a single glsl string containing all shaders | false |
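
Stage detection in from_single is a plain substring test: if a stage's marker occurs anywhere in the combined source, a ShaderSource wrapper is built for that stage, while the vertex stage is always assumed present. A small sketch of the convention this implies, assuming the stage constants are plain marker strings and that ShaderSource (not shown in the snippet) injects the matching #define when each stage is compiled:

# Assumed marker values; the real constants are defined elsewhere.
VERTEX_SHADER = "VERTEX_SHADER"
FRAGMENT_SHADER = "FRAGMENT_SHADER"

combined = """
#version 330
#if defined VERTEX_SHADER
in vec3 in_position;
void main() { gl_Position = vec4(in_position, 1.0); }
#elif defined FRAGMENT_SHADER
out vec4 fragColor;
void main() { fragColor = vec4(1.0); }
#endif
"""

# Mirrors the `if GEOMETRY_SHADER in source:` style checks above.
stages = [s for s in (VERTEX_SHADER, FRAGMENT_SHADER) if s in combined]
print(stages)  # ['VERTEX_SHADER', 'FRAGMENT_SHADER']
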
2,198,693 | def read_items(self, buckets=None, results=15, start=0, item_ids=None):
"""
Returns data from the catalog; also expanded for the requested buckets.
This method is provided for backwards-compatibility.
Args:
Kwargs:
buckets (list): A list of strings specifying which buckets to retrieve
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of objects in the catalog; list contains additional attributes 'start' and 'total'
Example:
>>> c
<catalog - my_songs>
>>> c.read_items(results=1)
[<song - Harmonice Mundi II>]
>>>
"""
warnings.warn("catalog.read_items() is depreciated. Please use catalog.get_item_dicts() instead.")
kwargs = {}
kwargs['bucket'] = buckets or []
kwargs['item_id'] = item_ids or []
response = self.get_attribute("read", results=results, start=start, **kwargs)
rval = ResultList([])
if item_ids:
rval.start = 0
rval.total = len(response['catalog']['items'])
else:
rval.start = response['catalog']['start']
rval.total = response['catalog']['total']
for item in response['catalog']['items']:
new_item = None
# song items
if 'song_id' in item:
item['id'] = item.pop('song_id')
item['title'] = item.pop('song_name')
request = item['request']
new_item = song.Song(**util.fix(item))
new_item.request = request
# artist item
elif 'artist_id' in item:
item['id'] = item.pop('artist_id')
item['name'] = item.pop('artist_name')
request = item['request']
new_item = artist.Artist(**util.fix(item))
new_item.request = request
# unresolved item
else:
new_item = item
rval.append(new_item)
return rval | [
"def",
"read_items",
"(",
"self",
",",
"buckets",
"=",
"None",
",",
"results",
"=",
"15",
",",
"start",
"=",
"0",
",",
"item_ids",
"=",
"None",
")",
":",
"warnings",
".",
"warn",
"(",
"\"catalog.read_items() is depreciated. Please use catalog.get_item_dicts() instead.\"",
")",
"kwargs",
"=",
"{",
"}",
"kwargs",
"[",
"'bucket'",
"]",
"=",
"buckets",
"or",
"[",
"]",
"kwargs",
"[",
"'item_id'",
"]",
"=",
"item_ids",
"or",
"[",
"]",
"response",
"=",
"self",
".",
"get_attribute",
"(",
"\"read\"",
",",
"results",
"=",
"results",
",",
"start",
"=",
"start",
",",
"**",
"kwargs",
")",
"rval",
"=",
"ResultList",
"(",
"[",
"]",
")",
"if",
"item_ids",
":",
"rval",
".",
"start",
"=",
"0",
";",
"rval",
".",
"total",
"=",
"len",
"(",
"response",
"[",
"'catalog'",
"]",
"[",
"'items'",
"]",
")",
"else",
":",
"rval",
".",
"start",
"=",
"response",
"[",
"'catalog'",
"]",
"[",
"'start'",
"]",
"rval",
".",
"total",
"=",
"response",
"[",
"'catalog'",
"]",
"[",
"'total'",
"]",
"for",
"item",
"in",
"response",
"[",
"'catalog'",
"]",
"[",
"'items'",
"]",
":",
"new_item",
"=",
"None",
"if",
"'song_id'",
"in",
"item",
":",
"item",
"[",
"'id'",
"]",
"=",
"item",
".",
"pop",
"(",
"'song_id'",
")",
"item",
"[",
"'title'",
"]",
"=",
"item",
".",
"pop",
"(",
"'song_name'",
")",
"request",
"=",
"item",
"[",
"'request'",
"]",
"new_item",
"=",
"song",
".",
"Song",
"(",
"**",
"util",
".",
"fix",
"(",
"item",
")",
")",
"new_item",
".",
"request",
"=",
"request",
"elif",
"'artist_id'",
"in",
"item",
":",
"item",
"[",
"'id'",
"]",
"=",
"item",
".",
"pop",
"(",
"'artist_id'",
")",
"item",
"[",
"'name'",
"]",
"=",
"item",
".",
"pop",
"(",
"'artist_name'",
")",
"request",
"=",
"item",
"[",
"'request'",
"]",
"new_item",
"=",
"artist",
".",
"Artist",
"(",
"**",
"util",
".",
"fix",
"(",
"item",
")",
")",
"new_item",
".",
"request",
"=",
"request",
"else",
":",
"new_item",
"=",
"item",
"rval",
".",
"append",
"(",
"new_item",
")",
"return",
"rval"
] | python | Returns data from the catalog; also expanded for the requested buckets.
This method is provided for backwards-compatibility.
Args:
Kwargs:
buckets (list): A list of strings specifying which buckets to retrieve
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of objects in the catalog; list contains additional attributes 'start' and 'total'
Example:
>>> c
<catalog - my_songs>
>>> c.read_items(results=1)
[<song - Harmonice Mundi II>]
>>> | false |