id_within_dataset (int64: 46 to 2.71M) | snippet (stringlengths: 63 to 481k) | tokens (sequencelengths: 20 to 15.6k) | language (stringclasses: 2 values) | nl (stringlengths: 1 to 32.4k) | is_duplicated (bool: 2 classes) |
---|---|---|---|---|---|
2,091,656 | def interevent_time_recharges(recharges):
"""
Return the distribution of time between consecutive recharges
of the user.
"""
time_pairs = pairwise(r.datetime for r in recharges)
times = [(new - old).total_seconds() for old, new in time_pairs]
return summary_stats(times) | [
"def",
"interevent_time_recharges",
"(",
"recharges",
")",
":",
"time_pairs",
"=",
"pairwise",
"(",
"r",
".",
"datetime",
"for",
"r",
"in",
"recharges",
")",
"times",
"=",
"[",
"(",
"new",
"-",
"old",
")",
".",
"total_seconds",
"(",
")",
"for",
"old",
",",
"new",
"in",
"time_pairs",
"]",
"return",
"summary_stats",
"(",
"times",
")"
] | python | Return the distribution of time between consecutive recharges
of the user. | false |
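For context on the row above: the snippet calls `pairwise` and `summary_stats`, helpers that are not stored with the row. A minimal sketch of what such helpers might look like, assuming `summary_stats` returns simple distribution statistics (both definitions are assumptions, not the dataset's actual code):

```python
from datetime import datetime
from itertools import tee
from statistics import mean, median


def pairwise(iterable):
    # Yield consecutive (previous, current) pairs, like itertools.pairwise.
    a, b = tee(iterable)
    next(b, None)
    return zip(a, b)


def summary_stats(values):
    # Toy stand-in: report a few statistics of the distribution.
    return {"mean": mean(values), "median": median(values), "n": len(values)}


# Two recharges one hour apart -> a single 3600-second gap.
recharge_times = [datetime(2024, 1, 1, 10), datetime(2024, 1, 1, 11)]
gaps = [(new - old).total_seconds() for old, new in pairwise(recharge_times)]
print(summary_stats(gaps))  # {'mean': 3600.0, 'median': 3600.0, 'n': 1}
```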
2,114,555 | def refresh(self):
"""
Refresh access token. Only non-expired tokens can be renewed.
:Example:
token = client.tokens.refresh()
"""
uri = "%s/%s" % (self.uri, "refresh")
response, instance = self.request("GET", uri)
return response.ok | [
"def",
"refresh",
"(",
"self",
")",
":",
"uri",
"=",
"\"%s/%s\"",
"%",
"(",
"self",
".",
"uri",
",",
"\"refresh\"",
")",
"response",
",",
"instance",
"=",
"self",
".",
"request",
"(",
"\"GET\"",
",",
"uri",
")",
"return",
"response",
".",
"ok"
] | python | Refresh access token. Only non-expired tokens can be renewed.
:Example:
token = client.tokens.refresh() | false |
2,448,272 | def get_request_body(self):
"""
Fetch (and cache) the request body as a dictionary.
:raise web.HTTPError:
- if the content type cannot be matched, then the status code
is set to 415 Unsupported Media Type.
- if decoding the content body fails, then the status code is
set to 400 Bad Syntax.
"""
if self._request_body is None:
settings = get_settings(self.application, force_instance=True)
content_type_header = headers.parse_content_type(
self.request.headers.get('Content-Type',
settings.default_content_type))
content_type = '/'.join([content_type_header.content_type,
content_type_header.content_subtype])
if content_type_header.content_suffix is not None:
content_type = '+'.join([content_type,
content_type_header.content_suffix])
try:
handler = settings[content_type]
except KeyError:
raise web.HTTPError(415, 'cannot decode body of type %s',
content_type)
try:
self._request_body = handler.from_bytes(self.request.body)
except Exception:
self._logger.exception('failed to decode request body')
raise web.HTTPError(400, 'failed to decode request')
return self._request_body | [
"def",
"get_request_body",
"(",
"self",
")",
":",
"if",
"self",
".",
"_request_body",
"is",
"None",
":",
"settings",
"=",
"get_settings",
"(",
"self",
".",
"application",
",",
"force_instance",
"=",
"True",
")",
"content_type_header",
"=",
"headers",
".",
"parse_content_type",
"(",
"self",
".",
"request",
".",
"headers",
".",
"get",
"(",
"'Content-Type'",
",",
"settings",
".",
"default_content_type",
")",
")",
"content_type",
"=",
"'/'",
".",
"join",
"(",
"[",
"content_type_header",
".",
"content_type",
",",
"content_type_header",
".",
"content_subtype",
"]",
")",
"if",
"content_type_header",
".",
"content_suffix",
"is",
"not",
"None",
":",
"content_type",
"=",
"'+'",
".",
"join",
"(",
"[",
"content_type",
",",
"content_type_header",
".",
"content_suffix",
"]",
")",
"try",
":",
"handler",
"=",
"settings",
"[",
"content_type",
"]",
"except",
"KeyError",
":",
"raise",
"web",
".",
"HTTPError",
"(",
"415",
",",
"'cannot decode body of type %s'",
",",
"content_type",
")",
"try",
":",
"self",
".",
"_request_body",
"=",
"handler",
".",
"from_bytes",
"(",
"self",
".",
"request",
".",
"body",
")",
"except",
"Exception",
":",
"self",
".",
"_logger",
".",
"exception",
"(",
"'failed to decode request body'",
")",
"raise",
"web",
".",
"HTTPError",
"(",
"400",
",",
"'failed to decode request'",
")",
"return",
"self",
".",
"_request_body"
] | python | Fetch (and cache) the request body as a dictionary.
:raise web.HTTPError:
- if the content type cannot be matched, then the status code
is set to 415 Unsupported Media Type.
- if decoding the content body fails, then the status code is
set to 400 Bad Syntax. | false |
2,131,484 | def on_touch_move(self, touch):
"""If a card is being dragged, move other cards out of the way to show
where the dragged card will go if you drop it.
"""
if (
'card' not in touch.ud or
'layout' not in touch.ud or
touch.ud['layout'] != self
):
return
if (
touch.ud['layout'] == self and
not hasattr(touch.ud['card'], '_topdecked')
):
touch.ud['card']._topdecked = InstructionGroup()
touch.ud['card']._topdecked.add(touch.ud['card'].canvas)
self.canvas.after.add(touch.ud['card']._topdecked)
for i, deck in enumerate(self.decks):
cards = [card for card in deck if not card.dragging]
maxidx = max(card.idx for card in cards) if cards else 0
if self.direction == 'descending':
cards.reverse()
cards_collided = [
card for card in cards if card.collide_point(*touch.pos)
]
if cards_collided:
collided = cards_collided.pop()
for card in cards_collided:
if card.idx > collided.idx:
collided = card
if collided.deck == touch.ud['deck']:
self.insertion_card = (
1 if collided.idx == 0 else
maxidx + 1 if collided.idx == maxidx else
collided.idx + 1 if collided.idx > touch.ud['idx']
else collided.idx
)
else:
dropdeck = self.decks[collided.deck]
maxidx = max(card.idx for card in dropdeck)
self.insertion_card = (
1 if collided.idx == 0 else
maxidx + 1 if collided.idx == maxidx else
collided.idx + 1
)
if self.insertion_deck != collided.deck:
self.insertion_deck = collided.deck
return
else:
if self.insertion_deck == i:
if self.insertion_card in (0, len(deck)):
pass
elif self.point_before_card(
cards[0], *touch.pos
):
self.insertion_card = 0
elif self.point_after_card(
cards[-1], *touch.pos
):
self.insertion_card = cards[-1].idx
else:
for j, found in enumerate(self._foundations):
if (
found is not None and
found.collide_point(*touch.pos)
):
self.insertion_deck = j
self.insertion_card = 0
return | [
"def",
"on_touch_move",
"(",
"self",
",",
"touch",
")",
":",
"if",
"(",
"'card'",
"not",
"in",
"touch",
".",
"ud",
"or",
"'layout'",
"not",
"in",
"touch",
".",
"ud",
"or",
"touch",
".",
"ud",
"[",
"'layout'",
"]",
"!=",
"self",
")",
":",
"return",
"if",
"(",
"touch",
".",
"ud",
"[",
"'layout'",
"]",
"==",
"self",
"and",
"not",
"hasattr",
"(",
"touch",
".",
"ud",
"[",
"'card'",
"]",
",",
"'_topdecked'",
")",
")",
":",
"touch",
".",
"ud",
"[",
"'card'",
"]",
".",
"_topdecked",
"=",
"InstructionGroup",
"(",
")",
"touch",
".",
"ud",
"[",
"'card'",
"]",
".",
"_topdecked",
".",
"add",
"(",
"touch",
".",
"ud",
"[",
"'card'",
"]",
".",
"canvas",
")",
"self",
".",
"canvas",
".",
"after",
".",
"add",
"(",
"touch",
".",
"ud",
"[",
"'card'",
"]",
".",
"_topdecked",
")",
"for",
"i",
",",
"deck",
"in",
"enumerate",
"(",
"self",
".",
"decks",
")",
":",
"cards",
"=",
"[",
"card",
"for",
"card",
"in",
"deck",
"if",
"not",
"card",
".",
"dragging",
"]",
"maxidx",
"=",
"max",
"(",
"card",
".",
"idx",
"for",
"card",
"in",
"cards",
")",
"if",
"cards",
"else",
"0",
"if",
"self",
".",
"direction",
"==",
"'descending'",
":",
"cards",
".",
"reverse",
"(",
")",
"cards_collided",
"=",
"[",
"card",
"for",
"card",
"in",
"cards",
"if",
"card",
".",
"collide_point",
"(",
"*",
"touch",
".",
"pos",
")",
"]",
"if",
"cards_collided",
":",
"collided",
"=",
"cards_collided",
".",
"pop",
"(",
")",
"for",
"card",
"in",
"cards_collided",
":",
"if",
"card",
".",
"idx",
">",
"collided",
".",
"idx",
":",
"collided",
"=",
"card",
"if",
"collided",
".",
"deck",
"==",
"touch",
".",
"ud",
"[",
"'deck'",
"]",
":",
"self",
".",
"insertion_card",
"=",
"(",
"1",
"if",
"collided",
".",
"idx",
"==",
"0",
"else",
"maxidx",
"+",
"1",
"if",
"collided",
".",
"idx",
"==",
"maxidx",
"else",
"collided",
".",
"idx",
"+",
"1",
"if",
"collided",
".",
"idx",
">",
"touch",
".",
"ud",
"[",
"'idx'",
"]",
"else",
"collided",
".",
"idx",
")",
"else",
":",
"dropdeck",
"=",
"self",
".",
"decks",
"[",
"collided",
".",
"deck",
"]",
"maxidx",
"=",
"max",
"(",
"card",
".",
"idx",
"for",
"card",
"in",
"dropdeck",
")",
"self",
".",
"insertion_card",
"=",
"(",
"1",
"if",
"collided",
".",
"idx",
"==",
"0",
"else",
"maxidx",
"+",
"1",
"if",
"collided",
".",
"idx",
"==",
"maxidx",
"else",
"collided",
".",
"idx",
"+",
"1",
")",
"if",
"self",
".",
"insertion_deck",
"!=",
"collided",
".",
"deck",
":",
"self",
".",
"insertion_deck",
"=",
"collided",
".",
"deck",
"return",
"else",
":",
"if",
"self",
".",
"insertion_deck",
"==",
"i",
":",
"if",
"self",
".",
"insertion_card",
"in",
"(",
"0",
",",
"len",
"(",
"deck",
")",
")",
":",
"pass",
"elif",
"self",
".",
"point_before_card",
"(",
"cards",
"[",
"0",
"]",
",",
"*",
"touch",
".",
"pos",
")",
":",
"self",
".",
"insertion_card",
"=",
"0",
"elif",
"self",
".",
"point_after_card",
"(",
"cards",
"[",
"-",
"1",
"]",
",",
"*",
"touch",
".",
"pos",
")",
":",
"self",
".",
"insertion_card",
"=",
"cards",
"[",
"-",
"1",
"]",
".",
"idx",
"else",
":",
"for",
"j",
",",
"found",
"in",
"enumerate",
"(",
"self",
".",
"_foundations",
")",
":",
"if",
"(",
"found",
"is",
"not",
"None",
"and",
"found",
".",
"collide_point",
"(",
"*",
"touch",
".",
"pos",
")",
")",
":",
"self",
".",
"insertion_deck",
"=",
"j",
"self",
".",
"insertion_card",
"=",
"0",
"return"
] | python | If a card is being dragged, move other cards out of the way to show
where the dragged card will go if you drop it. | false |
1,842,188 | def load_module_from_path(i):
"""
Input: {
path - module path
module_code_name - module name
(cfg) - configuration of the module if exists ...
(skip_init) - if 'yes', skip init
(data_uoa) - module UOA (useful when printing error)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
code - python code object
path - full path to the module
cuid - internal UID of the module
}
"""
p=i['path']
n=i['module_code_name']
xcfg=i.get('cfg',None)
# Find module
try:
x=imp.find_module(n, [p])
except ImportError as e: # pragma: no cover
return {'return':1, 'error':'can\'t find module code (path='+p+', name='+n+', err='+format(e)+')'}
ff=x[0]
full_path=x[1]
# Check if code has been already loaded
if full_path in work['cached_module_by_path'] and work['cached_module_by_path_last_modification'][full_path]==os.path.getmtime(full_path):
ff.close()
# Code already loaded
return work['cached_module_by_path'][full_path]
# Check if has dependency on specific CK kernel version
if xcfg!=None:
kd=xcfg.get('min_kernel_dep','')
if kd!='':
rx=check_version({'version':kd})
if rx['return']>0: return rx
ok=rx['ok']
version_str=rx['current_version']
if ok!='yes':
return {'return':1, 'error':'module "'+i.get('data_uoa','')+'" requires minimal CK kernel version '+kd+' while your version is '+version_str}
# Generate uid for the run-time extension of the loaded module
# otherwise modules with the same extension (key.py for example)
# will be reloaded ...
r=gen_uid({})
if r['return']>0: return r
ruid='rt-'+r['data_uid']
try:
c=imp.load_module(ruid, ff, full_path, x[2])
except ImportError as e: # pragma: no cover
return {'return':1, 'error':'can\'t load module code (path='+p+', name='+n+', err='+format(e)+')'}
x[0].close()
# Initialize module with this CK instance
c.ck=sys.modules[__name__]
if xcfg!=None: c.cfg=xcfg
# Initialize module
if i.get('skip_init','')!='yes':
# Check if init function exists
if getattr(c, 'init')!=None:
r=c.init(i)
if r['return']>0: return r
r={'return':0, 'code':c, 'path':full_path, 'cuid':ruid}
# Cache code together with its time of change
work['cached_module_by_path'][full_path]=r
work['cached_module_by_path_last_modification'][full_path]=os.path.getmtime(full_path)
return r | [
"def",
"load_module_from_path",
"(",
"i",
")",
":",
"p",
"=",
"i",
"[",
"'path'",
"]",
"n",
"=",
"i",
"[",
"'module_code_name'",
"]",
"xcfg",
"=",
"i",
".",
"get",
"(",
"'cfg'",
",",
"None",
")",
"try",
":",
"x",
"=",
"imp",
".",
"find_module",
"(",
"n",
",",
"[",
"p",
"]",
")",
"except",
"ImportError",
"as",
"e",
":",
"return",
"{",
"'return'",
":",
"1",
",",
"'error'",
":",
"'can\\'t find module code (path='",
"+",
"p",
"+",
"', name='",
"+",
"n",
"+",
"', err='",
"+",
"format",
"(",
"e",
")",
"+",
"')'",
"}",
"ff",
"=",
"x",
"[",
"0",
"]",
"full_path",
"=",
"x",
"[",
"1",
"]",
"if",
"full_path",
"in",
"work",
"[",
"'cached_module_by_path'",
"]",
"and",
"work",
"[",
"'cached_module_by_path_last_modification'",
"]",
"[",
"full_path",
"]",
"==",
"os",
".",
"path",
".",
"getmtime",
"(",
"full_path",
")",
":",
"ff",
".",
"close",
"(",
")",
"return",
"work",
"[",
"'cached_module_by_path'",
"]",
"[",
"full_path",
"]",
"if",
"xcfg",
"!=",
"None",
":",
"kd",
"=",
"xcfg",
".",
"get",
"(",
"'min_kernel_dep'",
",",
"''",
")",
"if",
"kd",
"!=",
"''",
":",
"rx",
"=",
"check_version",
"(",
"{",
"'version'",
":",
"kd",
"}",
")",
"if",
"rx",
"[",
"'return'",
"]",
">",
"0",
":",
"return",
"rx",
"ok",
"=",
"rx",
"[",
"'ok'",
"]",
"version_str",
"=",
"rx",
"[",
"'current_version'",
"]",
"if",
"ok",
"!=",
"'yes'",
":",
"return",
"{",
"'return'",
":",
"1",
",",
"'error'",
":",
"'module \"'",
"+",
"i",
".",
"get",
"(",
"'data_uoa'",
",",
"''",
")",
"+",
"'\" requires minimal CK kernel version '",
"+",
"kd",
"+",
"' while your version is '",
"+",
"version_str",
"}",
"r",
"=",
"gen_uid",
"(",
"{",
"}",
")",
"if",
"r",
"[",
"'return'",
"]",
">",
"0",
":",
"return",
"r",
"ruid",
"=",
"'rt-'",
"+",
"r",
"[",
"'data_uid'",
"]",
"try",
":",
"c",
"=",
"imp",
".",
"load_module",
"(",
"ruid",
",",
"ff",
",",
"full_path",
",",
"x",
"[",
"2",
"]",
")",
"except",
"ImportError",
"as",
"e",
":",
"return",
"{",
"'return'",
":",
"1",
",",
"'error'",
":",
"'can\\'t load module code (path='",
"+",
"p",
"+",
"', name='",
"+",
"n",
"+",
"', err='",
"+",
"format",
"(",
"e",
")",
"+",
"')'",
"}",
"x",
"[",
"0",
"]",
".",
"close",
"(",
")",
"c",
".",
"ck",
"=",
"sys",
".",
"modules",
"[",
"__name__",
"]",
"if",
"xcfg",
"!=",
"None",
":",
"c",
".",
"cfg",
"=",
"xcfg",
"if",
"i",
".",
"get",
"(",
"'skip_init'",
",",
"''",
")",
"!=",
"'yes'",
":",
"if",
"getattr",
"(",
"c",
",",
"'init'",
")",
"!=",
"None",
":",
"r",
"=",
"c",
".",
"init",
"(",
"i",
")",
"if",
"r",
"[",
"'return'",
"]",
">",
"0",
":",
"return",
"r",
"r",
"=",
"{",
"'return'",
":",
"0",
",",
"'code'",
":",
"c",
",",
"'path'",
":",
"full_path",
",",
"'cuid'",
":",
"ruid",
"}",
"work",
"[",
"'cached_module_by_path'",
"]",
"[",
"full_path",
"]",
"=",
"r",
"work",
"[",
"'cached_module_by_path_last_modification'",
"]",
"[",
"full_path",
"]",
"=",
"os",
".",
"path",
".",
"getmtime",
"(",
"full_path",
")",
"return",
"r"
] | python | Input: {
path - module path
module_code_name - module name
(cfg) - configuration of the module if exists ...
(skip_init) - if 'yes', skip init
(data_uoa) - module UOA (useful when printing error)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
code - python code object
path - full path to the module
cuid - internal UID of the module
} | false |
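The row above loads Python modules from an explicit path via the long-deprecated `imp` module. As a point of comparison, a minimal modern sketch of the same load-from-path step using `importlib` (the function and argument names here are assumptions, and the CK-specific dict convention, caching, and version checks are omitted):

```python
import importlib.util
import os


def load_module_from_path(path, module_code_name, run_init=True):
    # Build a spec for <path>/<module_code_name>.py and execute it as a module.
    file_path = os.path.join(path, module_code_name + ".py")
    spec = importlib.util.spec_from_file_location(module_code_name, file_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    # Mirror the optional init hook from the original snippet.
    if run_init and hasattr(module, "init"):
        module.init({})
    return module
```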
1,806,287 | def scan_interface(self, address):
""" Scan interface for Crazyflies """
if self._radio_manager is None:
try:
self._radio_manager = _RadioManager(0)
except Exception:
return []
with self._radio_manager as cradio:
# FIXME: implements serial number in the Crazyradio driver!
serial = 'N/A'
logger.info('v%s dongle with serial %s found', cradio.version,
serial)
found = []
if address is not None:
addr = '{:X}'.format(address)
new_addr = struct.unpack('<BBBBB', binascii.unhexlify(addr))
cradio.set_address(new_addr)
cradio.set_arc(1)
cradio.set_data_rate(cradio.DR_250KPS)
if address is None or address == DEFAULT_ADDR:
found += [['radio://0/{}/250K'.format(c), '']
for c in self._scan_radio_channels(cradio)]
cradio.set_data_rate(cradio.DR_1MPS)
found += [['radio://0/{}/1M'.format(c), '']
for c in self._scan_radio_channels(cradio)]
cradio.set_data_rate(cradio.DR_2MPS)
found += [['radio://0/{}/2M'.format(c), '']
for c in self._scan_radio_channels(cradio)]
else:
found += [['radio://0/{}/250K/{:X}'.format(c, address), '']
for c in self._scan_radio_channels(cradio)]
cradio.set_data_rate(cradio.DR_1MPS)
found += [['radio://0/{}/1M/{:X}'.format(c, address), '']
for c in self._scan_radio_channels(cradio)]
cradio.set_data_rate(cradio.DR_2MPS)
found += [['radio://0/{}/2M/{:X}'.format(c, address), '']
for c in self._scan_radio_channels(cradio)]
self._radio_manager.close()
self._radio_manager = None
return found | [
"def",
"scan_interface",
"(",
"self",
",",
"address",
")",
":",
"if",
"self",
".",
"_radio_manager",
"is",
"None",
":",
"try",
":",
"self",
".",
"_radio_manager",
"=",
"_RadioManager",
"(",
"0",
")",
"except",
"Exception",
":",
"return",
"[",
"]",
"with",
"self",
".",
"_radio_manager",
"as",
"cradio",
":",
"serial",
"=",
"'N/A'",
"logger",
".",
"info",
"(",
"'v%s dongle with serial %s found'",
",",
"cradio",
".",
"version",
",",
"serial",
")",
"found",
"=",
"[",
"]",
"if",
"address",
"is",
"not",
"None",
":",
"addr",
"=",
"'{:X}'",
".",
"format",
"(",
"address",
")",
"new_addr",
"=",
"struct",
".",
"unpack",
"(",
"'<BBBBB'",
",",
"binascii",
".",
"unhexlify",
"(",
"addr",
")",
")",
"cradio",
".",
"set_address",
"(",
"new_addr",
")",
"cradio",
".",
"set_arc",
"(",
"1",
")",
"cradio",
".",
"set_data_rate",
"(",
"cradio",
".",
"DR_250KPS",
")",
"if",
"address",
"is",
"None",
"or",
"address",
"==",
"DEFAULT_ADDR",
":",
"found",
"+=",
"[",
"[",
"'radio://0/{}/250K'",
".",
"format",
"(",
"c",
")",
",",
"''",
"]",
"for",
"c",
"in",
"self",
".",
"_scan_radio_channels",
"(",
"cradio",
")",
"]",
"cradio",
".",
"set_data_rate",
"(",
"cradio",
".",
"DR_1MPS",
")",
"found",
"+=",
"[",
"[",
"'radio://0/{}/1M'",
".",
"format",
"(",
"c",
")",
",",
"''",
"]",
"for",
"c",
"in",
"self",
".",
"_scan_radio_channels",
"(",
"cradio",
")",
"]",
"cradio",
".",
"set_data_rate",
"(",
"cradio",
".",
"DR_2MPS",
")",
"found",
"+=",
"[",
"[",
"'radio://0/{}/2M'",
".",
"format",
"(",
"c",
")",
",",
"''",
"]",
"for",
"c",
"in",
"self",
".",
"_scan_radio_channels",
"(",
"cradio",
")",
"]",
"else",
":",
"found",
"+=",
"[",
"[",
"'radio://0/{}/250K/{:X}'",
".",
"format",
"(",
"c",
",",
"address",
")",
",",
"''",
"]",
"for",
"c",
"in",
"self",
".",
"_scan_radio_channels",
"(",
"cradio",
")",
"]",
"cradio",
".",
"set_data_rate",
"(",
"cradio",
".",
"DR_1MPS",
")",
"found",
"+=",
"[",
"[",
"'radio://0/{}/1M/{:X}'",
".",
"format",
"(",
"c",
",",
"address",
")",
",",
"''",
"]",
"for",
"c",
"in",
"self",
".",
"_scan_radio_channels",
"(",
"cradio",
")",
"]",
"cradio",
".",
"set_data_rate",
"(",
"cradio",
".",
"DR_2MPS",
")",
"found",
"+=",
"[",
"[",
"'radio://0/{}/2M/{:X}'",
".",
"format",
"(",
"c",
",",
"address",
")",
",",
"''",
"]",
"for",
"c",
"in",
"self",
".",
"_scan_radio_channels",
"(",
"cradio",
")",
"]",
"self",
".",
"_radio_manager",
".",
"close",
"(",
")",
"self",
".",
"_radio_manager",
"=",
"None",
"return",
"found"
] | python | Scan interface for Crazyflies | false |
2,541,259 | def __ne__(self, other):
"""Check to see if two Vector3 instances are not equal"""
if not isinstance(other, Vector3):
raise TypeError("other must be of type Vector3")
if not(self.x == other.x) \
or not(self.y == other.y) \
or not(self.z == other.z):
#True, the objects are not equal to each other.
return True
else:
#False, the objects are equal to each other
return False | [
"def",
"__ne__",
"(",
"self",
",",
"other",
")",
":",
"if",
"not",
"isinstance",
"(",
"other",
",",
"Vector3",
")",
":",
"raise",
"TypeError",
"(",
"\"other must be of type Vector3\"",
")",
"if",
"not",
"(",
"self",
".",
"x",
"==",
"other",
".",
"x",
")",
"or",
"not",
"(",
"self",
".",
"y",
"==",
"other",
".",
"y",
")",
"or",
"not",
"(",
"self",
".",
"z",
"==",
"other",
".",
"z",
")",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | python | Check to see if two Vector3 instances are not equal | false |
2,232,196 | def add_layer3_cluster_interface(self, interface_id, cluster_virtual=None,
network_value=None, macaddress=None, nodes=None, cvi_mode='packetdispatch',
zone_ref=None, comment=None, **kw):
"""
Add cluster virtual interface. A "CVI" interface is used as a VIP
address for clustered engines. Providing 'nodes' will create the
node specific interfaces. You can also add a cluster address with only
a CVI, or only NDI's.
Add CVI only::
engine.physical_interface.add_cluster_virtual_interface(
interface_id=30,
cluster_virtual='30.30.30.1',
network_value='30.30.30.0/24',
macaddress='02:02:02:02:02:06')
Add NDI's only::
engine.physical_interface.add_cluster_virtual_interface(
interface_id=30,
nodes=nodes)
Add CVI and NDI's::
engine.physical_interface.add_cluster_virtual_interface(
cluster_virtual='5.5.5.1',
network_value='5.5.5.0/24',
macaddress='02:03:03:03:03:03',
nodes=[{'address':'5.5.5.2', 'network_value':'5.5.5.0/24', 'nodeid':1},
{'address':'5.5.5.3', 'network_value':'5.5.5.0/24', 'nodeid':2}])
.. versionchanged:: 0.6.1
Renamed from add_cluster_virtual_interface
:param str,int interface_id: physical interface identifier
:param str cluster_virtual: CVI address (VIP) for this interface
:param str network_value: network value for VIP; format: 10.10.10.0/24
:param str macaddress: mandatory mac address if cluster_virtual and
cluster_mask provided
:param list nodes: list of dictionary items identifying cluster nodes
:param str cvi_mode: packetdispatch is recommended setting
:param str zone_ref: zone reference, can be name, href or Zone
:param kw: key word arguments are valid NodeInterface sub-interface
settings passed in during create time. For example, 'backup_mgt=True'
to enable this interface as the management backup.
:raises EngineCommandFailed: failure creating interface
:return: None
"""
interfaces = [{'nodes': nodes if nodes else [],
'cluster_virtual': cluster_virtual, 'network_value': network_value}]
try:
interface = self._engine.interface.get(interface_id)
interface._add_interface(interface_id, interfaces=interfaces)
return interface.update()
except InterfaceNotFound:
interface = ClusterPhysicalInterface(
engine=self._engine,
interface_id=interface_id,
interfaces=interfaces,
cvi_mode=cvi_mode if macaddress else 'none',
macaddress=macaddress,
zone_ref=zone_ref, comment=comment, **kw)
return self._engine.add_interface(interface) | [
"def",
"add_layer3_cluster_interface",
"(",
"self",
",",
"interface_id",
",",
"cluster_virtual",
"=",
"None",
",",
"network_value",
"=",
"None",
",",
"macaddress",
"=",
"None",
",",
"nodes",
"=",
"None",
",",
"cvi_mode",
"=",
"'packetdispatch'",
",",
"zone_ref",
"=",
"None",
",",
"comment",
"=",
"None",
",",
"**",
"kw",
")",
":",
"interfaces",
"=",
"[",
"{",
"'nodes'",
":",
"nodes",
"if",
"nodes",
"else",
"[",
"]",
",",
"'cluster_virtual'",
":",
"cluster_virtual",
",",
"'network_value'",
":",
"network_value",
"}",
"]",
"try",
":",
"interface",
"=",
"self",
".",
"_engine",
".",
"interface",
".",
"get",
"(",
"interface_id",
")",
"interface",
".",
"_add_interface",
"(",
"interface_id",
",",
"interfaces",
"=",
"interfaces",
")",
"return",
"interface",
".",
"update",
"(",
")",
"except",
"InterfaceNotFound",
":",
"interface",
"=",
"ClusterPhysicalInterface",
"(",
"engine",
"=",
"self",
".",
"_engine",
",",
"interface_id",
"=",
"interface_id",
",",
"interfaces",
"=",
"interfaces",
",",
"cvi_mode",
"=",
"cvi_mode",
"if",
"macaddress",
"else",
"'none'",
",",
"macaddress",
"=",
"macaddress",
",",
"zone_ref",
"=",
"zone_ref",
",",
"comment",
"=",
"comment",
",",
"**",
"kw",
")",
"return",
"self",
".",
"_engine",
".",
"add_interface",
"(",
"interface",
")"
] | python | Add cluster virtual interface. A "CVI" interface is used as a VIP
address for clustered engines. Providing 'nodes' will create the
node specific interfaces. You can also add a cluster address with only
a CVI, or only NDI's.
Add CVI only::
engine.physical_interface.add_cluster_virtual_interface(
interface_id=30,
cluster_virtual='30.30.30.1',
network_value='30.30.30.0/24',
macaddress='02:02:02:02:02:06')
Add NDI's only::
engine.physical_interface.add_cluster_virtual_interface(
interface_id=30,
nodes=nodes)
Add CVI and NDI's::
engine.physical_interface.add_cluster_virtual_interface(
cluster_virtual='5.5.5.1',
network_value='5.5.5.0/24',
macaddress='02:03:03:03:03:03',
nodes=[{'address':'5.5.5.2', 'network_value':'5.5.5.0/24', 'nodeid':1},
{'address':'5.5.5.3', 'network_value':'5.5.5.0/24', 'nodeid':2}])
.. versionchanged:: 0.6.1
Renamed from add_cluster_virtual_interface
:param str,int interface_id: physical interface identifier
:param str cluster_virtual: CVI address (VIP) for this interface
:param str network_value: network value for VIP; format: 10.10.10.0/24
:param str macaddress: mandatory mac address if cluster_virtual and
cluster_mask provided
:param list nodes: list of dictionary items identifying cluster nodes
:param str cvi_mode: packetdispatch is recommended setting
:param str zone_ref: zone reference, can be name, href or Zone
:param kw: key word arguments are valid NodeInterface sub-interface
settings passed in during create time. For example, 'backup_mgt=True'
to enable this interface as the management backup.
:raises EngineCommandFailed: failure creating interface
:return: None | false |
2,508,815 | def pattern(head, *args, mode=1, wc_name=None, conditions=None, **kwargs) \
-> Pattern:
"""'Flat' constructor for the Pattern class
Positional and keyword arguments are mapped into `args` and `kwargs`,
respectively. Useful for defining rules that match an instantiated
Expression with specific arguments
"""
if len(args) == 0:
args = None
if len(kwargs) == 0:
kwargs = None
return Pattern(head, args, kwargs, mode=mode, wc_name=wc_name,
conditions=conditions) | [
"def",
"pattern",
"(",
"head",
",",
"*",
"args",
",",
"mode",
"=",
"1",
",",
"wc_name",
"=",
"None",
",",
"conditions",
"=",
"None",
",",
"**",
"kwargs",
")",
"->",
"Pattern",
":",
"if",
"len",
"(",
"args",
")",
"==",
"0",
":",
"args",
"=",
"None",
"if",
"len",
"(",
"kwargs",
")",
"==",
"0",
":",
"kwargs",
"=",
"None",
"return",
"Pattern",
"(",
"head",
",",
"args",
",",
"kwargs",
",",
"mode",
"=",
"mode",
",",
"wc_name",
"=",
"wc_name",
",",
"conditions",
"=",
"conditions",
")"
] | python | Flat' constructor for the Pattern class
Positional and keyword arguments are mapped into `args` and `kwargs`,
respectively. Useful for defining rules that match an instantiated
Expression with specific arguments | false |
2,166,178 | def set(self, value, *keys):
"""
Sets the dict of the information as read from the yaml file. To access
the file safely, you can use the keys in the order of the access.
Example: set("{'project':{'fg82':[i0-i10]}}", "provisioner","policy")
will set the value of config["provisioner"]["policy"] in the yaml file if
it does not exists an error will be printing that the value does not
exists. Alternatively you can use the . notation e.g.
set("{'project':{'fg82':[i0-i10]}}", "provisioner.policy")
"""
element = self
if keys is None:
return self
if '.' in keys[0]:
keys = keys[0].split(".")
nested_str = ''.join(["['{0}']".format(x) for x in keys])
# Safely evaluate an expression to see if it is one of the Python
# literal structures: strings, numbers, tuples, lists, dicts, booleans,
# and None. Quoted string will be used if it is none of these types.
try:
ast.literal_eval(str(value))
converted = str(value)
except ValueError:
converted = "'" + str(value) + "'"
exec("self" + nested_str + "=" + converted)
return element | [
"def",
"set",
"(",
"self",
",",
"value",
",",
"*",
"keys",
")",
":",
"element",
"=",
"self",
"if",
"keys",
"is",
"None",
":",
"return",
"self",
"if",
"'.'",
"in",
"keys",
"[",
"0",
"]",
":",
"keys",
"=",
"keys",
"[",
"0",
"]",
".",
"split",
"(",
"\".\"",
")",
"nested_str",
"=",
"''",
".",
"join",
"(",
"[",
"\"['{0}']\"",
".",
"format",
"(",
"x",
")",
"for",
"x",
"in",
"keys",
"]",
")",
"try",
":",
"ast",
".",
"literal_eval",
"(",
"str",
"(",
"value",
")",
")",
"converted",
"=",
"str",
"(",
"value",
")",
"except",
"ValueError",
":",
"converted",
"=",
"\"'\"",
"+",
"str",
"(",
"value",
")",
"+",
"\"'\"",
"exec",
"(",
"\"self\"",
"+",
"nested_str",
"+",
"\"=\"",
"+",
"converted",
")",
"return",
"element"
] | python | Sets the dict of the information as read from the yaml file. To access
the file safely, you can use the keys in the order of the access.
Example: set("{'project':{'fg82':[i0-i10]}}", "provisioner","policy")
will set the value of config["provisioner"]["policy"] in the yaml file if
it does not exists an error will be printing that the value does not
exists. Alternatively you can use the . notation e.g.
set("{'project':{'fg82':[i0-i10]}}", "provisioner.policy") | false |
2,682,015 | def parse_options():
"""Define sub-commands and command line options."""
server, username, password, auto_update_soa = False, False, False, False
prs = argparse.ArgumentParser(description='usage')
prs.add_argument('-v', '--version', action='version',
version=__version__)
if os.environ.get('HOME'):
config_file = os.environ.get('HOME') + '/.tdclirc'
if os.path.isfile(config_file):
(server, username,
password, auto_update_soa) = check_config(config_file)
conn = dict(server=server, username=username,
password=password, auto_update_soa=auto_update_soa)
subprs = prs.add_subparsers(help='commands')
# Convert and print JSON
parse_show(subprs)
# Retrieve records
parse_get(subprs, conn)
# Create record
parse_create(subprs, conn)
# Create bulk_records
parse_bulk_create(subprs, conn)
# Delete record
parse_delete(subprs, conn)
# Delete bulk_records
parse_bulk_delete(subprs, conn)
# Update a record
parse_update(subprs, conn)
# Update SOA serial
parse_update_soa(subprs, conn)
# Create zone
parse_create_zone(subprs, conn)
# Delete zone
parse_delete_zone(subprs, conn)
# Retrieve template
parse_get_tmpl(subprs, conn)
# Delete template
parse_delete_tmpl(subprs, conn)
args = prs.parse_args()
return args | [
"def",
"parse_options",
"(",
")",
":",
"server",
",",
"username",
",",
"password",
",",
"auto_update_soa",
"=",
"False",
",",
"False",
",",
"False",
",",
"False",
"prs",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'usage'",
")",
"prs",
".",
"add_argument",
"(",
"'-v'",
",",
"'--version'",
",",
"action",
"=",
"'version'",
",",
"version",
"=",
"__version__",
")",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"'HOME'",
")",
":",
"config_file",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'HOME'",
")",
"+",
"'/.tdclirc'",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"config_file",
")",
":",
"(",
"server",
",",
"username",
",",
"password",
",",
"auto_update_soa",
")",
"=",
"check_config",
"(",
"config_file",
")",
"conn",
"=",
"dict",
"(",
"server",
"=",
"server",
",",
"username",
"=",
"username",
",",
"password",
"=",
"password",
",",
"auto_update_soa",
"=",
"auto_update_soa",
")",
"subprs",
"=",
"prs",
".",
"add_subparsers",
"(",
"help",
"=",
"'commands'",
")",
"parse_show",
"(",
"subprs",
")",
"parse_get",
"(",
"subprs",
",",
"conn",
")",
"parse_create",
"(",
"subprs",
",",
"conn",
")",
"parse_bulk_create",
"(",
"subprs",
",",
"conn",
")",
"parse_delete",
"(",
"subprs",
",",
"conn",
")",
"parse_bulk_delete",
"(",
"subprs",
",",
"conn",
")",
"parse_update",
"(",
"subprs",
",",
"conn",
")",
"parse_update_soa",
"(",
"subprs",
",",
"conn",
")",
"parse_create_zone",
"(",
"subprs",
",",
"conn",
")",
"parse_delete_zone",
"(",
"subprs",
",",
"conn",
")",
"parse_get_tmpl",
"(",
"subprs",
",",
"conn",
")",
"parse_delete_tmpl",
"(",
"subprs",
",",
"conn",
")",
"args",
"=",
"prs",
".",
"parse_args",
"(",
")",
"return",
"args"
] | python | Define sub-commands and command line options. | false |
1,819,404 | def _update_table_cache(self):
"""Clears and updates the table cache to be in sync with self"""
self._table_cache.clear()
for sel, tab, val in self:
try:
self._table_cache[tab].append((sel, val))
except KeyError:
self._table_cache[tab] = [(sel, val)]
assert len(self) == self._len_table_cache() | [
"def",
"_update_table_cache",
"(",
"self",
")",
":",
"self",
".",
"_table_cache",
".",
"clear",
"(",
")",
"for",
"sel",
",",
"tab",
",",
"val",
"in",
"self",
":",
"try",
":",
"self",
".",
"_table_cache",
"[",
"tab",
"]",
".",
"append",
"(",
"(",
"sel",
",",
"val",
")",
")",
"except",
"KeyError",
":",
"self",
".",
"_table_cache",
"[",
"tab",
"]",
"=",
"[",
"(",
"sel",
",",
"val",
")",
"]",
"assert",
"len",
"(",
"self",
")",
"==",
"self",
".",
"_len_table_cache",
"(",
")"
] | python | Clears and updates the table cache to be in sync with self | false |
2,074,732 | def _run_raw(self, cmd, ignore_errors=False):
"""Runs command directly, skipping tmux interface"""
# TODO: capture stdout/stderr for feature parity with aws_backend
result = os.system(cmd)
if result != 0:
if ignore_errors:
self.log(f"command ({cmd}) failed.")
assert False, "_run_raw failed" | [
"def",
"_run_raw",
"(",
"self",
",",
"cmd",
",",
"ignore_errors",
"=",
"False",
")",
":",
"result",
"=",
"os",
".",
"system",
"(",
"cmd",
")",
"if",
"result",
"!=",
"0",
":",
"if",
"ignore_errors",
":",
"self",
".",
"log",
"(",
"f\"command ({cmd}) failed.\"",
")",
"assert",
"False",
",",
"\"_run_raw failed\""
] | python | Runs command directly, skipping tmux interface | false |
2,465,727 | def _add_epsilon_states(self, stateset, gathered_epsilons):
'''
stateset is the list of initial states
gathered_epsilons is a dictionary of (dst: src) epsilon dictionaries
'''
for i in list(stateset):
if i not in gathered_epsilons:
gathered_epsilons[i] = {}
q = _otq()
q.append(i)
while q:
s = q.popleft()
for j in self._transitions.setdefault(s, {}).setdefault(NFA.EPSILON, set()):
gathered_epsilons[i][j] = s if j not in gathered_epsilons[i] else self.choose(s, j)
q.append(j)
stateset.update(gathered_epsilons[i].keys()) | [
"def",
"_add_epsilon_states",
"(",
"self",
",",
"stateset",
",",
"gathered_epsilons",
")",
":",
"for",
"i",
"in",
"list",
"(",
"stateset",
")",
":",
"if",
"i",
"not",
"in",
"gathered_epsilons",
":",
"gathered_epsilons",
"[",
"i",
"]",
"=",
"{",
"}",
"q",
"=",
"_otq",
"(",
")",
"q",
".",
"append",
"(",
"i",
")",
"while",
"q",
":",
"s",
"=",
"q",
".",
"popleft",
"(",
")",
"for",
"j",
"in",
"self",
".",
"_transitions",
".",
"setdefault",
"(",
"s",
",",
"{",
"}",
")",
".",
"setdefault",
"(",
"NFA",
".",
"EPSILON",
",",
"set",
"(",
")",
")",
":",
"gathered_epsilons",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"s",
"if",
"j",
"not",
"in",
"gathered_epsilons",
"[",
"i",
"]",
"else",
"self",
".",
"choose",
"(",
"s",
",",
"j",
")",
"q",
".",
"append",
"(",
"j",
")",
"stateset",
".",
"update",
"(",
"gathered_epsilons",
"[",
"i",
"]",
".",
"keys",
"(",
")",
")"
] | python | stateset is the list of initial states
gathered_epsilons is a dictionary of (dst: src) epsilon dictionaries | false |
2,272,424 | def peek(self, size=-1):
"""
Return bytes from the stream without advancing the position.
Args:
size (int): Number of bytes to read. -1 to read the full
stream.
Returns:
bytes: bytes read
"""
if not self._readable:
raise UnsupportedOperation('read')
with self._seek_lock:
self._raw.seek(self._seek)
return self._raw._peek(size) | [
"def",
"peek",
"(",
"self",
",",
"size",
"=",
"-",
"1",
")",
":",
"if",
"not",
"self",
".",
"_readable",
":",
"raise",
"UnsupportedOperation",
"(",
"'read'",
")",
"with",
"self",
".",
"_seek_lock",
":",
"self",
".",
"_raw",
".",
"seek",
"(",
"self",
".",
"_seek",
")",
"return",
"self",
".",
"_raw",
".",
"_peek",
"(",
"size",
")"
] | python | Return bytes from the stream without advancing the position.
Args:
size (int): Number of bytes to read. -1 to read the full
stream.
Returns:
bytes: bytes read | false |
2,403,784 | def cosmetics(flat1, flat2 = None, mask=None, lowercut=6.0, uppercut=6.0, siglev=2.0):
"""Find cosmetic defects in a detector using two flat field images.
Two arrays representing flat fields of different exposure times are
required. Cosmetic defects are selected as points that deviate
significantly of the expected normal distribution of pixels in
the ratio between `flat2` and `flat1`.
The median of the ratio array is computed and subtracted to it.
The standard deviation of the distribution of pixels is computed
obtaining the percentiles nearest the pixel values corresponding to
`nsig` in the normal CDF. The standar deviation is then the distance
between the pixel values divided by two times `nsig`.
The ratio image is then normalized with this standard deviation.
The values in the ratio above `uppercut` are flagged as hot pixels,
and those below '-lowercut` are flagged as dead pixels in the output mask.
:parameter flat1: an array representing a flat illuminated exposure.
:parameter flat2: an array representing a flat illuminated exposure.
:parameter mask: an integer array representing initial mask.
:parameter lowercut: values bellow this sigma level are flagged as dead pixels.
:parameter uppercut: values above this sigma level are flagged as hot pixels.
:parameter siglev: level to estimate the standard deviation.
:returns: the updated mask
"""
if flat2 is None:
flat1, flat2 = flat2, flat1
flat1 = numpy.ones_like(flat2)
if type(mask) is not numpy.ndarray:
mask=numpy.zeros(flat1.shape,dtype='int')
ratio, mask = comp_ratio(flat1, flat2, mask)
fratio1 = ratio[~mask]
central = numpy.median(fratio1)
std = robust_std(fratio1, central, siglev)
mask_u = ratio > central + uppercut * std
mask_d = ratio < central - lowercut * std
mask_final = mask_u | mask_d | mask
return mask_final | [
"def",
"cosmetics",
"(",
"flat1",
",",
"flat2",
"=",
"None",
",",
"mask",
"=",
"None",
",",
"lowercut",
"=",
"6.0",
",",
"uppercut",
"=",
"6.0",
",",
"siglev",
"=",
"2.0",
")",
":",
"if",
"flat2",
"is",
"None",
":",
"flat1",
",",
"flat2",
"=",
"flat2",
",",
"flat1",
"flat1",
"=",
"numpy",
".",
"ones_like",
"(",
"flat2",
")",
"if",
"type",
"(",
"mask",
")",
"is",
"not",
"numpy",
".",
"ndarray",
":",
"mask",
"=",
"numpy",
".",
"zeros",
"(",
"flat1",
".",
"shape",
",",
"dtype",
"=",
"'int'",
")",
"ratio",
",",
"mask",
"=",
"comp_ratio",
"(",
"flat1",
",",
"flat2",
",",
"mask",
")",
"fratio1",
"=",
"ratio",
"[",
"~",
"mask",
"]",
"central",
"=",
"numpy",
".",
"median",
"(",
"fratio1",
")",
"std",
"=",
"robust_std",
"(",
"fratio1",
",",
"central",
",",
"siglev",
")",
"mask_u",
"=",
"ratio",
">",
"central",
"+",
"uppercut",
"*",
"std",
"mask_d",
"=",
"ratio",
"<",
"central",
"-",
"lowercut",
"*",
"std",
"mask_final",
"=",
"mask_u",
"|",
"mask_d",
"|",
"mask",
"return",
"mask_final"
] | python | Find cosmetic defects in a detector using two flat field images.
Two arrays representing flat fields of different exposure times are
required. Cosmetic defects are selected as points that deviate
significantly of the expected normal distribution of pixels in
the ratio between `flat2` and `flat1`.
The median of the ratio array is computed and subtracted to it.
The standard deviation of the distribution of pixels is computed
obtaining the percentiles nearest the pixel values corresponding to
`nsig` in the normal CDF. The standar deviation is then the distance
between the pixel values divided by two times `nsig`.
The ratio image is then normalized with this standard deviation.
The values in the ratio above `uppercut` are flagged as hot pixels,
and those below '-lowercut` are flagged as dead pixels in the output mask.
:parameter flat1: an array representing a flat illuminated exposure.
:parameter flat2: an array representing a flat illuminated exposure.
:parameter mask: an integer array representing initial mask.
:parameter lowercut: values bellow this sigma level are flagged as dead pixels.
:parameter uppercut: values above this sigma level are flagged as hot pixels.
:parameter siglev: level to estimate the standard deviation.
:returns: the updated mask | false |
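The `cosmetics` docstring above describes estimating a robust standard deviation from the percentiles that correspond to +/- `siglev` under a normal CDF; the `robust_std` helper itself is not stored with the row. A minimal sketch of one way that estimator could look (the implementation details are assumptions):

```python
import numpy as np
from scipy.stats import norm


def robust_std(values, center, siglev=2.0):
    # `center` mirrors the snippet's call signature; unused in this sketch.
    # Fraction of a normal distribution falling inside +/- siglev sigma.
    prob = norm.cdf(siglev) - norm.cdf(-siglev)
    # The percentiles nearest +/- siglev are separated by 2 * siglev standard
    # deviations for normally distributed pixel values.
    lo, hi = np.percentile(values, [50.0 * (1.0 - prob), 50.0 * (1.0 + prob)])
    return (hi - lo) / (2.0 * siglev)


rng = np.random.default_rng(0)
ratio = rng.normal(loc=1.0, scale=0.05, size=100_000)
print(robust_std(ratio, np.median(ratio)))  # close to 0.05
```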
2,709,104 | def __init__(self, klass=None, args=None, kw=None,
allow_none=True, **metadata ):
"""Construct an Instance trait.
This trait allows values that are instances of a particular
class or its sublclasses. Our implementation is quite different
from that of enthough.traits as we don't allow instances to be used
for klass and we handle the ``args`` and ``kw`` arguments differently.
Parameters
----------
klass : class, str
The class that forms the basis for the trait. Class names
can also be specified as strings, like 'foo.bar.Bar'.
args : tuple
Positional arguments for generating the default value.
kw : dict
Keyword arguments for generating the default value.
allow_none : bool
Indicates whether None is allowed as a value.
Default Value
-------------
If both ``args`` and ``kw`` are None, then the default value is None.
If ``args`` is a tuple and ``kw`` is a dict, then the default is
created as ``klass(*args, **kw)``. If either ``args`` or ``kw`` is
not (but not both), None is replace by ``()`` or ``{}``.
"""
self._allow_none = allow_none
if (klass is None) or (not (inspect.isclass(klass) or isinstance(klass, basestring))):
raise TraitError('The klass argument must be a class'
' you gave: %r' % klass)
self.klass = klass
# self.klass is a class, so handle default_value
if args is None and kw is None:
default_value = None
else:
if args is None:
# kw is not None
args = ()
elif kw is None:
# args is not None
kw = {}
if not isinstance(kw, dict):
raise TraitError("The 'kw' argument must be a dict or None.")
if not isinstance(args, tuple):
raise TraitError("The 'args' argument must be a tuple or None.")
default_value = DefaultValueGenerator(*args, **kw)
super(Instance, self).__init__(default_value, **metadata) | [
"def",
"__init__",
"(",
"self",
",",
"klass",
"=",
"None",
",",
"args",
"=",
"None",
",",
"kw",
"=",
"None",
",",
"allow_none",
"=",
"True",
",",
"**",
"metadata",
")",
":",
"self",
".",
"_allow_none",
"=",
"allow_none",
"if",
"(",
"klass",
"is",
"None",
")",
"or",
"(",
"not",
"(",
"inspect",
".",
"isclass",
"(",
"klass",
")",
"or",
"isinstance",
"(",
"klass",
",",
"basestring",
")",
")",
")",
":",
"raise",
"TraitError",
"(",
"'The klass argument must be a class'",
"' you gave: %r'",
"%",
"klass",
")",
"self",
".",
"klass",
"=",
"klass",
"if",
"args",
"is",
"None",
"and",
"kw",
"is",
"None",
":",
"default_value",
"=",
"None",
"else",
":",
"if",
"args",
"is",
"None",
":",
"args",
"=",
"(",
")",
"elif",
"kw",
"is",
"None",
":",
"kw",
"=",
"{",
"}",
"if",
"not",
"isinstance",
"(",
"kw",
",",
"dict",
")",
":",
"raise",
"TraitError",
"(",
"\"The 'kw' argument must be a dict or None.\"",
")",
"if",
"not",
"isinstance",
"(",
"args",
",",
"tuple",
")",
":",
"raise",
"TraitError",
"(",
"\"The 'args' argument must be a tuple or None.\"",
")",
"default_value",
"=",
"DefaultValueGenerator",
"(",
"*",
"args",
",",
"**",
"kw",
")",
"super",
"(",
"Instance",
",",
"self",
")",
".",
"__init__",
"(",
"default_value",
",",
"**",
"metadata",
")"
] | python | Construct an Instance trait.
This trait allows values that are instances of a particular
class or its sublclasses. Our implementation is quite different
from that of enthough.traits as we don't allow instances to be used
for klass and we handle the ``args`` and ``kw`` arguments differently.
Parameters
----------
klass : class, str
The class that forms the basis for the trait. Class names
can also be specified as strings, like 'foo.bar.Bar'.
args : tuple
Positional arguments for generating the default value.
kw : dict
Keyword arguments for generating the default value.
allow_none : bool
Indicates whether None is allowed as a value.
Default Value
-------------
If both ``args`` and ``kw`` are None, then the default value is None.
If ``args`` is a tuple and ``kw`` is a dict, then the default is
created as ``klass(*args, **kw)``. If either ``args`` or ``kw`` is
not (but not both), None is replace by ``()`` or ``{}``. | false |
1,844,296 | def savefig(writekey, dpi=None, ext=None):
"""Save current figure to file.
The `filename` is generated as follows:
filename = settings.figdir + writekey + settings.plot_suffix + '.' + settings.file_format_figs
"""
if dpi is None:
# we need this as in notebooks, the internal figures are also influenced by 'savefig.dpi' this...
if not isinstance(rcParams['savefig.dpi'], str) and rcParams['savefig.dpi'] < 150:
if settings._low_resolution_warning:
logg.warn(
'You are using a low resolution (dpi<150) for saving figures.\n'
'Consider running `set_figure_params(dpi_save=...)`, which will '
'adjust `matplotlib.rcParams[\'savefig.dpi\']`')
settings._low_resolution_warning = False
else:
dpi = rcParams['savefig.dpi']
if not os.path.exists(settings.figdir): os.makedirs(settings.figdir)
if settings.figdir[-1] != '/': settings.figdir += '/'
if ext is None: ext = settings.file_format_figs
filename = settings.figdir + writekey + settings.plot_suffix + '.' + ext
# output the following msg at warning level; it's really important for the user
logg.msg('saving figure to file', filename, v=1)
pl.savefig(filename, dpi=dpi, bbox_inches='tight') | [
"def",
"savefig",
"(",
"writekey",
",",
"dpi",
"=",
"None",
",",
"ext",
"=",
"None",
")",
":",
"if",
"dpi",
"is",
"None",
":",
"if",
"not",
"isinstance",
"(",
"rcParams",
"[",
"'savefig.dpi'",
"]",
",",
"str",
")",
"and",
"rcParams",
"[",
"'savefig.dpi'",
"]",
"<",
"150",
":",
"if",
"settings",
".",
"_low_resolution_warning",
":",
"logg",
".",
"warn",
"(",
"'You are using a low resolution (dpi<150) for saving figures.\\n'",
"'Consider running `set_figure_params(dpi_save=...)`, which will '",
"'adjust `matplotlib.rcParams[\\'savefig.dpi\\']`'",
")",
"settings",
".",
"_low_resolution_warning",
"=",
"False",
"else",
":",
"dpi",
"=",
"rcParams",
"[",
"'savefig.dpi'",
"]",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"settings",
".",
"figdir",
")",
":",
"os",
".",
"makedirs",
"(",
"settings",
".",
"figdir",
")",
"if",
"settings",
".",
"figdir",
"[",
"-",
"1",
"]",
"!=",
"'/'",
":",
"settings",
".",
"figdir",
"+=",
"'/'",
"if",
"ext",
"is",
"None",
":",
"ext",
"=",
"settings",
".",
"file_format_figs",
"filename",
"=",
"settings",
".",
"figdir",
"+",
"writekey",
"+",
"settings",
".",
"plot_suffix",
"+",
"'.'",
"+",
"ext",
"logg",
".",
"msg",
"(",
"'saving figure to file'",
",",
"filename",
",",
"v",
"=",
"1",
")",
"pl",
".",
"savefig",
"(",
"filename",
",",
"dpi",
"=",
"dpi",
",",
"bbox_inches",
"=",
"'tight'",
")"
] | python | Save current figure to file.
The `filename` is generated as follows:
filename = settings.figdir + writekey + settings.plot_suffix + '.' + settings.file_format_figs | false |
1,796,242 | def stop(self, actor, exc=None, exit_code=None):
"""Gracefully stop the ``actor``.
"""
if actor.state <= ACTOR_STATES.RUN:
# The actor has not started the stopping process. Starts it now.
actor.state = ACTOR_STATES.STOPPING
actor.event('start').clear()
if exc:
if not exit_code:
exit_code = getattr(exc, 'exit_code', 1)
if exit_code == 1:
exc_info = sys.exc_info()
if exc_info[0] is not None:
actor.logger.critical('Stopping', exc_info=exc_info)
else:
actor.logger.critical('Stopping: %s', exc)
elif exit_code == 2:
actor.logger.error(str(exc))
elif exit_code:
actor.stream.writeln(str(exc))
else:
if not exit_code:
exit_code = getattr(actor._loop, 'exit_code', 0)
#
# Fire stopping event
actor.exit_code = exit_code
actor.stopping_waiters = []
actor.event('stopping').fire()
if actor.stopping_waiters and actor._loop.is_running():
actor.logger.info('asynchronous stopping')
# make sure to return the future (used by arbiter for waiting)
return actor._loop.create_task(self._async_stopping(actor))
else:
if actor.logger:
actor.logger.info('stopping')
self._stop_actor(actor)
elif actor.stopped():
return self._stop_actor(actor, True) | [
"def",
"stop",
"(",
"self",
",",
"actor",
",",
"exc",
"=",
"None",
",",
"exit_code",
"=",
"None",
")",
":",
"if",
"actor",
".",
"state",
"<=",
"ACTOR_STATES",
".",
"RUN",
":",
"actor",
".",
"state",
"=",
"ACTOR_STATES",
".",
"STOPPING",
"actor",
".",
"event",
"(",
"'start'",
")",
".",
"clear",
"(",
")",
"if",
"exc",
":",
"if",
"not",
"exit_code",
":",
"exit_code",
"=",
"getattr",
"(",
"exc",
",",
"'exit_code'",
",",
"1",
")",
"if",
"exit_code",
"==",
"1",
":",
"exc_info",
"=",
"sys",
".",
"exc_info",
"(",
")",
"if",
"exc_info",
"[",
"0",
"]",
"is",
"not",
"None",
":",
"actor",
".",
"logger",
".",
"critical",
"(",
"'Stopping'",
",",
"exc_info",
"=",
"exc_info",
")",
"else",
":",
"actor",
".",
"logger",
".",
"critical",
"(",
"'Stopping: %s'",
",",
"exc",
")",
"elif",
"exit_code",
"==",
"2",
":",
"actor",
".",
"logger",
".",
"error",
"(",
"str",
"(",
"exc",
")",
")",
"elif",
"exit_code",
":",
"actor",
".",
"stream",
".",
"writeln",
"(",
"str",
"(",
"exc",
")",
")",
"else",
":",
"if",
"not",
"exit_code",
":",
"exit_code",
"=",
"getattr",
"(",
"actor",
".",
"_loop",
",",
"'exit_code'",
",",
"0",
")",
"actor",
".",
"exit_code",
"=",
"exit_code",
"actor",
".",
"stopping_waiters",
"=",
"[",
"]",
"actor",
".",
"event",
"(",
"'stopping'",
")",
".",
"fire",
"(",
")",
"if",
"actor",
".",
"stopping_waiters",
"and",
"actor",
".",
"_loop",
".",
"is_running",
"(",
")",
":",
"actor",
".",
"logger",
".",
"info",
"(",
"'asynchronous stopping'",
")",
"return",
"actor",
".",
"_loop",
".",
"create_task",
"(",
"self",
".",
"_async_stopping",
"(",
"actor",
")",
")",
"else",
":",
"if",
"actor",
".",
"logger",
":",
"actor",
".",
"logger",
".",
"info",
"(",
"'stopping'",
")",
"self",
".",
"_stop_actor",
"(",
"actor",
")",
"elif",
"actor",
".",
"stopped",
"(",
")",
":",
"return",
"self",
".",
"_stop_actor",
"(",
"actor",
",",
"True",
")"
] | python | Gracefully stop the ``actor``. | false |
1,792,498 | def overloaded(self, client_address):
"""
Got too many requests.
Send back a (precompiled) XMLRPC response saying as much
"""
body = {
'status': False,
'indexing': False,
'lastblock': -1,
'error': 'overloaded',
'http_status': 429
}
body_str = json.dumps(body)
resp = 'HTTP/1.0 200 OK\r\nServer: BaseHTTP/0.3 Python/2.7.14+\r\nContent-type: text/xml\r\nContent-length: {}\r\n\r\n'.format(len(body_str))
resp += '<methodResponse><params><param><value><string>{}</string></value></param></params></methodResponse>'.format(body_str)
return resp | [
"def",
"overloaded",
"(",
"self",
",",
"client_address",
")",
":",
"body",
"=",
"{",
"'status'",
":",
"False",
",",
"'indexing'",
":",
"False",
",",
"'lastblock'",
":",
"-",
"1",
",",
"'error'",
":",
"'overloaded'",
",",
"'http_status'",
":",
"429",
"}",
"body_str",
"=",
"json",
".",
"dumps",
"(",
"body",
")",
"resp",
"=",
"'HTTP/1.0 200 OK\\r\\nServer: BaseHTTP/0.3 Python/2.7.14+\\r\\nContent-type: text/xml\\r\\nContent-length: {}\\r\\n\\r\\n'",
".",
"format",
"(",
"len",
"(",
"body_str",
")",
")",
"resp",
"+=",
"'<methodResponse><params><param><value><string>{}</string></value></param></params></methodResponse>'",
".",
"format",
"(",
"body_str",
")",
"return",
"resp"
] | python | Got too many requests.
Send back a (precompiled) XMLRPC response saying as much | false |
2,172,460 | def digestSession(self, mecha=MechanismSHA1):
"""
C_DigestInit/C_DigestUpdate/C_DigestKey/C_DigestFinal
:param mecha: the digesting mechanism to be used
(use `MechanismSHA1` for `CKM_SHA_1`)
:type mecha: :class:`Mechanism`
:return: A :class:`DigestSession` object
:rtype: DigestSession
"""
return DigestSession(self.lib, self.session, mecha) | [
"def",
"digestSession",
"(",
"self",
",",
"mecha",
"=",
"MechanismSHA1",
")",
":",
"return",
"DigestSession",
"(",
"self",
".",
"lib",
",",
"self",
".",
"session",
",",
"mecha",
")"
] | python | C_DigestInit/C_DigestUpdate/C_DigestKey/C_DigestFinal
:param mecha: the digesting mechanism to be used
(use `MechanismSHA1` for `CKM_SHA_1`)
:type mecha: :class:`Mechanism`
:return: A :class:`DigestSession` object
:rtype: DigestSession | false |
2,523,962 | def __init__(
self,
centres,
lenscale=Parameter(gamma(1.), Positive()),
regularizer=None
):
"""See this class's docstring."""
self.M, self.d = centres.shape
self.C = centres
self._init_lenscale(lenscale)
super(_LengthScaleBasis, self).__init__(regularizer) | [
"def",
"__init__",
"(",
"self",
",",
"centres",
",",
"lenscale",
"=",
"Parameter",
"(",
"gamma",
"(",
"1.",
")",
",",
"Positive",
"(",
")",
")",
",",
"regularizer",
"=",
"None",
")",
":",
"self",
".",
"M",
",",
"self",
".",
"d",
"=",
"centres",
".",
"shape",
"self",
".",
"C",
"=",
"centres",
"self",
".",
"_init_lenscale",
"(",
"lenscale",
")",
"super",
"(",
"_LengthScaleBasis",
",",
"self",
")",
".",
"__init__",
"(",
"regularizer",
")"
] | python | See this class's docstring. | false |
2,154,678 | def history_report(history, config=None, html=True):
"""
Test a model and save a history report.
Parameters
----------
history : memote.HistoryManager
The manager grants access to previous results.
config : dict, optional
The final test report configuration.
html : bool, optional
Whether to render the report as full HTML or JSON (default True).
"""
if config is None:
config = ReportConfiguration.load()
report = HistoryReport(history=history, configuration=config)
if html:
return report.render_html()
else:
return report.render_json() | [
"def",
"history_report",
"(",
"history",
",",
"config",
"=",
"None",
",",
"html",
"=",
"True",
")",
":",
"if",
"config",
"is",
"None",
":",
"config",
"=",
"ReportConfiguration",
".",
"load",
"(",
")",
"report",
"=",
"HistoryReport",
"(",
"history",
"=",
"history",
",",
"configuration",
"=",
"config",
")",
"if",
"html",
":",
"return",
"report",
".",
"render_html",
"(",
")",
"else",
":",
"return",
"report",
".",
"render_json",
"(",
")"
] | python | Test a model and save a history report.
Parameters
----------
history : memote.HistoryManager
The manager grants access to previous results.
config : dict, optional
The final test report configuration.
html : bool, optional
Whether to render the report as full HTML or JSON (default True). | false |
2,349,323 | def cublasSsyr(handle, uplo, n, alpha, x, incx, A, lda):
"""
Rank-1 operation on real symmetric matrix.
"""
status = _libcublas.cublasSsyr_v2(handle,
_CUBLAS_FILL_MODE[uplo], n,
ctypes.byref(ctypes.c_float(alpha)),
int(x), incx, int(A), lda)
cublasCheckStatus(status) | [
"def",
"cublasSsyr",
"(",
"handle",
",",
"uplo",
",",
"n",
",",
"alpha",
",",
"x",
",",
"incx",
",",
"A",
",",
"lda",
")",
":",
"status",
"=",
"_libcublas",
".",
"cublasSsyr_v2",
"(",
"handle",
",",
"_CUBLAS_FILL_MODE",
"[",
"uplo",
"]",
",",
"n",
",",
"ctypes",
".",
"byref",
"(",
"ctypes",
".",
"c_float",
"(",
"alpha",
")",
")",
",",
"int",
"(",
"x",
")",
",",
"incx",
",",
"int",
"(",
"A",
")",
",",
"lda",
")",
"cublasCheckStatus",
"(",
"status",
")"
] | python | Rank-1 operation on real symmetric matrix. | false |
2,512,330 | def import_string(dotted_path):
"""
Import a dotted module path.
Returns the attribute/class designated by the last name in the path.
Raises ImportError if the import fails.
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError:
raise ImportError('%s doesn\'t look like a valid path' % dotted_path)
module = __import__(module_path, fromlist=[class_name])
try:
return getattr(module, class_name)
except AttributeError:
msg = 'Module "%s" does not define a "%s" attribute/class' % (
dotted_path, class_name)
raise ImportError(msg) | [
"def",
"import_string",
"(",
"dotted_path",
")",
":",
"try",
":",
"module_path",
",",
"class_name",
"=",
"dotted_path",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"except",
"ValueError",
":",
"raise",
"ImportError",
"(",
"'%s doesn\\'t look like a valid path'",
"%",
"dotted_path",
")",
"module",
"=",
"__import__",
"(",
"module_path",
",",
"fromlist",
"=",
"[",
"class_name",
"]",
")",
"try",
":",
"return",
"getattr",
"(",
"module",
",",
"class_name",
")",
"except",
"AttributeError",
":",
"msg",
"=",
"'Module \"%s\" does not define a \"%s\" attribute/class'",
"%",
"(",
"dotted_path",
",",
"class_name",
")",
"raise",
"ImportError",
"(",
"msg",
")"
] | python | Import a dotted module path.
Returns the attribute/class designated by the last name in the path.
Raises ImportError if the import fails. | false |
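`import_string` above is the common "import an object by dotted path" utility. A self-contained equivalent using `importlib.import_module` instead of `__import__`, shown only as an illustrative variant rather than the dataset's code:

```python
from importlib import import_module


def import_string(dotted_path):
    # Split "pkg.module.Attr" into the module path and the attribute name.
    try:
        module_path, class_name = dotted_path.rsplit(".", 1)
    except ValueError:
        raise ImportError("%s doesn't look like a valid path" % dotted_path)
    module = import_module(module_path)
    try:
        return getattr(module, class_name)
    except AttributeError:
        raise ImportError('Module "%s" does not define a "%s" attribute/class'
                          % (module_path, class_name))


print(import_string("collections.OrderedDict"))  # <class 'collections.OrderedDict'>
```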
2,028,543 | def to_table(self, columns=None, exclude_columns=None):
"""
Create a `~astropy.table.QTable` of properties.
If ``columns`` or ``exclude_columns`` are not input, then the
`~astropy.table.QTable` will include a default list of
scalar-valued properties.
Parameters
----------
columns : str or list of str, optional
Names of columns, in order, to include in the output
`~astropy.table.QTable`. The allowed column names are any
of the attributes of `SourceProperties`.
exclude_columns : str or list of str, optional
Names of columns to exclude from the default properties list
in the output `~astropy.table.QTable`.
Returns
-------
table : `~astropy.table.QTable`
A single-row table of properties of the source.
"""
return _properties_table(self, columns=columns,
exclude_columns=exclude_columns) | [
"def",
"to_table",
"(",
"self",
",",
"columns",
"=",
"None",
",",
"exclude_columns",
"=",
"None",
")",
":",
"return",
"_properties_table",
"(",
"self",
",",
"columns",
"=",
"columns",
",",
"exclude_columns",
"=",
"exclude_columns",
")"
] | python | Create a `~astropy.table.QTable` of properties.
If ``columns`` or ``exclude_columns`` are not input, then the
`~astropy.table.QTable` will include a default list of
scalar-valued properties.
Parameters
----------
columns : str or list of str, optional
Names of columns, in order, to include in the output
`~astropy.table.QTable`. The allowed column names are any
of the attributes of `SourceProperties`.
exclude_columns : str or list of str, optional
Names of columns to exclude from the default properties list
in the output `~astropy.table.QTable`.
Returns
-------
table : `~astropy.table.QTable`
A single-row table of properties of the source. | false |
2,369,057 | def start_time_distance(item_a, item_b, max_value):
"""
Absolute difference between the starting times of each item.
Args:
item_a: STObject from the first set in TrackMatcher
item_b: STObject from the second set in TrackMatcher
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
"""
start_time_diff = np.abs(item_a.times[0] - item_b.times[0])
return np.minimum(start_time_diff, max_value) / float(max_value) | [
"def",
"start_time_distance",
"(",
"item_a",
",",
"item_b",
",",
"max_value",
")",
":",
"start_time_diff",
"=",
"np",
".",
"abs",
"(",
"item_a",
".",
"times",
"[",
"0",
"]",
"-",
"item_b",
".",
"times",
"[",
"0",
"]",
")",
"return",
"np",
".",
"minimum",
"(",
"start_time_diff",
",",
"max_value",
")",
"/",
"float",
"(",
"max_value",
")"
] | python | Absolute difference between the starting times of each item.
Args:
item_a: STObject from the first set in TrackMatcher
item_b: STObject from the second set in TrackMatcher
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1. | false |
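A small worked example, using a hypothetical namedtuple in place of STObject (only the `times` attribute matters here):

    import numpy as np
    from collections import namedtuple

    Track = namedtuple('Track', ['times'])
    a = Track(times=np.array([0, 5, 10]))
    b = Track(times=np.array([12, 15]))

    # |0 - 12| = 12, capped at 30, then scaled: 12 / 30 = 0.4
    print(start_time_distance(a, b, max_value=30))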
2,577,159 | def p_scalar_namespace_name(p):
'''scalar : namespace_name
| NS_SEPARATOR namespace_name
| NAMESPACE NS_SEPARATOR namespace_name'''
if len(p) == 2:
p[0] = ast.Constant(p[1], lineno=p.lineno(1))
elif len(p) == 3:
p[0] = ast.Constant(p[1] + p[2], lineno=p.lineno(1))
else:
p[0] = ast.Constant(p[1] + p[2] + p[3], lineno=p.lineno(1)) | [
"def",
"p_scalar_namespace_name",
"(",
"p",
")",
":",
"if",
"len",
"(",
"p",
")",
"==",
"2",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"Constant",
"(",
"p",
"[",
"1",
"]",
",",
"lineno",
"=",
"p",
".",
"lineno",
"(",
"1",
")",
")",
"elif",
"len",
"(",
"p",
")",
"==",
"3",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"Constant",
"(",
"p",
"[",
"1",
"]",
"+",
"p",
"[",
"2",
"]",
",",
"lineno",
"=",
"p",
".",
"lineno",
"(",
"1",
")",
")",
"else",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"Constant",
"(",
"p",
"[",
"1",
"]",
"+",
"p",
"[",
"2",
"]",
"+",
"p",
"[",
"3",
"]",
",",
"lineno",
"=",
"p",
".",
"lineno",
"(",
"1",
")",
")"
] | python | scalar : namespace_name
| NS_SEPARATOR namespace_name
| NAMESPACE NS_SEPARATOR namespace_name | false |
1,924,106 | def heatmap(data, scale, vmin=None, vmax=None, cmap=None, ax=None,
scientific=False, style='triangular', colorbar=True,
permutation=None, use_rgba=False, cbarlabel=None, cb_kwargs=None):
"""
Plots heatmap of given color values.
Parameters
----------
data: dictionary
A dictionary mapping the i, j polygon to the heatmap color, where
i + j + k = scale.
scale: Integer
The scale used to partition the simplex.
vmin: float, None
The minimum color value, used to normalize colors. Computed if absent.
vmax: float, None
The maximum color value, used to normalize colors. Computed if absent.
cmap: String or matplotlib.colors.Colormap, None
The name of the Matplotlib colormap to use.
ax: Matplotlib AxesSubplot, None
The subplot to draw on.
scientific: Bool, False
Whether to use scientific notation for colorbar numbers.
style: String, "triangular"
The style of the heatmap, "triangular", "dual-triangular" or "hexagonal"
colorbar: bool, True
Show colorbar.
permutation: string, None
A permutation of the coordinates
use_rgba: bool, False
Use rgba color values
cbarlabel: string, None
Text label for the colorbar
cb_kwargs: dict
dict of kwargs to pass to colorbar
Returns
-------
ax: The matplotlib axis
"""
if not ax:
fig, ax = pyplot.subplots()
# If use_rgba, make the RGBA values numpy arrays so that they can
# be averaged.
if use_rgba:
for k, v in data.items():
data[k] = numpy.array(v)
else:
cmap = get_cmap(cmap)
if vmin is None:
vmin = min(data.values())
if vmax is None:
vmax = max(data.values())
style = style.lower()[0]
if style not in ["t", "h", 'd']:
raise ValueError("Heatmap style must be 'triangular', 'dual-triangular', or 'hexagonal'")
vertices_values = polygon_generator(data, scale, style,
permutation=permutation)
# Draw the polygons and color them
for vertices, value in vertices_values:
if value is None:
continue
if not use_rgba:
color = colormapper(value, vmin, vmax, cmap=cmap)
else:
color = value # rgba tuple (r,g,b,a) all in [0,1]
# Matplotlib wants a list of xs and a list of ys
xs, ys = unzip(vertices)
ax.fill(xs, ys, facecolor=color, edgecolor=color)
if not cb_kwargs:
cb_kwargs = dict()
if colorbar:
colorbar_hack(ax, vmin, vmax, cmap, scientific=scientific,
cbarlabel=cbarlabel, **cb_kwargs)
return ax | [
"def",
"heatmap",
"(",
"data",
",",
"scale",
",",
"vmin",
"=",
"None",
",",
"vmax",
"=",
"None",
",",
"cmap",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"scientific",
"=",
"False",
",",
"style",
"=",
"'triangular'",
",",
"colorbar",
"=",
"True",
",",
"permutation",
"=",
"None",
",",
"use_rgba",
"=",
"False",
",",
"cbarlabel",
"=",
"None",
",",
"cb_kwargs",
"=",
"None",
")",
":",
"if",
"not",
"ax",
":",
"fig",
",",
"ax",
"=",
"pyplot",
".",
"subplots",
"(",
")",
"if",
"use_rgba",
":",
"for",
"k",
",",
"v",
"in",
"data",
".",
"items",
"(",
")",
":",
"data",
"[",
"k",
"]",
"=",
"numpy",
".",
"array",
"(",
"v",
")",
"else",
":",
"cmap",
"=",
"get_cmap",
"(",
"cmap",
")",
"if",
"vmin",
"is",
"None",
":",
"vmin",
"=",
"min",
"(",
"data",
".",
"values",
"(",
")",
")",
"if",
"vmax",
"is",
"None",
":",
"vmax",
"=",
"max",
"(",
"data",
".",
"values",
"(",
")",
")",
"style",
"=",
"style",
".",
"lower",
"(",
")",
"[",
"0",
"]",
"if",
"style",
"not",
"in",
"[",
"\"t\"",
",",
"\"h\"",
",",
"'d'",
"]",
":",
"raise",
"ValueError",
"(",
"\"Heatmap style must be 'triangular', 'dual-triangular', or 'hexagonal'\"",
")",
"vertices_values",
"=",
"polygon_generator",
"(",
"data",
",",
"scale",
",",
"style",
",",
"permutation",
"=",
"permutation",
")",
"for",
"vertices",
",",
"value",
"in",
"vertices_values",
":",
"if",
"value",
"is",
"None",
":",
"continue",
"if",
"not",
"use_rgba",
":",
"color",
"=",
"colormapper",
"(",
"value",
",",
"vmin",
",",
"vmax",
",",
"cmap",
"=",
"cmap",
")",
"else",
":",
"color",
"=",
"value",
"xs",
",",
"ys",
"=",
"unzip",
"(",
"vertices",
")",
"ax",
".",
"fill",
"(",
"xs",
",",
"ys",
",",
"facecolor",
"=",
"color",
",",
"edgecolor",
"=",
"color",
")",
"if",
"not",
"cb_kwargs",
":",
"cb_kwargs",
"=",
"dict",
"(",
")",
"if",
"colorbar",
":",
"colorbar_hack",
"(",
"ax",
",",
"vmin",
",",
"vmax",
",",
"cmap",
",",
"scientific",
"=",
"scientific",
",",
"cbarlabel",
"=",
"cbarlabel",
",",
"**",
"cb_kwargs",
")",
"return",
"ax"
] | python | Plots heatmap of given color values.
Parameters
----------
data: dictionary
A dictionary mapping the i, j polygon to the heatmap color, where
i + j + k = scale.
scale: Integer
The scale used to partition the simplex.
vmin: float, None
The minimum color value, used to normalize colors. Computed if absent.
vmax: float, None
The maximum color value, used to normalize colors. Computed if absent.
cmap: String or matplotlib.colors.Colormap, None
The name of the Matplotlib colormap to use.
ax: Matplotlib AxesSubplot, None
The subplot to draw on.
scientific: Bool, False
Whether to use scientific notation for colorbar numbers.
style: String, "triangular"
The style of the heatmap, "triangular", "dual-triangular" or "hexagonal"
colorbar: bool, True
Show colorbar.
permutation: string, None
A permutation of the coordinates
use_rgba: bool, False
Use rgba color values
cbarlabel: string, None
Text label for the colorbar
cb_kwargs: dict
dict of kwargs to pass to colorbar
Returns
-------
ax: The matplotlib axis | false |
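A minimal call sketch, assuming the function is imported from its plotting module; `data` maps each (i, j) pair (with k = scale - i - j implied) to a scalar value, and the output file name is made up:

    scale = 5
    data = {(i, j): i * j for i in range(scale + 1) for j in range(scale + 1 - i)}

    ax = heatmap(data, scale, style='hexagonal', cbarlabel='i * j')
    ax.figure.savefig('simplex_heatmap.png')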
2,566,570 | def get_volumes_for_instance(self, arg, device=None):
"""
Return all EC2 Volume objects attached to ``arg`` instance name or ID.
May specify ``device`` to limit to the (single) volume attached as that
device.
"""
instance = self.get(arg)
filters = {'attachment.instance-id': instance.id}
if device is not None:
filters['attachment.device'] = device
return self.get_all_volumes(filters=filters) | [
"def",
"get_volumes_for_instance",
"(",
"self",
",",
"arg",
",",
"device",
"=",
"None",
")",
":",
"instance",
"=",
"self",
".",
"get",
"(",
"arg",
")",
"filters",
"=",
"{",
"'attachment.instance-id'",
":",
"instance",
".",
"id",
"}",
"if",
"device",
"is",
"not",
"None",
":",
"filters",
"[",
"'attachment.device'",
"]",
"=",
"device",
"return",
"self",
".",
"get_all_volumes",
"(",
"filters",
"=",
"filters",
")"
] | python | Return all EC2 Volume objects attached to ``arg`` instance name or ID.
May specify ``device`` to limit to the (single) volume attached as that
device. | false |
2,358,764 | def __init__(self, shouldPack = True):
"""
Class representation of the C{IMAGE_SECTION_HEADER} structure.
@see: U{http://msdn.microsoft.com/en-us/library/windows/desktop/ms680341%28v=vs.85%29.aspx}
@type shouldPack: bool
@param shouldPack: (Optional) If set to C{True}, the object will be packed. If set to C{False}, the object won't be packed.
"""
baseclasses.BaseStructClass.__init__(self, shouldPack)
self.name = datatypes.String('.travest') #: L{String} name.
self.misc = datatypes.DWORD(0x1000) #: L{DWORD} misc.
self.virtualAddress = datatypes.DWORD(0x1000) #: L{DWORD} virtualAddress.
self.sizeOfRawData = datatypes.DWORD(0x200) #: L{DWORD} sizeOfRawData.
self.pointerToRawData = datatypes.DWORD(0x400) #: L{DWORD} pointerToRawData.
self.pointerToRelocations = datatypes.DWORD(0) #: L{DWORD} pointerToRelocations.
self.pointerToLineNumbers = datatypes.DWORD(0) #: L{DWORD} pointerToLineNumbers.
self.numberOfRelocations = datatypes.WORD(0) #: L{WORD} numberOfRelocations.
self.numberOfLinesNumbers = datatypes.WORD(0) #: L{WORD} numberOfLinesNumbers.
self.characteristics = datatypes.DWORD(0x60000000) #: L{DWORD} characteristics.
self._attrsList = ["name","misc","virtualAddress","sizeOfRawData","pointerToRawData","pointerToRelocations",\
"pointerToLineNumbers","numberOfRelocations","numberOfLinesNumbers","characteristics"] | [
"def",
"__init__",
"(",
"self",
",",
"shouldPack",
"=",
"True",
")",
":",
"baseclasses",
".",
"BaseStructClass",
".",
"__init__",
"(",
"self",
",",
"shouldPack",
")",
"self",
".",
"name",
"=",
"datatypes",
".",
"String",
"(",
"'.travest'",
")",
"self",
".",
"misc",
"=",
"datatypes",
".",
"DWORD",
"(",
"0x1000",
")",
"self",
".",
"virtualAddress",
"=",
"datatypes",
".",
"DWORD",
"(",
"0x1000",
")",
"self",
".",
"sizeOfRawData",
"=",
"datatypes",
".",
"DWORD",
"(",
"0x200",
")",
"self",
".",
"pointerToRawData",
"=",
"datatypes",
".",
"DWORD",
"(",
"0x400",
")",
"self",
".",
"pointerToRelocations",
"=",
"datatypes",
".",
"DWORD",
"(",
"0",
")",
"self",
".",
"pointerToLineNumbers",
"=",
"datatypes",
".",
"DWORD",
"(",
"0",
")",
"self",
".",
"numberOfRelocations",
"=",
"datatypes",
".",
"WORD",
"(",
"0",
")",
"self",
".",
"numberOfLinesNumbers",
"=",
"datatypes",
".",
"WORD",
"(",
"0",
")",
"self",
".",
"characteristics",
"=",
"datatypes",
".",
"DWORD",
"(",
"0x60000000",
")",
"self",
".",
"_attrsList",
"=",
"[",
"\"name\"",
",",
"\"misc\"",
",",
"\"virtualAddress\"",
",",
"\"sizeOfRawData\"",
",",
"\"pointerToRawData\"",
",",
"\"pointerToRelocations\"",
",",
"\"pointerToLineNumbers\"",
",",
"\"numberOfRelocations\"",
",",
"\"numberOfLinesNumbers\"",
",",
"\"characteristics\"",
"]"
] | python | Class representation of the C{IMAGE_SECTION_HEADER} structure.
@see: U{http://msdn.microsoft.com/en-us/library/windows/desktop/ms680341%28v=vs.85%29.aspx}
@type shouldPack: bool
@param shouldPack: (Optional) If set to C{True}, the object will be packed. If set to C{False}, the object won't be packed. | false |
2,326,311 | def page_crawled(self, page_resp):
"""Check if page has been crawled by hashing its text content.
Add new pages to the page cache.
Return whether page was found in cache.
"""
page_text = utils.parse_text(page_resp)
page_hash = utils.hash_text(''.join(page_text))
if page_hash not in self.page_cache:
utils.cache_page(self.page_cache, page_hash, self.args['cache_size'])
return False
return True | [
"def",
"page_crawled",
"(",
"self",
",",
"page_resp",
")",
":",
"page_text",
"=",
"utils",
".",
"parse_text",
"(",
"page_resp",
")",
"page_hash",
"=",
"utils",
".",
"hash_text",
"(",
"''",
".",
"join",
"(",
"page_text",
")",
")",
"if",
"page_hash",
"not",
"in",
"self",
".",
"page_cache",
":",
"utils",
".",
"cache_page",
"(",
"self",
".",
"page_cache",
",",
"page_hash",
",",
"self",
".",
"args",
"[",
"'cache_size'",
"]",
")",
"return",
"False",
"return",
"True"
] | python | Check if page has been crawled by hashing its text content.
Add new pages to the page cache.
Return whether page was found in cache. | false |
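The method defers parsing and cache maintenance to a `utils` module that is not shown; a self-contained sketch of the same hash-based deduplication idea:

    import hashlib

    seen = set()

    def already_crawled(page_text, cache=seen, max_size=10):
        digest = hashlib.md5(page_text.encode('utf-8')).hexdigest()
        if digest in cache:
            return True
        if len(cache) < max_size:   # crude stand-in for utils.cache_page's size limit
            cache.add(digest)
        return False

    print(already_crawled('hello world'))   # False - first time this content is seen
    print(already_crawled('hello world'))   # True  - duplicate content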
2,524,835 | def and_(self, other):
"""
Creates a new compound query using the
<orb.QueryCompound.Op.And> type.
:param other <Query> || <orb.QueryCompound>
:return <orb.QueryCompound>
:sa __and__
:usage |>>> from orb import Query as Q
|>>> query = (Q('test') != 1).and_(Q('name') == 'Eric')
|>>> print query
|(test is not 1 and name is Eric)
"""
if not isinstance(other, (Query, QueryCompound)) or other.isNull():
return self.copy()
elif not self:
return other.copy()
else:
return orb.QueryCompound(self, other, op=orb.QueryCompound.Op.And) | [
"def",
"and_",
"(",
"self",
",",
"other",
")",
":",
"if",
"not",
"isinstance",
"(",
"other",
",",
"(",
"Query",
",",
"QueryCompound",
")",
")",
"or",
"other",
".",
"isNull",
"(",
")",
":",
"return",
"self",
".",
"copy",
"(",
")",
"elif",
"not",
"self",
":",
"return",
"other",
".",
"copy",
"(",
")",
"else",
":",
"return",
"orb",
".",
"QueryCompound",
"(",
"self",
",",
"other",
",",
"op",
"=",
"orb",
".",
"QueryCompound",
".",
"Op",
".",
"And",
")"
] | python | Creates a new compound query using the
<orb.QueryCompound.Op.And> type.
:param other <Query> || <orb.QueryCompound>
:return <orb.QueryCompound>
:sa __and__
:usage |>>> from orb import Query as Q
|>>> query = (Q('test') != 1).and_(Q('name') == 'Eric')
|>>> print query
|(test is not 1 and name is Eric) | false |
2,234,601 | def get_tests_from_description(name,
descriptions,
parsed=None):
"""
Recursively collect all tests in test description.
Args:
name (str): Yaml test description file name.
descriptions (dict): Dict of test description name
(key) and absolute file paths
(value).
parsed (list): List of description paths which have
already been parsed to prevent infinte
recursion.
Returns:
A list of expanded test files.
"""
tests = []
if not parsed:
parsed = []
description = descriptions.get(name, None)
if not description:
raise IpaUtilsException(
'Test description file with name: %s cannot be located.'
% name
)
if description in parsed:
return tests
parsed.append(description)
test_data = get_yaml_config(description)
if 'tests' in test_data:
tests += test_data.get('tests')
if 'include' in test_data:
for description_name in test_data.get('include'):
tests += get_tests_from_description(
description_name,
descriptions,
parsed
)
return tests | [
"def",
"get_tests_from_description",
"(",
"name",
",",
"descriptions",
",",
"parsed",
"=",
"None",
")",
":",
"tests",
"=",
"[",
"]",
"if",
"not",
"parsed",
":",
"parsed",
"=",
"[",
"]",
"description",
"=",
"descriptions",
".",
"get",
"(",
"name",
",",
"None",
")",
"if",
"not",
"description",
":",
"raise",
"IpaUtilsException",
"(",
"'Test description file with name: %s cannot be located.'",
"%",
"name",
")",
"if",
"description",
"in",
"parsed",
":",
"return",
"tests",
"parsed",
".",
"append",
"(",
"description",
")",
"test_data",
"=",
"get_yaml_config",
"(",
"description",
")",
"if",
"'tests'",
"in",
"test_data",
":",
"tests",
"+=",
"test_data",
".",
"get",
"(",
"'tests'",
")",
"if",
"'include'",
"in",
"test_data",
":",
"for",
"description_name",
"in",
"test_data",
".",
"get",
"(",
"'include'",
")",
":",
"tests",
"+=",
"get_tests_from_description",
"(",
"description_name",
",",
"descriptions",
",",
"parsed",
")",
"return",
"tests"
] | python | Recursively collect all tests in test description.
Args:
name (str): Yaml test description file name.
descriptions (dict): Dict of test description name
(key) and absolute file paths
(value).
parsed (list): List of description paths which have
already been parsed to prevent infinte
recursion.
Returns:
A list of expanded test files. | false |
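An illustrative call, writing two small description files to a temp directory (file names and test names are hypothetical); it assumes `get_yaml_config` simply loads the YAML file it is given:

    import os, tempfile, yaml

    tmp = tempfile.mkdtemp()
    bodies = {'base': {'tests': ['test_a', 'test_b']},
              'full': {'tests': ['test_c'], 'include': ['base']}}
    descriptions = {}
    for name, body in bodies.items():
        path = os.path.join(tmp, name + '.yaml')
        with open(path, 'w') as fh:
            yaml.safe_dump(body, fh)
        descriptions[name] = path

    # 'full' contributes its own tests, then pulls in everything from 'base'
    print(get_tests_from_description('full', descriptions))
    # ['test_c', 'test_a', 'test_b']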
1,924,580 | def stack(self, k=5, stratify=False, shuffle=True, seed=100, full_test=True):
"""Stack a single model. You should rarely be using this method. Use `ModelsPipeline.stack` instead.
Parameters
----------
k : int, default 5
stratify : bool, default False
shuffle : bool, default True
seed : int, default 100
full_test : bool, default True
If `True` then evaluate test dataset on the full data otherwise take the mean of every fold.
Returns
-------
`Dataset` with out of fold predictions.
"""
train = None
test = []
if self.use_cache:
pdict = {'k': k, 'stratify': stratify, 'shuffle': shuffle, 'seed': seed, 'full_test': full_test}
dhash = self._dhash(pdict)
c = Cache(dhash, prefix='s')
if c.available:
logger.info('Loading %s\'s stack results from cache.' % self._name)
train = c.retrieve('train')
test = c.retrieve('test')
y_train = c.retrieve('y_train')
return Dataset(X_train=train, y_train=y_train, X_test=test)
elif not self.dataset.loaded:
self.dataset.load()
for i, fold in enumerate(self.dataset.kfold(k, stratify=stratify, seed=seed, shuffle=shuffle)):
X_train, y_train, X_test, y_test, train_index, test_index = fold
logger.info('Calculating %s\'s fold #%s' % (self._name, i + 1))
if full_test:
prediction = reshape_1d(self._predict(X_train, y_train, X_test, y_test))
else:
xt_shape = X_test.shape[0]
x_t = concat(X_test, self.dataset.X_test)
prediction_concat = reshape_1d(self._predict(X_train, y_train, x_t))
prediction, prediction_test = tsplit(prediction_concat, xt_shape)
test.append(prediction_test)
if train is None:
train = np.zeros((self.dataset.X_train.shape[0], prediction.shape[1]))
train[test_index] = prediction
if full_test:
logger.info('Calculating %s\'s test data' % self._name)
test = self._predict(self.dataset.X_train, self.dataset.y_train, self.dataset.X_test)
else:
test = np.mean(test, axis=0)
test = reshape_1d(test)
if self.use_cache:
c.store('train', train)
c.store('test', test)
c.store('y_train', self.dataset.y_train)
return Dataset(X_train=train, y_train=self.dataset.y_train, X_test=test) | [
"def",
"stack",
"(",
"self",
",",
"k",
"=",
"5",
",",
"stratify",
"=",
"False",
",",
"shuffle",
"=",
"True",
",",
"seed",
"=",
"100",
",",
"full_test",
"=",
"True",
")",
":",
"train",
"=",
"None",
"test",
"=",
"[",
"]",
"if",
"self",
".",
"use_cache",
":",
"pdict",
"=",
"{",
"'k'",
":",
"k",
",",
"'stratify'",
":",
"stratify",
",",
"'shuffle'",
":",
"shuffle",
",",
"'seed'",
":",
"seed",
",",
"'full_test'",
":",
"full_test",
"}",
"dhash",
"=",
"self",
".",
"_dhash",
"(",
"pdict",
")",
"c",
"=",
"Cache",
"(",
"dhash",
",",
"prefix",
"=",
"'s'",
")",
"if",
"c",
".",
"available",
":",
"logger",
".",
"info",
"(",
"'Loading %s\\'s stack results from cache.'",
"%",
"self",
".",
"_name",
")",
"train",
"=",
"c",
".",
"retrieve",
"(",
"'train'",
")",
"test",
"=",
"c",
".",
"retrieve",
"(",
"'test'",
")",
"y_train",
"=",
"c",
".",
"retrieve",
"(",
"'y_train'",
")",
"return",
"Dataset",
"(",
"X_train",
"=",
"train",
",",
"y_train",
"=",
"y_train",
",",
"X_test",
"=",
"test",
")",
"elif",
"not",
"self",
".",
"dataset",
".",
"loaded",
":",
"self",
".",
"dataset",
".",
"load",
"(",
")",
"for",
"i",
",",
"fold",
"in",
"enumerate",
"(",
"self",
".",
"dataset",
".",
"kfold",
"(",
"k",
",",
"stratify",
"=",
"stratify",
",",
"seed",
"=",
"seed",
",",
"shuffle",
"=",
"shuffle",
")",
")",
":",
"X_train",
",",
"y_train",
",",
"X_test",
",",
"y_test",
",",
"train_index",
",",
"test_index",
"=",
"fold",
"logger",
".",
"info",
"(",
"'Calculating %s\\'s fold #%s'",
"%",
"(",
"self",
".",
"_name",
",",
"i",
"+",
"1",
")",
")",
"if",
"full_test",
":",
"prediction",
"=",
"reshape_1d",
"(",
"self",
".",
"_predict",
"(",
"X_train",
",",
"y_train",
",",
"X_test",
",",
"y_test",
")",
")",
"else",
":",
"xt_shape",
"=",
"X_test",
".",
"shape",
"[",
"0",
"]",
"x_t",
"=",
"concat",
"(",
"X_test",
",",
"self",
".",
"dataset",
".",
"X_test",
")",
"prediction_concat",
"=",
"reshape_1d",
"(",
"self",
".",
"_predict",
"(",
"X_train",
",",
"y_train",
",",
"x_t",
")",
")",
"prediction",
",",
"prediction_test",
"=",
"tsplit",
"(",
"prediction_concat",
",",
"xt_shape",
")",
"test",
".",
"append",
"(",
"prediction_test",
")",
"if",
"train",
"is",
"None",
":",
"train",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"dataset",
".",
"X_train",
".",
"shape",
"[",
"0",
"]",
",",
"prediction",
".",
"shape",
"[",
"1",
"]",
")",
")",
"train",
"[",
"test_index",
"]",
"=",
"prediction",
"if",
"full_test",
":",
"logger",
".",
"info",
"(",
"'Calculating %s\\'s test data'",
"%",
"self",
".",
"_name",
")",
"test",
"=",
"self",
".",
"_predict",
"(",
"self",
".",
"dataset",
".",
"X_train",
",",
"self",
".",
"dataset",
".",
"y_train",
",",
"self",
".",
"dataset",
".",
"X_test",
")",
"else",
":",
"test",
"=",
"np",
".",
"mean",
"(",
"test",
",",
"axis",
"=",
"0",
")",
"test",
"=",
"reshape_1d",
"(",
"test",
")",
"if",
"self",
".",
"use_cache",
":",
"c",
".",
"store",
"(",
"'train'",
",",
"train",
")",
"c",
".",
"store",
"(",
"'test'",
",",
"test",
")",
"c",
".",
"store",
"(",
"'y_train'",
",",
"self",
".",
"dataset",
".",
"y_train",
")",
"return",
"Dataset",
"(",
"X_train",
"=",
"train",
",",
"y_train",
"=",
"self",
".",
"dataset",
".",
"y_train",
",",
"X_test",
"=",
"test",
")"
] | python | Stack a single model. You should rarely be using this method. Use `ModelsPipeline.stack` instead.
Parameters
----------
k : int, default 5
stratify : bool, default False
shuffle : bool, default True
seed : int, default 100
full_test : bool, default True
If `True` then evaluate test dataset on the full data otherwise take the mean of every fold.
Returns
-------
`Dataset` with out of fold predictions. | false |
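The core of the method is standard out-of-fold stacking. A minimal, self-contained sketch of that idea with scikit-learn stand-ins (none of the library's Dataset/Cache machinery is used here):

    import numpy as np
    from sklearn.model_selection import KFold
    from sklearn.linear_model import Ridge

    X, y = np.random.rand(100, 5), np.random.rand(100)
    X_test = np.random.rand(20, 5)

    oof_train = np.zeros(len(y))
    fold_test_preds = []
    for train_idx, valid_idx in KFold(n_splits=5, shuffle=True, random_state=100).split(X):
        model = Ridge().fit(X[train_idx], y[train_idx])
        oof_train[valid_idx] = model.predict(X[valid_idx])   # out-of-fold train predictions
        fold_test_preds.append(model.predict(X_test))        # per-fold test predictions

    test_mean = np.mean(fold_test_preds, axis=0)             # full_test=False behaviour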
2,501,072 | def PKCS_GET_query(self, req_hook, req_args):
''' Generic GET query method '''
# GET request methods only require sessionTokens
headers = {'content-type': 'application/json',
'sessionToken': self.__session__}
# HTTP GET query method using requests module
try:
if req_args is None:
response = requests.get(self.__url__ + req_hook,
headers=headers,
cert=(self.__crt__, self.__key__),
verify=True)
else:
response = requests.get(self.__url__ + req_hook + str(req_args),
headers=headers,
cert=(self.__crt__, self.__key__),
verify=True)
except requests.exceptions.RequestException as err:
self.logger.error(err)
return '500', 'Internal Error in PKCS_RESTful.GET_query()'
# return the token
self.logger.debug('%s: %s' % (response.status_code, response.text))
return response.status_code, response.text | [
"def",
"PKCS_GET_query",
"(",
"self",
",",
"req_hook",
",",
"req_args",
")",
":",
"headers",
"=",
"{",
"'content-type'",
":",
"'application/json'",
",",
"'sessionToken'",
":",
"self",
".",
"__session__",
"}",
"try",
":",
"if",
"req_args",
"is",
"None",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"self",
".",
"__url__",
"+",
"req_hook",
",",
"headers",
"=",
"headers",
",",
"cert",
"=",
"(",
"self",
".",
"__crt__",
",",
"self",
".",
"__key__",
")",
",",
"verify",
"=",
"True",
")",
"else",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"self",
".",
"__url__",
"+",
"req_hook",
"+",
"str",
"(",
"req_args",
")",
",",
"headers",
"=",
"headers",
",",
"cert",
"=",
"(",
"self",
".",
"__crt__",
",",
"self",
".",
"__key__",
")",
",",
"verify",
"=",
"True",
")",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
"as",
"err",
":",
"self",
".",
"logger",
".",
"error",
"(",
"err",
")",
"return",
"'500'",
",",
"'Internal Error in PKCS_RESTful.GET_query()'",
"self",
".",
"logger",
".",
"debug",
"(",
"'%s: %s'",
"%",
"(",
"response",
".",
"status_code",
",",
"response",
".",
"text",
")",
")",
"return",
"response",
".",
"status_code",
",",
"response",
".",
"text"
] | python | Generic GET query method | false |
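A standalone requests sketch of the same call shape (URL, token value and certificate paths are hypothetical):

    import requests

    response = requests.get(
        'https://pod.example.com/pod/v2/user',
        headers={'content-type': 'application/json', 'sessionToken': 'SESSION-TOKEN'},
        cert=('client.crt', 'client.key'),   # client certificate / key pair
        verify=True,
    )
    print(response.status_code, response.text)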
1,760,454 | def _install_container_bcbio_system(datadir):
"""Install limited bcbio_system.yaml file for setting core and memory usage.
Adds any non-specific programs to the exposed bcbio_system.yaml file, only
when upgrade happening inside a docker container.
"""
base_file = os.path.join(datadir, "config", "bcbio_system.yaml")
if not os.path.exists(base_file):
return
expose_file = os.path.join(datadir, "galaxy", "bcbio_system.yaml")
expose = set(["memory", "cores", "jvm_opts"])
with open(base_file) as in_handle:
config = yaml.safe_load(in_handle)
if os.path.exists(expose_file):
with open(expose_file) as in_handle:
expose_config = yaml.safe_load(in_handle)
else:
expose_config = {"resources": {}}
for pname, vals in config["resources"].items():
expose_vals = {}
for k, v in vals.items():
if k in expose:
expose_vals[k] = v
if len(expose_vals) > 0 and pname not in expose_config["resources"]:
expose_config["resources"][pname] = expose_vals
if expose_file and os.path.exists(os.path.dirname(expose_file)):
with open(expose_file, "w") as out_handle:
yaml.safe_dump(expose_config, out_handle, default_flow_style=False, allow_unicode=False)
return expose_file | [
"def",
"_install_container_bcbio_system",
"(",
"datadir",
")",
":",
"base_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"datadir",
",",
"\"config\"",
",",
"\"bcbio_system.yaml\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"base_file",
")",
":",
"return",
"expose_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"datadir",
",",
"\"galaxy\"",
",",
"\"bcbio_system.yaml\"",
")",
"expose",
"=",
"set",
"(",
"[",
"\"memory\"",
",",
"\"cores\"",
",",
"\"jvm_opts\"",
"]",
")",
"with",
"open",
"(",
"base_file",
")",
"as",
"in_handle",
":",
"config",
"=",
"yaml",
".",
"safe_load",
"(",
"in_handle",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"expose_file",
")",
":",
"with",
"open",
"(",
"expose_file",
")",
"as",
"in_handle",
":",
"expose_config",
"=",
"yaml",
".",
"safe_load",
"(",
"in_handle",
")",
"else",
":",
"expose_config",
"=",
"{",
"\"resources\"",
":",
"{",
"}",
"}",
"for",
"pname",
",",
"vals",
"in",
"config",
"[",
"\"resources\"",
"]",
".",
"items",
"(",
")",
":",
"expose_vals",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"vals",
".",
"items",
"(",
")",
":",
"if",
"k",
"in",
"expose",
":",
"expose_vals",
"[",
"k",
"]",
"=",
"v",
"if",
"len",
"(",
"expose_vals",
")",
">",
"0",
"and",
"pname",
"not",
"in",
"expose_config",
"[",
"\"resources\"",
"]",
":",
"expose_config",
"[",
"\"resources\"",
"]",
"[",
"pname",
"]",
"=",
"expose_vals",
"if",
"expose_file",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"expose_file",
")",
")",
":",
"with",
"open",
"(",
"expose_file",
",",
"\"w\"",
")",
"as",
"out_handle",
":",
"yaml",
".",
"safe_dump",
"(",
"expose_config",
",",
"out_handle",
",",
"default_flow_style",
"=",
"False",
",",
"allow_unicode",
"=",
"False",
")",
"return",
"expose_file"
] | python | Install limited bcbio_system.yaml file for setting core and memory usage.
Adds any non-specific programs to the exposed bcbio_system.yaml file, only
when upgrade happening inside a docker container. | false |
1,578,395 | def create_transform_job(self, config, wait_for_completion=True,
check_interval=30, max_ingestion_time=None):
"""
Create a transform job
:param config: the config for transform job
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to transform job creation
"""
self.check_s3_url(config['TransformInput']['DataSource']['S3DataSource']['S3Uri'])
response = self.get_conn().create_transform_job(**config)
if wait_for_completion:
self.check_status(config['TransformJobName'],
'TransformJobStatus',
self.describe_transform_job,
check_interval, max_ingestion_time
)
return response | [
"def",
"create_transform_job",
"(",
"self",
",",
"config",
",",
"wait_for_completion",
"=",
"True",
",",
"check_interval",
"=",
"30",
",",
"max_ingestion_time",
"=",
"None",
")",
":",
"self",
".",
"check_s3_url",
"(",
"config",
"[",
"'TransformInput'",
"]",
"[",
"'DataSource'",
"]",
"[",
"'S3DataSource'",
"]",
"[",
"'S3Uri'",
"]",
")",
"response",
"=",
"self",
".",
"get_conn",
"(",
")",
".",
"create_transform_job",
"(",
"**",
"config",
")",
"if",
"wait_for_completion",
":",
"self",
".",
"check_status",
"(",
"config",
"[",
"'TransformJobName'",
"]",
",",
"'TransformJobStatus'",
",",
"self",
".",
"describe_transform_job",
",",
"check_interval",
",",
"max_ingestion_time",
")",
"return",
"response"
] | python | Create a transform job
:param config: the config for transform job
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to transform job creation | false |
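An illustrative config for the hook above. Only TransformJobName and the TransformInput S3 URI are referenced by the snippet; the remaining keys (ModelName, TransformOutput, TransformResources) are standard SageMaker CreateTransformJob fields assumed here for completeness, and the bucket and model names are made up:

    transform_config = {
        'TransformJobName': 'my-transform-job',
        'ModelName': 'my-model',
        'TransformInput': {
            'DataSource': {
                'S3DataSource': {'S3DataType': 'S3Prefix',
                                 'S3Uri': 's3://my-bucket/input/'}
            }
        },
        'TransformOutput': {'S3OutputPath': 's3://my-bucket/output/'},
        'TransformResources': {'InstanceType': 'ml.m5.large', 'InstanceCount': 1},
    }

    # `hook` is an already-constructed SageMaker hook instance
    hook.create_transform_job(transform_config, wait_for_completion=False)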
2,173,776 | def simplified_edges(self):
"""
A generator for getting all of the edges without consuming extra
memory.
"""
for group, edgelist in self.edges.items():
for u, v, d in edgelist:
yield (u, v) | [
"def",
"simplified_edges",
"(",
"self",
")",
":",
"for",
"group",
",",
"edgelist",
"in",
"self",
".",
"edges",
".",
"items",
"(",
")",
":",
"for",
"u",
",",
"v",
",",
"d",
"in",
"edgelist",
":",
"yield",
"(",
"u",
",",
"v",
")"
] | python | A generator for getting all of the edges without consuming extra
memory. | false |
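The same traversal written against a plain dict, to show the expected shape of `self.edges` (group name mapped to (u, v, data) triples):

    edges = {'roads': [('a', 'b', {'w': 1}), ('b', 'c', {'w': 2})],
             'rail':  [('c', 'd', {'w': 3})]}

    pairs = ((u, v) for edgelist in edges.values() for u, v, d in edgelist)
    print(list(pairs))   # [('a', 'b'), ('b', 'c'), ('c', 'd')]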
2,366,921 | def __init__(self, status=SetNodeNameConfirmationStatus.OK, node_id=0):
"""Init Frame."""
super().__init__(Command.GW_SET_NODE_NAME_CFM)
self.status = status
self.node_id = node_id | [
"def",
"__init__",
"(",
"self",
",",
"status",
"=",
"SetNodeNameConfirmationStatus",
".",
"OK",
",",
"node_id",
"=",
"0",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"Command",
".",
"GW_SET_NODE_NAME_CFM",
")",
"self",
".",
"status",
"=",
"status",
"self",
".",
"node_id",
"=",
"node_id"
] | python | Init Frame. | false |
2,201,369 | def get_read_type(self, bam_file, n=10):
"""
Gets the read type (single, paired) and length of bam file.
:param str bam_file: Bam file to determine read attributes.
:param int n: Number of lines to read from bam file.
:return str, int: tuple of read type and read length
"""
from collections import Counter
try:
p = subprocess.Popen([self.tools.samtools, 'view', bam_file],
stdout=subprocess.PIPE)
# Count paired alignments
paired = 0
read_length = Counter()
while n > 0:
line = p.stdout.next().split("\t")
flag = int(line[1])
read_length[len(line[9])] += 1
if 1 & flag: # check decimal flag contains 1 (paired)
paired += 1
n -= 1
p.kill()
except IOError("Cannot read provided bam file.") as e:
raise e
# Get most abundant read read_length
read_length = sorted(read_length)[-1]
# If at least half is paired, return True
if paired > (n / 2.):
return "PE", read_length
else:
return "SE", read_length | [
"def",
"get_read_type",
"(",
"self",
",",
"bam_file",
",",
"n",
"=",
"10",
")",
":",
"from",
"collections",
"import",
"Counter",
"try",
":",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"self",
".",
"tools",
".",
"samtools",
",",
"'view'",
",",
"bam_file",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"paired",
"=",
"0",
"read_length",
"=",
"Counter",
"(",
")",
"while",
"n",
">",
"0",
":",
"line",
"=",
"p",
".",
"stdout",
".",
"next",
"(",
")",
".",
"split",
"(",
"\"\\t\"",
")",
"flag",
"=",
"int",
"(",
"line",
"[",
"1",
"]",
")",
"read_length",
"[",
"len",
"(",
"line",
"[",
"9",
"]",
")",
"]",
"+=",
"1",
"if",
"1",
"&",
"flag",
":",
"paired",
"+=",
"1",
"n",
"-=",
"1",
"p",
".",
"kill",
"(",
")",
"except",
"IOError",
"(",
"\"Cannot read provided bam file.\"",
")",
"as",
"e",
":",
"raise",
"e",
"read_length",
"=",
"sorted",
"(",
"read_length",
")",
"[",
"-",
"1",
"]",
"if",
"paired",
">",
"(",
"n",
"/",
"2.",
")",
":",
"return",
"\"PE\"",
",",
"read_length",
"else",
":",
"return",
"\"SE\"",
",",
"read_length"
] | python | Gets the read type (single, paired) and length of bam file.
:param str bam_file: Bam file to determine read attributes.
:param int n: Number of lines to read from bam file.
:return str, int: tuple of read type and read length | false |
1,906,266 | def initialize_callbacks(self):
"""Initializes all callbacks and save the result in the
``callbacks_`` attribute.
Both ``default_callbacks`` and ``callbacks`` are used (in that
order). Callbacks may either be initialized or not, and if
they don't have a name, the name is inferred from the class
name. The ``initialize`` method is called on all callbacks.
The final result will be a list of tuples, where each tuple
consists of a name and an initialized callback. If names are
not unique, a ValueError is raised.
"""
callbacks_ = []
class Dummy:
# We cannot use None as dummy value since None is a
# legitimate value to be set.
pass
for name, cb in self._uniquely_named_callbacks():
# check if callback itself is changed
param_callback = getattr(self, 'callbacks__' + name, Dummy)
if param_callback is not Dummy: # callback itself was set
cb = param_callback
# below: check for callback params
# don't set a parameter for non-existing callback
params = self._get_params_for('callbacks__{}'.format(name))
if (cb is None) and params:
raise ValueError("Trying to set a parameter for callback {} "
"which does not exist.".format(name))
if cb is None:
continue
if isinstance(cb, type): # uninitialized:
cb = cb(**params)
else:
cb.set_params(**params)
cb.initialize()
callbacks_.append((name, cb))
self.callbacks_ = callbacks_
return self | [
"def",
"initialize_callbacks",
"(",
"self",
")",
":",
"callbacks_",
"=",
"[",
"]",
"class",
"Dummy",
":",
"pass",
"for",
"name",
",",
"cb",
"in",
"self",
".",
"_uniquely_named_callbacks",
"(",
")",
":",
"param_callback",
"=",
"getattr",
"(",
"self",
",",
"'callbacks__'",
"+",
"name",
",",
"Dummy",
")",
"if",
"param_callback",
"is",
"not",
"Dummy",
":",
"cb",
"=",
"param_callback",
"params",
"=",
"self",
".",
"_get_params_for",
"(",
"'callbacks__{}'",
".",
"format",
"(",
"name",
")",
")",
"if",
"(",
"cb",
"is",
"None",
")",
"and",
"params",
":",
"raise",
"ValueError",
"(",
"\"Trying to set a parameter for callback {} \"",
"\"which does not exist.\"",
".",
"format",
"(",
"name",
")",
")",
"if",
"cb",
"is",
"None",
":",
"continue",
"if",
"isinstance",
"(",
"cb",
",",
"type",
")",
":",
"cb",
"=",
"cb",
"(",
"**",
"params",
")",
"else",
":",
"cb",
".",
"set_params",
"(",
"**",
"params",
")",
"cb",
".",
"initialize",
"(",
")",
"callbacks_",
".",
"append",
"(",
"(",
"name",
",",
"cb",
")",
")",
"self",
".",
"callbacks_",
"=",
"callbacks_",
"return",
"self"
] | python | Initializes all callbacks and save the result in the
``callbacks_`` attribute.
Both ``default_callbacks`` and ``callbacks`` are used (in that
order). Callbacks may either be initialized or not, and if
they don't have a name, the name is inferred from the class
name. The ``initialize`` method is called on all callbacks.
The final result will be a list of tuples, where each tuple
consists of a name and an initialized callback. If names are
not unique, a ValueError is raised. | false |
1,640,977 | def directory(name, profile=None, **kwargs):
'''
Create a directory in etcd.
name
The etcd directory name, for example: ``/foo/bar/baz``.
profile
Optional, defaults to ``None``. Sets the etcd profile to use which has
been defined in the Salt Master config.
.. code-block:: yaml
my_etd_config:
etcd.host: 127.0.0.1
etcd.port: 4001
'''
created = False
rtn = {
'name': name,
'comment': 'Directory exists',
'result': True,
'changes': {}
}
current = __salt__['etcd.get'](name, profile=profile, recurse=True, **kwargs)
if not current:
created = True
result = __salt__['etcd.set'](name, None, directory=True, profile=profile, **kwargs)
if result and result != current:
if created:
rtn['comment'] = 'New directory created'
rtn['changes'] = {
name: 'Created'
}
return rtn | [
"def",
"directory",
"(",
"name",
",",
"profile",
"=",
"None",
",",
"**",
"kwargs",
")",
":",
"created",
"=",
"False",
"rtn",
"=",
"{",
"'name'",
":",
"name",
",",
"'comment'",
":",
"'Directory exists'",
",",
"'result'",
":",
"True",
",",
"'changes'",
":",
"{",
"}",
"}",
"current",
"=",
"__salt__",
"[",
"'etcd.get'",
"]",
"(",
"name",
",",
"profile",
"=",
"profile",
",",
"recurse",
"=",
"True",
",",
"**",
"kwargs",
")",
"if",
"not",
"current",
":",
"created",
"=",
"True",
"result",
"=",
"__salt__",
"[",
"'etcd.set'",
"]",
"(",
"name",
",",
"None",
",",
"directory",
"=",
"True",
",",
"profile",
"=",
"profile",
",",
"**",
"kwargs",
")",
"if",
"result",
"and",
"result",
"!=",
"current",
":",
"if",
"created",
":",
"rtn",
"[",
"'comment'",
"]",
"=",
"'New directory created'",
"rtn",
"[",
"'changes'",
"]",
"=",
"{",
"name",
":",
"'Created'",
"}",
"return",
"rtn"
] | python | Create a directory in etcd.
name
The etcd directory name, for example: ``/foo/bar/baz``.
profile
Optional, defaults to ``None``. Sets the etcd profile to use which has
been defined in the Salt Master config.
.. code-block:: yaml
my_etd_config:
etcd.host: 127.0.0.1
etcd.port: 4001 | false |
1,733,008 | def get(self, txn_id):
"""Returns the TransactionReceipt
Args:
txn_id (str): the id of the transaction for which the receipt
should be retrieved.
Returns:
TransactionReceipt: The receipt for the given transaction id.
Raises:
KeyError: if the transaction id is unknown.
"""
if txn_id not in self._receipt_db:
raise KeyError('Unknown transaction id {}'.format(txn_id))
txn_receipt_bytes = self._receipt_db[txn_id]
txn_receipt = TransactionReceipt()
txn_receipt.ParseFromString(txn_receipt_bytes)
return txn_receipt | [
"def",
"get",
"(",
"self",
",",
"txn_id",
")",
":",
"if",
"txn_id",
"not",
"in",
"self",
".",
"_receipt_db",
":",
"raise",
"KeyError",
"(",
"'Unknown transaction id {}'",
".",
"format",
"(",
"txn_id",
")",
")",
"txn_receipt_bytes",
"=",
"self",
".",
"_receipt_db",
"[",
"txn_id",
"]",
"txn_receipt",
"=",
"TransactionReceipt",
"(",
")",
"txn_receipt",
".",
"ParseFromString",
"(",
"txn_receipt_bytes",
")",
"return",
"txn_receipt"
] | python | Returns the TransactionReceipt
Args:
txn_id (str): the id of the transaction for which the receipt
should be retrieved.
Returns:
TransactionReceipt: The receipt for the given transaction id.
Raises:
KeyError: if the transaction id is unknown. | false |
2,124,379 | def memoize_nullary(f):
"""
Memoizes a function that takes no arguments. The memoization lasts only as
long as we hold a reference to the returned function.
"""
def func():
if not hasattr(func, 'retval'):
func.retval = f()
return func.retval
return func | [
"def",
"memoize_nullary",
"(",
"f",
")",
":",
"def",
"func",
"(",
")",
":",
"if",
"not",
"hasattr",
"(",
"func",
",",
"'retval'",
")",
":",
"func",
".",
"retval",
"=",
"f",
"(",
")",
"return",
"func",
".",
"retval",
"return",
"func"
] | python | Memoizes a function that takes no arguments. The memoization lasts only as
long as we hold a reference to the returned function. | false |
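A quick usage sketch, assuming `memoize_nullary` is in scope:

    import time

    expensive = memoize_nullary(lambda: time.time())

    first = expensive()     # body runs once and the result is cached on the wrapper
    second = expensive()    # returns the cached value
    print(first == second)  # True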
2,502,993 | def diff(old, new):
"""
Returns differences of two network topologies old and new
in NetJSON NetworkGraph compatible format
"""
protocol = new.protocol
version = new.version
revision = new.revision
metric = new.metric
# calculate differences
in_both = _find_unchanged(old.graph, new.graph)
added_nodes, added_edges = _make_diff(old.graph, new.graph, in_both)
removed_nodes, removed_edges = _make_diff(new.graph, old.graph, in_both)
changed_edges = _find_changed(old.graph, new.graph, in_both)
# create netjson objects
# or assign None if no changes
if added_nodes.nodes() or added_edges.edges():
added = _netjson_networkgraph(protocol, version, revision, metric,
added_nodes.nodes(data=True),
added_edges.edges(data=True),
dict=True)
else:
added = None
if removed_nodes.nodes() or removed_edges.edges():
removed = _netjson_networkgraph(protocol, version, revision, metric,
removed_nodes.nodes(data=True),
removed_edges.edges(data=True),
dict=True)
else:
removed = None
if changed_edges:
changed = _netjson_networkgraph(protocol, version, revision, metric,
[],
changed_edges,
dict=True)
else:
changed = None
return OrderedDict((
('added', added),
('removed', removed),
('changed', changed)
)) | [
"def",
"diff",
"(",
"old",
",",
"new",
")",
":",
"protocol",
"=",
"new",
".",
"protocol",
"version",
"=",
"new",
".",
"version",
"revision",
"=",
"new",
".",
"revision",
"metric",
"=",
"new",
".",
"metric",
"in_both",
"=",
"_find_unchanged",
"(",
"old",
".",
"graph",
",",
"new",
".",
"graph",
")",
"added_nodes",
",",
"added_edges",
"=",
"_make_diff",
"(",
"old",
".",
"graph",
",",
"new",
".",
"graph",
",",
"in_both",
")",
"removed_nodes",
",",
"removed_edges",
"=",
"_make_diff",
"(",
"new",
".",
"graph",
",",
"old",
".",
"graph",
",",
"in_both",
")",
"changed_edges",
"=",
"_find_changed",
"(",
"old",
".",
"graph",
",",
"new",
".",
"graph",
",",
"in_both",
")",
"if",
"added_nodes",
".",
"nodes",
"(",
")",
"or",
"added_edges",
".",
"edges",
"(",
")",
":",
"added",
"=",
"_netjson_networkgraph",
"(",
"protocol",
",",
"version",
",",
"revision",
",",
"metric",
",",
"added_nodes",
".",
"nodes",
"(",
"data",
"=",
"True",
")",
",",
"added_edges",
".",
"edges",
"(",
"data",
"=",
"True",
")",
",",
"dict",
"=",
"True",
")",
"else",
":",
"added",
"=",
"None",
"if",
"removed_nodes",
".",
"nodes",
"(",
")",
"or",
"removed_edges",
".",
"edges",
"(",
")",
":",
"removed",
"=",
"_netjson_networkgraph",
"(",
"protocol",
",",
"version",
",",
"revision",
",",
"metric",
",",
"removed_nodes",
".",
"nodes",
"(",
"data",
"=",
"True",
")",
",",
"removed_edges",
".",
"edges",
"(",
"data",
"=",
"True",
")",
",",
"dict",
"=",
"True",
")",
"else",
":",
"removed",
"=",
"None",
"if",
"changed_edges",
":",
"changed",
"=",
"_netjson_networkgraph",
"(",
"protocol",
",",
"version",
",",
"revision",
",",
"metric",
",",
"[",
"]",
",",
"changed_edges",
",",
"dict",
"=",
"True",
")",
"else",
":",
"changed",
"=",
"None",
"return",
"OrderedDict",
"(",
"(",
"(",
"'added'",
",",
"added",
")",
",",
"(",
"'removed'",
",",
"removed",
")",
",",
"(",
"'changed'",
",",
"changed",
")",
")",
")"
] | python | Returns differences of two network topologies old and new
in NetJSON NetworkGraph compatible format | false |
2,561,628 | def delete_url(self, url, token=''):
"""
Returns a delete resquest object taking in a url and user token.
Arguments:
url (str): The url to make post to
token (str): The authentication token
Returns:
obj: Delete request object
"""
if (token == ''):
token = self._user_token
return requests.delete(url,
headers={
'Authorization': 'Token {}'.format(token)},
verify=False,) | [
"def",
"delete_url",
"(",
"self",
",",
"url",
",",
"token",
"=",
"''",
")",
":",
"if",
"(",
"token",
"==",
"''",
")",
":",
"token",
"=",
"self",
".",
"_user_token",
"return",
"requests",
".",
"delete",
"(",
"url",
",",
"headers",
"=",
"{",
"'Authorization'",
":",
"'Token {}'",
".",
"format",
"(",
"token",
")",
"}",
",",
"verify",
"=",
"False",
",",
")"
] | python | Returns a delete request object taking in a url and user token.
Arguments:
url (str): The url to make post to
token (str): The authentication token
Returns:
obj: Delete request object | false |
1,913,919 | def load_image(self, file_path, redraw=True):
"""
Accepts a path to an 8 x 8 image file and updates the LED matrix with
the image
"""
if not os.path.exists(file_path):
raise IOError('%s not found' % file_path)
img = Image.open(file_path).convert('RGB')
pixel_list = list(map(list, img.getdata()))
if redraw:
self.set_pixels(pixel_list)
return pixel_list | [
"def",
"load_image",
"(",
"self",
",",
"file_path",
",",
"redraw",
"=",
"True",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"file_path",
")",
":",
"raise",
"IOError",
"(",
"'%s not found'",
"%",
"file_path",
")",
"img",
"=",
"Image",
".",
"open",
"(",
"file_path",
")",
".",
"convert",
"(",
"'RGB'",
")",
"pixel_list",
"=",
"list",
"(",
"map",
"(",
"list",
",",
"img",
".",
"getdata",
"(",
")",
")",
")",
"if",
"redraw",
":",
"self",
".",
"set_pixels",
"(",
"pixel_list",
")",
"return",
"pixel_list"
] | python | Accepts a path to an 8 x 8 image file and updates the LED matrix with
the image | false |
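A round-trip sketch with Pillow (the file name is hypothetical): write an 8 x 8 RGB image, read it back exactly the way the method does, and confirm 64 [R, G, B] pixels come out:

    from PIL import Image

    Image.new('RGB', (8, 8), color=(255, 0, 0)).save('demo8x8.png')

    img = Image.open('demo8x8.png').convert('RGB')
    pixel_list = list(map(list, img.getdata()))
    print(len(pixel_list), pixel_list[0])   # 64 [255, 0, 0]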
1,926,756 | def passthrough(args):
"""
%prog passthrough chrY.vcf chrY.new.vcf
Pass through Y and MT vcf.
"""
p = OptionParser(passthrough.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
vcffile, newvcffile = args
fp = open(vcffile)
fw = open(newvcffile, "w")
gg = ["0/0", "0/1", "1/1"]
for row in fp:
if row[0] == "#":
print(row.strip(), file=fw)
continue
v = VcfLine(row)
v.filter = "PASS"
v.format = "GT:GP"
probs = [0] * 3
probs[gg.index(v.genotype)] = 1
v.genotype = v.genotype.replace("/", "|") + \
":{0}".format(",".join("{0:.3f}".format(x) for x in probs))
print(v, file=fw)
fw.close() | [
"def",
"passthrough",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"passthrough",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"vcffile",
",",
"newvcffile",
"=",
"args",
"fp",
"=",
"open",
"(",
"vcffile",
")",
"fw",
"=",
"open",
"(",
"newvcffile",
",",
"\"w\"",
")",
"gg",
"=",
"[",
"\"0/0\"",
",",
"\"0/1\"",
",",
"\"1/1\"",
"]",
"for",
"row",
"in",
"fp",
":",
"if",
"row",
"[",
"0",
"]",
"==",
"\"#\"",
":",
"print",
"(",
"row",
".",
"strip",
"(",
")",
",",
"file",
"=",
"fw",
")",
"continue",
"v",
"=",
"VcfLine",
"(",
"row",
")",
"v",
".",
"filter",
"=",
"\"PASS\"",
"v",
".",
"format",
"=",
"\"GT:GP\"",
"probs",
"=",
"[",
"0",
"]",
"*",
"3",
"probs",
"[",
"gg",
".",
"index",
"(",
"v",
".",
"genotype",
")",
"]",
"=",
"1",
"v",
".",
"genotype",
"=",
"v",
".",
"genotype",
".",
"replace",
"(",
"\"/\"",
",",
"\"|\"",
")",
"+",
"\":{0}\"",
".",
"format",
"(",
"\",\"",
".",
"join",
"(",
"\"{0:.3f}\"",
".",
"format",
"(",
"x",
")",
"for",
"x",
"in",
"probs",
")",
")",
"print",
"(",
"v",
",",
"file",
"=",
"fw",
")",
"fw",
".",
"close",
"(",
")"
] | python | %prog passthrough chrY.vcf chrY.new.vcf
Pass through Y and MT vcf. | false |
1,986,234 | def context(fname, node):
"""
Context manager managing exceptions and adding line number of the
current node and name of the current file to the error message.
:param fname: the current file being processed
:param node: the current node being processed
"""
try:
yield node
except Exception:
etype, exc, tb = sys.exc_info()
msg = 'node %s: %s, line %s of %s' % (
striptag(node.tag), exc, getattr(node, 'lineno', '?'), fname)
raise_(etype, msg, tb) | [
"def",
"context",
"(",
"fname",
",",
"node",
")",
":",
"try",
":",
"yield",
"node",
"except",
"Exception",
":",
"etype",
",",
"exc",
",",
"tb",
"=",
"sys",
".",
"exc_info",
"(",
")",
"msg",
"=",
"'node %s: %s, line %s of %s'",
"%",
"(",
"striptag",
"(",
"node",
".",
"tag",
")",
",",
"exc",
",",
"getattr",
"(",
"node",
",",
"'lineno'",
",",
"'?'",
")",
",",
"fname",
")",
"raise_",
"(",
"etype",
",",
"msg",
",",
"tb",
")"
] | python | Context manager managing exceptions and adding line number of the
current node and name of the current file to the error message.
:param fname: the current file being processed
:param node: the current node being processed | false |
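A usage sketch, assuming `context` and its `striptag`/`raise_` helpers are importable from the module; any exception raised while working on the node is re-raised with the node tag, line number and file name folded into the message:

    from xml.etree import ElementTree

    node = ElementTree.fromstring('<job id="1"/>')

    with context('job.xml', node) as n:
        print(n.get('id'))                    # normal work on the node

    try:
        with context('job.xml', node):
            raise ValueError('bad attribute')
    except ValueError as exc:
        print(exc)   # message now carries the node tag, the line number ('?') and 'job.xml'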
1,580,874 | def export_datasource_schema(back_references):
"""Export datasource YAML schema to stdout"""
data = dict_import_export.export_schema_to_dict(
back_references=back_references)
yaml.safe_dump(data, stdout, default_flow_style=False) | [
"def",
"export_datasource_schema",
"(",
"back_references",
")",
":",
"data",
"=",
"dict_import_export",
".",
"export_schema_to_dict",
"(",
"back_references",
"=",
"back_references",
")",
"yaml",
".",
"safe_dump",
"(",
"data",
",",
"stdout",
",",
"default_flow_style",
"=",
"False",
")"
] | python | Export datasource YAML schema to stdout | false |
2,464,142 | def parserunstats(self):
"""Parses the XML run statistics file (GenerateFASTQRunStatistics.xml). In some cases, the file is not
available. Equivalent data can be pulled from Basespace.Generate a text file name indexingQC.txt containing
the copied tables from the Indexing QC tab of the run on Basespace"""
# metadata = GenObject()
# If the default file GenerateFASTQRunStatistics.xml is present, parse it
if os.path.isfile(os.path.join(self.path, "GenerateFASTQRunStatistics.xml")):
# Create a list of keys for which values are to be extracted
datalist = ["SampleNumber", "SampleID", "SampleName", "NumberOfClustersPF"]
# Load the file as an xml ElementTree object
runstatistics = ElementTree.ElementTree(file=os.path.join(self.path, "GenerateFASTQRunStatistics.xml"))
# Iterate through all the elements in the object
# .iterfind() allow for the matching and iterating though matches
# This is stored as a float to allow subsequent calculations
tclusterspf = [float(element.text) for element in runstatistics.iterfind("RunStats/NumberOfClustersPF")][0]
# Iterate through all the elements (strains) in the OverallSamples/SummarizedSampleStatistics category
for element in runstatistics.iterfind("OverallSamples/SummarizedSampleStatistics"):
# List comprehension. Essentially iterate through each element for each category in datalist:
# (element.iter(category) and pull out the value for nestedelement
straindata = [nestedelement.text for category in datalist for nestedelement in element.iter(category)]
# Try and replicate the Illumina rules to create file names from "Sample_Name"
samplename = samplenamer(straindata, 1)
# Calculate the percentage of clusters associated with each strain
# noinspection PyTypeChecker
percentperstrain = "{:.2f}".format((float(straindata[3]) / tclusterspf * 100))
try:
# Use the sample number -1 as the index in the list of objects created in parsesamplesheet
strainindex = int(straindata[0]) - 1
# Set run to the .run object of self.samples[index]
run = self.samples[strainindex].run
# An assertion that compares the sample computer above to the previously entered sample name
# to ensure that the samples are the same
assert self.samples[strainindex].name == samplename, \
"Sample name does not match object name {0!r:s}".format(straindata[1])
# Add the appropriate values to the strain metadata object
run.SampleNumber = straindata[0]
run.NumberofClustersPF = straindata[3]
run.TotalClustersinRun = tclusterspf
run.PercentOfClusters = percentperstrain
run.flowcell = self.flowcell
run.instrument = self.instrument
except IndexError:
pass
elif os.path.isfile(os.path.join(self.path, 'indexingQC.txt')):
# Grab the first element from the second line in the file
tclusterspf = float(getline(os.path.join(self.path, "indexingQC.txt"), 2).split("\t")[0])
# Open the file and extract the relevant data
with open(os.path.join("indexingQC.txt")) as indexqc:
# Iterate through the file
for line in indexqc:
# Once "Index" is encountered, iterate through the rest of the file
if "Index" in line:
for subline in indexqc:
straindata = [x.rstrip() for x in subline.rstrip().split("\t")]
# Try and replicate the Illumina rules to create file names from "Sample_Name"
samplename = samplenamer(straindata, 1)
# Use the sample number -1 as the index in the list of objects created in parsesamplesheet
strainindex = int(straindata[0]) - 1
# Set run to the .run object of self.samples[index]
run = self.samples[strainindex].run
# An assertion that compares the sample computer above to the previously entered sample name
# to ensure that the samples are the same
assert self.samples[strainindex].name == samplename, \
"Sample name {} does not match object name {}" \
.format(self.samples[strainindex].name, samplename)
# Extract and format the percent of reads (passing filter) associated with each sample
# noinspection PyTypeChecker
percentperstrain = float("{:.2f}".format(float(straindata[5])))
# Calculate the number of reads passing filter associated with each sample:
# percentage of reads per strain times the total reads passing filter divided by 100
numberofclusterspf = int(percentperstrain * tclusterspf / 100)
# Update the object with the variables
run.SampleNumber = straindata[0]
run.NumberofClustersPF = numberofclusterspf
run.TotalClustersinRun = tclusterspf
run.PercentOfClusters = percentperstrain
run.flowcell = self.flowcell
run.instrument = self.instrument
else:
strainindex = 0
for i in range(len(self.samples)):
# Set run to the .run object of self.samples[index]
run = self.samples[strainindex].run
# Update the object with the variables
run.SampleNumber = strainindex + 1
run.NumberofClustersPF = 'NA'
run.TotalClustersinRun = 'NA'
run.PercentOfClusters = 'NA'
run.flowcell = self.flowcell
run.instrument = self.instrument
strainindex += 1 | [
"def",
"parserunstats",
"(",
"self",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"\"GenerateFASTQRunStatistics.xml\"",
")",
")",
":",
"datalist",
"=",
"[",
"\"SampleNumber\"",
",",
"\"SampleID\"",
",",
"\"SampleName\"",
",",
"\"NumberOfClustersPF\"",
"]",
"runstatistics",
"=",
"ElementTree",
".",
"ElementTree",
"(",
"file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"\"GenerateFASTQRunStatistics.xml\"",
")",
")",
"tclusterspf",
"=",
"[",
"float",
"(",
"element",
".",
"text",
")",
"for",
"element",
"in",
"runstatistics",
".",
"iterfind",
"(",
"\"RunStats/NumberOfClustersPF\"",
")",
"]",
"[",
"0",
"]",
"for",
"element",
"in",
"runstatistics",
".",
"iterfind",
"(",
"\"OverallSamples/SummarizedSampleStatistics\"",
")",
":",
"straindata",
"=",
"[",
"nestedelement",
".",
"text",
"for",
"category",
"in",
"datalist",
"for",
"nestedelement",
"in",
"element",
".",
"iter",
"(",
"category",
")",
"]",
"samplename",
"=",
"samplenamer",
"(",
"straindata",
",",
"1",
")",
"percentperstrain",
"=",
"\"{:.2f}\"",
".",
"format",
"(",
"(",
"float",
"(",
"straindata",
"[",
"3",
"]",
")",
"/",
"tclusterspf",
"*",
"100",
")",
")",
"try",
":",
"strainindex",
"=",
"int",
"(",
"straindata",
"[",
"0",
"]",
")",
"-",
"1",
"run",
"=",
"self",
".",
"samples",
"[",
"strainindex",
"]",
".",
"run",
"assert",
"self",
".",
"samples",
"[",
"strainindex",
"]",
".",
"name",
"==",
"samplename",
",",
"\"Sample name does not match object name {0!r:s}\"",
".",
"format",
"(",
"straindata",
"[",
"1",
"]",
")",
"run",
".",
"SampleNumber",
"=",
"straindata",
"[",
"0",
"]",
"run",
".",
"NumberofClustersPF",
"=",
"straindata",
"[",
"3",
"]",
"run",
".",
"TotalClustersinRun",
"=",
"tclusterspf",
"run",
".",
"PercentOfClusters",
"=",
"percentperstrain",
"run",
".",
"flowcell",
"=",
"self",
".",
"flowcell",
"run",
".",
"instrument",
"=",
"self",
".",
"instrument",
"except",
"IndexError",
":",
"pass",
"elif",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"'indexingQC.txt'",
")",
")",
":",
"tclusterspf",
"=",
"float",
"(",
"getline",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"\"indexingQC.txt\"",
")",
",",
"2",
")",
".",
"split",
"(",
"\"\\t\"",
")",
"[",
"0",
"]",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"\"indexingQC.txt\"",
")",
")",
"as",
"indexqc",
":",
"for",
"line",
"in",
"indexqc",
":",
"if",
"\"Index\"",
"in",
"line",
":",
"for",
"subline",
"in",
"indexqc",
":",
"straindata",
"=",
"[",
"x",
".",
"rstrip",
"(",
")",
"for",
"x",
"in",
"subline",
".",
"rstrip",
"(",
")",
".",
"split",
"(",
"\"\\t\"",
")",
"]",
"samplename",
"=",
"samplenamer",
"(",
"straindata",
",",
"1",
")",
"strainindex",
"=",
"int",
"(",
"straindata",
"[",
"0",
"]",
")",
"-",
"1",
"run",
"=",
"self",
".",
"samples",
"[",
"strainindex",
"]",
".",
"run",
"assert",
"self",
".",
"samples",
"[",
"strainindex",
"]",
".",
"name",
"==",
"samplename",
",",
"\"Sample name {} does not match object name {}\"",
".",
"format",
"(",
"self",
".",
"samples",
"[",
"strainindex",
"]",
".",
"name",
",",
"samplename",
")",
"percentperstrain",
"=",
"float",
"(",
"\"{:.2f}\"",
".",
"format",
"(",
"float",
"(",
"straindata",
"[",
"5",
"]",
")",
")",
")",
"numberofclusterspf",
"=",
"int",
"(",
"percentperstrain",
"*",
"tclusterspf",
"/",
"100",
")",
"run",
".",
"SampleNumber",
"=",
"straindata",
"[",
"0",
"]",
"run",
".",
"NumberofClustersPF",
"=",
"numberofclusterspf",
"run",
".",
"TotalClustersinRun",
"=",
"tclusterspf",
"run",
".",
"PercentOfClusters",
"=",
"percentperstrain",
"run",
".",
"flowcell",
"=",
"self",
".",
"flowcell",
"run",
".",
"instrument",
"=",
"self",
".",
"instrument",
"else",
":",
"strainindex",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"samples",
")",
")",
":",
"run",
"=",
"self",
".",
"samples",
"[",
"strainindex",
"]",
".",
"run",
"run",
".",
"SampleNumber",
"=",
"strainindex",
"+",
"1",
"run",
".",
"NumberofClustersPF",
"=",
"'NA'",
"run",
".",
"TotalClustersinRun",
"=",
"'NA'",
"run",
".",
"PercentOfClusters",
"=",
"'NA'",
"run",
".",
"flowcell",
"=",
"self",
".",
"flowcell",
"run",
".",
"instrument",
"=",
"self",
".",
"instrument",
"strainindex",
"+=",
"1"
] | python | Parses the XML run statistics file (GenerateFASTQRunStatistics.xml). In some cases, the file is not
available. Equivalent data can be pulled from Basespace. Generate a text file named indexingQC.txt containing
the copied tables from the Indexing QC tab of the run on Basespace | false |
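A minimal standalone sketch of the ElementTree.iterfind pattern used by parserunstats above; the XML string, sample name, and cluster counts are invented stand-ins for a real GenerateFASTQRunStatistics.xml, and the script only illustrates the traversal.

# Sketch of the iterfind traversal used above; the XML content is made up.
import xml.etree.ElementTree as ElementTree
from io import StringIO

xml = """<StatisticsGenerateFASTQ>
  <RunStats><NumberOfClustersPF>1000000</NumberOfClustersPF></RunStats>
  <OverallSamples>
    <SummarizedSampleStatistics>
      <SampleNumber>1</SampleNumber>
      <SampleID>A</SampleID>
      <SampleName>sample_one</SampleName>
      <NumberOfClustersPF>250000</NumberOfClustersPF>
    </SummarizedSampleStatistics>
  </OverallSamples>
</StatisticsGenerateFASTQ>"""

runstatistics = ElementTree.ElementTree(file=StringIO(xml))
# Total clusters passing filter for the whole run.
tclusterspf = [float(e.text) for e in runstatistics.iterfind("RunStats/NumberOfClustersPF")][0]
datalist = ["SampleNumber", "SampleID", "SampleName", "NumberOfClustersPF"]
for element in runstatistics.iterfind("OverallSamples/SummarizedSampleStatistics"):
    # Collect the four fields of interest in the order given by datalist.
    straindata = [nested.text for category in datalist for nested in element.iter(category)]
    percentperstrain = "{:.2f}".format(float(straindata[3]) / tclusterspf * 100)
    print(straindata[2], percentperstrain)  # sample_one 25.00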
2,526,073 | def train(X_train, y_train, **kwargs):
'''
>>> corpus = CorpusReader('annot.opcorpora.xml')
>>> X_train, x_test, y_train, y_test = get_train_data(corpus, test_size=0.33, random_state=42)
>>> crf = train(X_train, y_train)
'''
crf = Trainer()
crf.set_params({
'c1': 1.0,
'c2': 0.001,
'max_iterations': 200,
'feature.possible_transitions': True,
})
for xseq, yseq in zip(X_train, y_train):
crf.append(xseq, yseq)
crf.train(TOKENIZATION_MODEL_PATH)
return crf | [
"def",
"train",
"(",
"X_train",
",",
"y_train",
",",
"**",
"kwargs",
")",
":",
"crf",
"=",
"Trainer",
"(",
")",
"crf",
".",
"set_params",
"(",
"{",
"'c1'",
":",
"1.0",
",",
"'c2'",
":",
"0.001",
",",
"'max_iterations'",
":",
"200",
",",
"'feature.possible_transitions'",
":",
"True",
",",
"}",
")",
"for",
"xseq",
",",
"yseq",
"in",
"zip",
"(",
"X_train",
",",
"y_train",
")",
":",
"crf",
".",
"append",
"(",
"xseq",
",",
"yseq",
")",
"crf",
".",
"train",
"(",
"TOKENIZATION_MODEL_PATH",
")",
"return",
"crf"
] | python | >>> corpus = CorpusReader('annot.opcorpora.xml')
>>> X_train, x_test, y_train, y_test = get_train_data(corpus, test_size=0.33, random_state=42)
>>> crf = train(X_train, y_train) | false |
2,691,873 | def post(self, command, output_dir, vars):
"""
Do some tasks after install
"""
if command.simulate:
return
# Find the 'project/' dir in the created paste project
project_path = join(getcwd(), vars['project'], 'project')
# 1. Mods
mods = self.get_mods(project_path, vars)
# 2. Create symlinks
for target, linkfile in self.get_symlinks(project_path, vars, mods):
print "* Symlink TO:", target, 'INTO:', linkfile
symlink(target, linkfile)
# 3. Git first initialization
call = Caller(vars['project'])
call('git', 'init', '.')
call('git', 'add', '.')
call('git', 'commit', '-m', 'First commit') | [
"def",
"post",
"(",
"self",
",",
"command",
",",
"output_dir",
",",
"vars",
")",
":",
"if",
"command",
".",
"simulate",
":",
"return",
"project_path",
"=",
"join",
"(",
"getcwd",
"(",
")",
",",
"vars",
"[",
"'project'",
"]",
",",
"'project'",
")",
"mods",
"=",
"self",
".",
"get_mods",
"(",
"project_path",
",",
"vars",
")",
"for",
"target",
",",
"linkfile",
"in",
"self",
".",
"get_symlinks",
"(",
"project_path",
",",
"vars",
",",
"mods",
")",
":",
"print",
"\"* Symlink TO:\"",
",",
"target",
",",
"'INTO:'",
",",
"linkfile",
"symlink",
"(",
"target",
",",
"linkfile",
")",
"call",
"=",
"Caller",
"(",
"vars",
"[",
"'project'",
"]",
")",
"call",
"(",
"'git'",
",",
"'init'",
",",
"'.'",
")",
"call",
"(",
"'git'",
",",
"'add'",
",",
"'.'",
")",
"call",
"(",
"'git'",
",",
"'commit'",
",",
"'-m'",
",",
"'First commit'",
")"
] | python | Do some tasks after install | false |
2,353,580 | def abs(x):
"""
Absolute value
"""
if isinstance(x, UncertainFunction):
mcpts = np.abs(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.abs(x) | [
"def",
"abs",
"(",
"x",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"UncertainFunction",
")",
":",
"mcpts",
"=",
"np",
".",
"abs",
"(",
"x",
".",
"_mcpts",
")",
"return",
"UncertainFunction",
"(",
"mcpts",
")",
"else",
":",
"return",
"np",
".",
"abs",
"(",
"x",
")"
] | python | Absolute value | false |
2,102,275 | def __init__(self, requests=None, **unknown_fields):
'''
requests : typing.Sequence[~StatusHistoryRequest]
'''
self.requests = [StatusHistoryRequest.from_json(o) for o in requests or []] | [
"def",
"__init__",
"(",
"self",
",",
"requests",
"=",
"None",
",",
"**",
"unknown_fields",
")",
":",
"self",
".",
"requests",
"=",
"[",
"StatusHistoryRequest",
".",
"from_json",
"(",
"o",
")",
"for",
"o",
"in",
"requests",
"or",
"[",
"]",
"]"
] | python | requests : typing.Sequence[~StatusHistoryRequest] | false |
2,466,242 | def is_important_traceback(self, important_module, tb):
"""Walks a traceback's frames and checks if any of the frames
originated in the given important module. If that is the case then we
were able to import the module itself but apparently something went
wrong when the module was imported. (Eg: import of an import failed).
"""
while tb is not None:
if self.is_important_frame(important_module, tb):
return True
tb = tb.tb_next
return False | [
"def",
"is_important_traceback",
"(",
"self",
",",
"important_module",
",",
"tb",
")",
":",
"while",
"tb",
"is",
"not",
"None",
":",
"if",
"self",
".",
"is_important_frame",
"(",
"important_module",
",",
"tb",
")",
":",
"return",
"True",
"tb",
"=",
"tb",
".",
"tb_next",
"return",
"False"
] | python | Walks a traceback's frames and checks if any of the frames
originated in the given important module. If that is the case then we
were able to import the module itself but apparently something went
wrong when the module was imported. (Eg: import of an import failed). | false |
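The docstring above describes walking a traceback frame by frame via tb_next. A small self-contained sketch of that walk follows; frame_in_module is a hypothetical stand-in for the is_important_frame check used by the class.

# Walk a traceback with tb_next, mirroring is_important_traceback above.
import sys

def frame_in_module(module_name, tb):
    # A frame's globals record which module the code was defined in.
    return tb.tb_frame.f_globals.get("__name__") == module_name

def traceback_touches_module(module_name, tb):
    while tb is not None:
        if frame_in_module(module_name, tb):
            return True
        tb = tb.tb_next
    return False

try:
    raise RuntimeError("boom")
except RuntimeError:
    tb = sys.exc_info()[2]
    print(traceback_touches_module("__main__", tb))  # True when run as a script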
2,401,651 | def create_drop_query(self, tokens):
"""
Parse tokens of drop query
:param tokens: A list of InfluxDB query tokens
"""
if not tokens[Keyword.SERIES]:
return None
return DropQuery(self.parse_keyword(Keyword.SERIES, tokens)) | [
"def",
"create_drop_query",
"(",
"self",
",",
"tokens",
")",
":",
"if",
"not",
"tokens",
"[",
"Keyword",
".",
"SERIES",
"]",
":",
"return",
"None",
"return",
"DropQuery",
"(",
"self",
".",
"parse_keyword",
"(",
"Keyword",
".",
"SERIES",
",",
"tokens",
")",
")"
] | python | Parse tokens of drop query
:param tokens: A list of InfluxDB query tokens | false |
2,630,212 | def __init__(self, *args):
"""
Constructor: receive an event from the zyre node, wraps zyre_recv.
The event may be a control message (ENTER, EXIT, JOIN, LEAVE) or
data (WHISPER, SHOUT).
"""
if len(args) == 2 and type(args[0]) is c_void_p and isinstance(args[1], bool):
self._as_parameter_ = cast(args[0], zyre_event_p) # Conversion from raw type to binding
self.allow_destruct = args[1] # This is a 'fresh' value, owned by us
elif len(args) == 2 and type(args[0]) is zyre_event_p and isinstance(args[1], bool):
self._as_parameter_ = args[0] # Conversion from raw type to binding
self.allow_destruct = args[1] # This is a 'fresh' value, owned by us
else:
assert(len(args) == 1)
self._as_parameter_ = lib.zyre_event_new(args[0]) # Creation of new raw type
self.allow_destruct = True | [
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"2",
"and",
"type",
"(",
"args",
"[",
"0",
"]",
")",
"is",
"c_void_p",
"and",
"isinstance",
"(",
"args",
"[",
"1",
"]",
",",
"bool",
")",
":",
"self",
".",
"_as_parameter_",
"=",
"cast",
"(",
"args",
"[",
"0",
"]",
",",
"zyre_event_p",
")",
"self",
".",
"allow_destruct",
"=",
"args",
"[",
"1",
"]",
"elif",
"len",
"(",
"args",
")",
"==",
"2",
"and",
"type",
"(",
"args",
"[",
"0",
"]",
")",
"is",
"zyre_event_p",
"and",
"isinstance",
"(",
"args",
"[",
"1",
"]",
",",
"bool",
")",
":",
"self",
".",
"_as_parameter_",
"=",
"args",
"[",
"0",
"]",
"self",
".",
"allow_destruct",
"=",
"args",
"[",
"1",
"]",
"else",
":",
"assert",
"(",
"len",
"(",
"args",
")",
"==",
"1",
")",
"self",
".",
"_as_parameter_",
"=",
"lib",
".",
"zyre_event_new",
"(",
"args",
"[",
"0",
"]",
")",
"self",
".",
"allow_destruct",
"=",
"True"
] | python | Constructor: receive an event from the zyre node, wraps zyre_recv.
The event may be a control message (ENTER, EXIT, JOIN, LEAVE) or
data (WHISPER, SHOUT). | false |
1,803,508 | def __init__(self, complex_optional_roots):
"""Initialize empty tree of optional root Locations (elements of complex_optional_roots).
        This object constructs a tree of complex optional roots. These are locations preceding an
        @optional traverse that expands vertex fields within. Simple @optional traverses, i.e.
ones that do not expand vertex fields within them are excluded.
Args:
complex_optional_roots: list of @optional locations (location preceding an @optional
traverse) that expand vertex fields within
"""
self._location_to_children = {
optional_root_location: set()
for optional_root_location in complex_optional_roots
}
self._root_location = None
self._location_to_children[self._root_location] = set() | [
"def",
"__init__",
"(",
"self",
",",
"complex_optional_roots",
")",
":",
"self",
".",
"_location_to_children",
"=",
"{",
"optional_root_location",
":",
"set",
"(",
")",
"for",
"optional_root_location",
"in",
"complex_optional_roots",
"}",
"self",
".",
"_root_location",
"=",
"None",
"self",
".",
"_location_to_children",
"[",
"self",
".",
"_root_location",
"]",
"=",
"set",
"(",
")"
] | python | Initialize empty tree of optional root Locations (elements of complex_optional_roots).
This object constructs a tree of complex optional roots. These are locations preceding an
@optional traverse that expands vertex fields within. Simple @optional traverses, i.e.
ones that do not expand vertex fields within them are excluded.
Args:
complex_optional_roots: list of @optional locations (location preceding an @optional
traverse) that expand vertex fields within | false |
2,399,790 | def trigger(self, *args, **kargs):
"""
Execute all event handlers with optional arguments for the observable.
"""
event = args[0]
if isinstance(event, str) and ' ' in event:
event = event.split(' ') # split event names ...
if isinstance(event, list): # event is a list of events
for each in event:
self.events[each].trigger(*args[1:], **kargs)
else:
self.events[event].trigger(*args[1:], **kargs) | [
"def",
"trigger",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kargs",
")",
":",
"event",
"=",
"args",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"event",
",",
"str",
")",
"and",
"' '",
"in",
"event",
":",
"event",
"=",
"event",
".",
"split",
"(",
"' '",
")",
"if",
"isinstance",
"(",
"event",
",",
"list",
")",
":",
"for",
"each",
"in",
"event",
":",
"self",
".",
"events",
"[",
"each",
"]",
".",
"trigger",
"(",
"*",
"args",
"[",
"1",
":",
"]",
",",
"**",
"kargs",
")",
"else",
":",
"self",
".",
"events",
"[",
"event",
"]",
".",
"trigger",
"(",
"*",
"args",
"[",
"1",
":",
"]",
",",
"**",
"kargs",
")"
] | python | Execute all event handlers with optional arguments for the observable. | false |
2,318,254 | def dataframe_setup(self):
"""
Set-up a report to store the desired header: sanitized string combinations
"""
# Initialise a dictionary to store the sanitized headers and strings
genesippr_dict = dict()
# Try to open all the reports - use pandas to extract the results from any report that exists
try:
sippr_matrix = pd.read_csv(os.path.join(self.reportpath, 'genesippr.csv'),
delimiter=',', index_col=0).T.to_dict()
except FileNotFoundError:
sippr_matrix = dict()
try:
conf_matrix = pd.read_csv(os.path.join(self.reportpath, 'confindr_report.csv'),
delimiter=',', index_col=0).T.to_dict()
except FileNotFoundError:
conf_matrix = dict()
try:
gdcs_matrix = pd.read_csv(os.path.join(self.reportpath, 'GDCS.csv'),
delimiter=',', index_col=0).T.to_dict()
except FileNotFoundError:
gdcs_matrix = dict()
# Populate the header:sanitized string dictionary with results from all strains
for sample in self.metadata:
genesippr_dict[sample.name] = dict()
try:
genesippr_dict[sample.name]['eae'] = self.data_sanitise(sippr_matrix[sample.name]['eae'])
except KeyError:
genesippr_dict[sample.name]['eae'] = 0
try:
genesippr_dict[sample.name]['hlyAEc'] = self.data_sanitise(sippr_matrix[sample.name]['hlyAEc'])
except KeyError:
genesippr_dict[sample.name]['hlyAEc'] = 0
try:
genesippr_dict[sample.name]['VT1'] = self.data_sanitise(sippr_matrix[sample.name]['VT1'])
except KeyError:
genesippr_dict[sample.name]['VT1'] = 0
try:
genesippr_dict[sample.name]['VT2'] = self.data_sanitise(sippr_matrix[sample.name]['VT2'])
except KeyError:
genesippr_dict[sample.name]['VT2'] = 0
try:
genesippr_dict[sample.name]['hlyALm'] = self.data_sanitise(sippr_matrix[sample.name]['hlyALm'])
except KeyError:
genesippr_dict[sample.name]['hlyALm'] = 0
try:
genesippr_dict[sample.name]['IGS'] = self.data_sanitise(sippr_matrix[sample.name]['IGS'])
except KeyError:
genesippr_dict[sample.name]['IGS'] = 0
try:
genesippr_dict[sample.name]['inlJ'] = self.data_sanitise(sippr_matrix[sample.name]['inlJ'])
except KeyError:
genesippr_dict[sample.name]['inlJ'] = 0
try:
genesippr_dict[sample.name]['invA'] = self.data_sanitise(sippr_matrix[sample.name]['invA'])
except KeyError:
genesippr_dict[sample.name]['invA'] = 0
try:
genesippr_dict[sample.name]['stn'] = self.data_sanitise(sippr_matrix[sample.name]['stn'])
except KeyError:
genesippr_dict[sample.name]['stn'] = 0
try:
genesippr_dict[sample.name]['GDCS'] = self.data_sanitise(gdcs_matrix[sample.name]['Pass/Fail'],
header='Pass/Fail')
except KeyError:
genesippr_dict[sample.name]['GDCS'] = 0
try:
genesippr_dict[sample.name]['Contamination'] = self.data_sanitise(
conf_matrix[sample.name]['ContamStatus'], header='ContamStatus')
except KeyError:
genesippr_dict[sample.name]['Contamination'] = 0
try:
genesippr_dict[sample.name]['Coverage'] = self.data_sanitise(
gdcs_matrix[sample.name]['MeanCoverage'], header='MeanCoverage')
except KeyError:
genesippr_dict[sample.name]['Coverage'] = 0
# Create a report from the header: sanitized string dictionary to be used in the creation of the report image
with open(self.image_report, 'w') as csv:
data = '{}\n'.format(','.join(self.header_list))
for strain in sorted(genesippr_dict):
data += '{str},'.format(str=strain)
for header in self.header_list[1:]:
data += '{value},'.format(value=genesippr_dict[strain][header])
data = data.rstrip(',')
data += '\n'
csv.write(data) | [
"def",
"dataframe_setup",
"(",
"self",
")",
":",
"genesippr_dict",
"=",
"dict",
"(",
")",
"try",
":",
"sippr_matrix",
"=",
"pd",
".",
"read_csv",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"reportpath",
",",
"'genesippr.csv'",
")",
",",
"delimiter",
"=",
"','",
",",
"index_col",
"=",
"0",
")",
".",
"T",
".",
"to_dict",
"(",
")",
"except",
"FileNotFoundError",
":",
"sippr_matrix",
"=",
"dict",
"(",
")",
"try",
":",
"conf_matrix",
"=",
"pd",
".",
"read_csv",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"reportpath",
",",
"'confindr_report.csv'",
")",
",",
"delimiter",
"=",
"','",
",",
"index_col",
"=",
"0",
")",
".",
"T",
".",
"to_dict",
"(",
")",
"except",
"FileNotFoundError",
":",
"conf_matrix",
"=",
"dict",
"(",
")",
"try",
":",
"gdcs_matrix",
"=",
"pd",
".",
"read_csv",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"reportpath",
",",
"'GDCS.csv'",
")",
",",
"delimiter",
"=",
"','",
",",
"index_col",
"=",
"0",
")",
".",
"T",
".",
"to_dict",
"(",
")",
"except",
"FileNotFoundError",
":",
"gdcs_matrix",
"=",
"dict",
"(",
")",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"=",
"dict",
"(",
")",
"try",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'eae'",
"]",
"=",
"self",
".",
"data_sanitise",
"(",
"sippr_matrix",
"[",
"sample",
".",
"name",
"]",
"[",
"'eae'",
"]",
")",
"except",
"KeyError",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'eae'",
"]",
"=",
"0",
"try",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'hlyAEc'",
"]",
"=",
"self",
".",
"data_sanitise",
"(",
"sippr_matrix",
"[",
"sample",
".",
"name",
"]",
"[",
"'hlyAEc'",
"]",
")",
"except",
"KeyError",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'hlyAEc'",
"]",
"=",
"0",
"try",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'VT1'",
"]",
"=",
"self",
".",
"data_sanitise",
"(",
"sippr_matrix",
"[",
"sample",
".",
"name",
"]",
"[",
"'VT1'",
"]",
")",
"except",
"KeyError",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'VT1'",
"]",
"=",
"0",
"try",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'VT2'",
"]",
"=",
"self",
".",
"data_sanitise",
"(",
"sippr_matrix",
"[",
"sample",
".",
"name",
"]",
"[",
"'VT2'",
"]",
")",
"except",
"KeyError",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'VT2'",
"]",
"=",
"0",
"try",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'hlyALm'",
"]",
"=",
"self",
".",
"data_sanitise",
"(",
"sippr_matrix",
"[",
"sample",
".",
"name",
"]",
"[",
"'hlyALm'",
"]",
")",
"except",
"KeyError",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'hlyALm'",
"]",
"=",
"0",
"try",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'IGS'",
"]",
"=",
"self",
".",
"data_sanitise",
"(",
"sippr_matrix",
"[",
"sample",
".",
"name",
"]",
"[",
"'IGS'",
"]",
")",
"except",
"KeyError",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'IGS'",
"]",
"=",
"0",
"try",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'inlJ'",
"]",
"=",
"self",
".",
"data_sanitise",
"(",
"sippr_matrix",
"[",
"sample",
".",
"name",
"]",
"[",
"'inlJ'",
"]",
")",
"except",
"KeyError",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'inlJ'",
"]",
"=",
"0",
"try",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'invA'",
"]",
"=",
"self",
".",
"data_sanitise",
"(",
"sippr_matrix",
"[",
"sample",
".",
"name",
"]",
"[",
"'invA'",
"]",
")",
"except",
"KeyError",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'invA'",
"]",
"=",
"0",
"try",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'stn'",
"]",
"=",
"self",
".",
"data_sanitise",
"(",
"sippr_matrix",
"[",
"sample",
".",
"name",
"]",
"[",
"'stn'",
"]",
")",
"except",
"KeyError",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'stn'",
"]",
"=",
"0",
"try",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'GDCS'",
"]",
"=",
"self",
".",
"data_sanitise",
"(",
"gdcs_matrix",
"[",
"sample",
".",
"name",
"]",
"[",
"'Pass/Fail'",
"]",
",",
"header",
"=",
"'Pass/Fail'",
")",
"except",
"KeyError",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'GDCS'",
"]",
"=",
"0",
"try",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'Contamination'",
"]",
"=",
"self",
".",
"data_sanitise",
"(",
"conf_matrix",
"[",
"sample",
".",
"name",
"]",
"[",
"'ContamStatus'",
"]",
",",
"header",
"=",
"'ContamStatus'",
")",
"except",
"KeyError",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'Contamination'",
"]",
"=",
"0",
"try",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'Coverage'",
"]",
"=",
"self",
".",
"data_sanitise",
"(",
"gdcs_matrix",
"[",
"sample",
".",
"name",
"]",
"[",
"'MeanCoverage'",
"]",
",",
"header",
"=",
"'MeanCoverage'",
")",
"except",
"KeyError",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'Coverage'",
"]",
"=",
"0",
"with",
"open",
"(",
"self",
".",
"image_report",
",",
"'w'",
")",
"as",
"csv",
":",
"data",
"=",
"'{}\\n'",
".",
"format",
"(",
"','",
".",
"join",
"(",
"self",
".",
"header_list",
")",
")",
"for",
"strain",
"in",
"sorted",
"(",
"genesippr_dict",
")",
":",
"data",
"+=",
"'{str},'",
".",
"format",
"(",
"str",
"=",
"strain",
")",
"for",
"header",
"in",
"self",
".",
"header_list",
"[",
"1",
":",
"]",
":",
"data",
"+=",
"'{value},'",
".",
"format",
"(",
"value",
"=",
"genesippr_dict",
"[",
"strain",
"]",
"[",
"header",
"]",
")",
"data",
"=",
"data",
".",
"rstrip",
"(",
"','",
")",
"data",
"+=",
"'\\n'",
"csv",
".",
"write",
"(",
"data",
")"
] | python | Set-up a report to store the desired header: sanitized string combinations | false |
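dataframe_setup above relies on pd.read_csv(...).T.to_dict() to turn each report into a nested dict keyed first by strain name and then by column header. A short sketch of that idiom; the CSV text, column names, and values are invented.

# Sketch of the read_csv(...).T.to_dict() idiom used above.
import pandas as pd
from io import StringIO

csv_text = "Strain,eae,VT1\nstrain_a,1.0,0.0\nstrain_b,0.0,0.87\n"
matrix = pd.read_csv(StringIO(csv_text), delimiter=",", index_col=0).T.to_dict()
# The transpose makes the outer keys the index column (strain names).
print(matrix["strain_a"]["eae"])  # 1.0
print(matrix["strain_b"]["VT1"])  # 0.87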
1,778,850 | def _get_unfolding_weights(self):
"""Calculate Eq. (7)
k = K + G + g
K -> _qpoints_s[_q_index] (in SBZ)
k -> _qpoints_p[_q_index] (in PBZ)
G -> _comm_points (in PBZ)
g -> a reciprocal lattice point in PBZ (unused explicitly)
j -> Primitive translations in supercell (_trans_p)
J -> Band indices of supercell phonon modes (axis=1 or eigvecs)
The phase factor corresponding to K is not included in eigvecs
with our choice of dynamical matrix.
"""
eigvecs = self._eigvecs[self._q_count]
dtype = "c%d" % (np.dtype('double').itemsize * 2)
weights = np.zeros(eigvecs.shape[1], dtype=dtype)
q_p = self._qpoints_p[self._q_count] # k
q_s = self._qpoints_s[self._q_count] # K
diff = q_p - np.dot(q_s, np.linalg.inv(self._supercell_matrix))
# Search G points corresponding to k = G + K
for G in self._comm_points:
d = diff - G
d -= np.rint(d)
if (np.abs(d) < 1e-5).all():
break
e = np.zeros(eigvecs.shape[:2], dtype=dtype)
phases = np.exp(2j * np.pi * np.dot(self._trans_p, G))
for phase, indices in zip(
phases, self._atom_mapping[self._index_map_inv]):
eig_indices = (
np.c_[indices * 3, indices * 3 + 1, indices * 3 + 2]).ravel()
e += eigvecs[eig_indices, :] * phase
e /= self._N
weights[:] = (e.conj() * e).sum(axis=0)
# e = np.zeros(eigvecs.shape[:2], dtype=dtype)
# phases = np.exp(2j * np.pi * np.dot(self._trans_p, G))
# indices = self._atom_mapping
# eig_indices_r = (
# np.c_[indices * 3, indices * 3 + 1, indices * 3 + 2]).ravel()
# for phase, indices in zip(
# phases, self._atom_mapping[self._index_map_inv]):
# eig_indices_l = (
# np.c_[indices * 3, indices * 3 + 1, indices * 3 + 2]).ravel()
# e += eigvecs[eig_indices_l, :] * eigvecs[eig_indices_r, :].conj() * phase
# e /= self._N
# weights[:] = e.sum(axis=0)
if (weights.imag > 1e-5).any():
print("Phonopy warning: Encountered imaginary values.")
# assert (np.abs(weights.real.sum(axis=1) - 1) < 1e-5).all()
return weights.real | [
"def",
"_get_unfolding_weights",
"(",
"self",
")",
":",
"eigvecs",
"=",
"self",
".",
"_eigvecs",
"[",
"self",
".",
"_q_count",
"]",
"dtype",
"=",
"\"c%d\"",
"%",
"(",
"np",
".",
"dtype",
"(",
"'double'",
")",
".",
"itemsize",
"*",
"2",
")",
"weights",
"=",
"np",
".",
"zeros",
"(",
"eigvecs",
".",
"shape",
"[",
"1",
"]",
",",
"dtype",
"=",
"dtype",
")",
"q_p",
"=",
"self",
".",
"_qpoints_p",
"[",
"self",
".",
"_q_count",
"]",
"q_s",
"=",
"self",
".",
"_qpoints_s",
"[",
"self",
".",
"_q_count",
"]",
"diff",
"=",
"q_p",
"-",
"np",
".",
"dot",
"(",
"q_s",
",",
"np",
".",
"linalg",
".",
"inv",
"(",
"self",
".",
"_supercell_matrix",
")",
")",
"for",
"G",
"in",
"self",
".",
"_comm_points",
":",
"d",
"=",
"diff",
"-",
"G",
"d",
"-=",
"np",
".",
"rint",
"(",
"d",
")",
"if",
"(",
"np",
".",
"abs",
"(",
"d",
")",
"<",
"1e-5",
")",
".",
"all",
"(",
")",
":",
"break",
"e",
"=",
"np",
".",
"zeros",
"(",
"eigvecs",
".",
"shape",
"[",
":",
"2",
"]",
",",
"dtype",
"=",
"dtype",
")",
"phases",
"=",
"np",
".",
"exp",
"(",
"2j",
"*",
"np",
".",
"pi",
"*",
"np",
".",
"dot",
"(",
"self",
".",
"_trans_p",
",",
"G",
")",
")",
"for",
"phase",
",",
"indices",
"in",
"zip",
"(",
"phases",
",",
"self",
".",
"_atom_mapping",
"[",
"self",
".",
"_index_map_inv",
"]",
")",
":",
"eig_indices",
"=",
"(",
"np",
".",
"c_",
"[",
"indices",
"*",
"3",
",",
"indices",
"*",
"3",
"+",
"1",
",",
"indices",
"*",
"3",
"+",
"2",
"]",
")",
".",
"ravel",
"(",
")",
"e",
"+=",
"eigvecs",
"[",
"eig_indices",
",",
":",
"]",
"*",
"phase",
"e",
"/=",
"self",
".",
"_N",
"weights",
"[",
":",
"]",
"=",
"(",
"e",
".",
"conj",
"(",
")",
"*",
"e",
")",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"if",
"(",
"weights",
".",
"imag",
">",
"1e-5",
")",
".",
"any",
"(",
")",
":",
"print",
"(",
"\"Phonopy warning: Encountered imaginary values.\"",
")",
"return",
"weights",
".",
"real"
] | python | Calculate Eq. (7)
k = K + G + g
K -> _qpoints_s[_q_index] (in SBZ)
k -> _qpoints_p[_q_index] (in PBZ)
G -> _comm_points (in PBZ)
g -> a reciprocal lattice point in PBZ (unused explicitly)
j -> Primitive translations in supercell (_trans_p)
J -> Band indices of supercell phonon modes (axis=1 or eigvecs)
The phase factor corresponding to K is not included in eigvecs
with our choice of dynamical matrix. | false |
2,154,452 | def reconcileLimits(self):
"""If self.minValue is not less than self.maxValue, fix the problem.
If self.minValue is not less than self.maxValue, adjust
self.minValue and/or self.maxValue (depending on which was not
specified explicitly by the user) to make self.minValue <
self.maxValue. If the user specified both limits explicitly, then
raise GraphError.
"""
if self.minValue < self.maxValue:
# The limits are already OK.
return
minFixed = (self.minValueSource in ['min'])
maxFixed = (self.maxValueSource in ['max', 'limit'])
if minFixed and maxFixed:
raise GraphError('The %s must be less than the %s' %
(self.minValueSource, self.maxValueSource))
elif minFixed:
self.maxValue = self.minValue + self.chooseDelta(self.minValue)
elif maxFixed:
self.minValue = self.maxValue - self.chooseDelta(self.maxValue)
else:
delta = self.chooseDelta(max(abs(self.minValue),
abs(self.maxValue)))
average = (self.minValue + self.maxValue) / 2.0
self.minValue = average - delta
self.maxValue = average + delta | [
"def",
"reconcileLimits",
"(",
"self",
")",
":",
"if",
"self",
".",
"minValue",
"<",
"self",
".",
"maxValue",
":",
"return",
"minFixed",
"=",
"(",
"self",
".",
"minValueSource",
"in",
"[",
"'min'",
"]",
")",
"maxFixed",
"=",
"(",
"self",
".",
"maxValueSource",
"in",
"[",
"'max'",
",",
"'limit'",
"]",
")",
"if",
"minFixed",
"and",
"maxFixed",
":",
"raise",
"GraphError",
"(",
"'The %s must be less than the %s'",
"%",
"(",
"self",
".",
"minValueSource",
",",
"self",
".",
"maxValueSource",
")",
")",
"elif",
"minFixed",
":",
"self",
".",
"maxValue",
"=",
"self",
".",
"minValue",
"+",
"self",
".",
"chooseDelta",
"(",
"self",
".",
"minValue",
")",
"elif",
"maxFixed",
":",
"self",
".",
"minValue",
"=",
"self",
".",
"maxValue",
"-",
"self",
".",
"chooseDelta",
"(",
"self",
".",
"maxValue",
")",
"else",
":",
"delta",
"=",
"self",
".",
"chooseDelta",
"(",
"max",
"(",
"abs",
"(",
"self",
".",
"minValue",
")",
",",
"abs",
"(",
"self",
".",
"maxValue",
")",
")",
")",
"average",
"=",
"(",
"self",
".",
"minValue",
"+",
"self",
".",
"maxValue",
")",
"/",
"2.0",
"self",
".",
"minValue",
"=",
"average",
"-",
"delta",
"self",
".",
"maxValue",
"=",
"average",
"+",
"delta"
] | python | If self.minValue is not less than self.maxValue, fix the problem.
If self.minValue is not less than self.maxValue, adjust
self.minValue and/or self.maxValue (depending on which was not
specified explicitly by the user) to make self.minValue <
self.maxValue. If the user specified both limits explicitly, then
raise GraphError. | false |
1,905,117 | def wrap (text, width, **kwargs):
"""Adjust lines of text to be not longer than width. The text will be
returned unmodified if width <= 0.
See textwrap.wrap() for a list of supported kwargs.
Returns text with lines no longer than given width."""
if width <= 0 or not text:
return text
ret = []
for para in get_paragraphs(text):
text = " ".join(para.strip().split())
ret.extend(textwrap.wrap(text, width, **kwargs))
return os.linesep.join(ret) | [
"def",
"wrap",
"(",
"text",
",",
"width",
",",
"**",
"kwargs",
")",
":",
"if",
"width",
"<=",
"0",
"or",
"not",
"text",
":",
"return",
"text",
"ret",
"=",
"[",
"]",
"for",
"para",
"in",
"get_paragraphs",
"(",
"text",
")",
":",
"text",
"=",
"\" \"",
".",
"join",
"(",
"para",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
")",
"ret",
".",
"extend",
"(",
"textwrap",
".",
"wrap",
"(",
"text",
",",
"width",
",",
"**",
"kwargs",
")",
")",
"return",
"os",
".",
"linesep",
".",
"join",
"(",
"ret",
")"
] | python | Adjust lines of text to be no longer than width. The text will be
returned unmodified if width <= 0.
See textwrap.wrap() for a list of supported kwargs.
Returns text with lines no longer than given width. | false |
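wrap above splits the text into paragraphs (via a get_paragraphs helper that is not part of this record), collapses whitespace, and defers to textwrap.wrap. A rough standalone approximation, assuming blank lines separate paragraphs:

# Approximation of wrap(); splitting on blank lines stands in for get_paragraphs().
import os
import textwrap

def wrap_text(text, width):
    if width <= 0 or not text:
        return text
    lines = []
    for para in text.split("\n\n"):
        collapsed = " ".join(para.strip().split())  # normalise internal whitespace
        lines.extend(textwrap.wrap(collapsed, width))
    return os.linesep.join(lines)

print(wrap_text("a short first paragraph\n\nand a second one that is long enough to wrap", 20))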
2,375,151 | def __eq__(self, other):
"""self == other"""
if self._impl.is_scalar():
return self._impl.single_value == other
elif isinstance(other, Cells):
return self is other
else:
raise TypeError | [
"def",
"__eq__",
"(",
"self",
",",
"other",
")",
":",
"if",
"self",
".",
"_impl",
".",
"is_scalar",
"(",
")",
":",
"return",
"self",
".",
"_impl",
".",
"single_value",
"==",
"other",
"elif",
"isinstance",
"(",
"other",
",",
"Cells",
")",
":",
"return",
"self",
"is",
"other",
"else",
":",
"raise",
"TypeError"
] | python | self == other | false |
2,343,984 | def __init__(self, api_url, user, passwd, timeout=5, scheme='http'):
"""
:param string api_url: base url for the broker API
:param string user: Username used to authenticate to the API.
:param string passwd: Password used to authenticate to the API.
:param int timeout: Integer number of seconds to wait for each call.
:param string scheme: HTTP scheme used to make the connection
Populates server attributes using passed-in parameters and
the HTTP API's 'overview' information.
"""
self.api_url = api_url
self.user = user
self.passwd = passwd
self.timeout = timeout
self.scheme = scheme
self.http = http.HTTPClient(
self.api_url,
self.user,
self.passwd,
self.timeout,
self.scheme
)
return | [
"def",
"__init__",
"(",
"self",
",",
"api_url",
",",
"user",
",",
"passwd",
",",
"timeout",
"=",
"5",
",",
"scheme",
"=",
"'http'",
")",
":",
"self",
".",
"api_url",
"=",
"api_url",
"self",
".",
"user",
"=",
"user",
"self",
".",
"passwd",
"=",
"passwd",
"self",
".",
"timeout",
"=",
"timeout",
"self",
".",
"scheme",
"=",
"scheme",
"self",
".",
"http",
"=",
"http",
".",
"HTTPClient",
"(",
"self",
".",
"api_url",
",",
"self",
".",
"user",
",",
"self",
".",
"passwd",
",",
"self",
".",
"timeout",
",",
"self",
".",
"scheme",
")",
"return"
] | python | :param string api_url: base url for the broker API
:param string user: Username used to authenticate to the API.
:param string passwd: Password used to authenticate to the API.
:param int timeout: Integer number of seconds to wait for each call.
:param string scheme: HTTP scheme used to make the connection
Populates server attributes using passed-in parameters and
the HTTP API's 'overview' information. | false |
2,675,713 | def load_module(self, name):
"""Load and return a module"""
if name in sys.modules:
return sys.modules[name]
# load the actual import hook module
module_name = self.mount2name(name)
__import__(module_name)
# alias the import hook module to the mount, so both can be used interchangeably
module = sys.modules[name] = sys.modules[module_name]
module.install()
return module | [
"def",
"load_module",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"in",
"sys",
".",
"modules",
":",
"return",
"sys",
".",
"modules",
"[",
"name",
"]",
"module_name",
"=",
"self",
".",
"mount2name",
"(",
"name",
")",
"__import__",
"(",
"module_name",
")",
"module",
"=",
"sys",
".",
"modules",
"[",
"name",
"]",
"=",
"sys",
".",
"modules",
"[",
"module_name",
"]",
"module",
".",
"install",
"(",
")",
"return",
"module"
] | python | Load and return a module | false |
1,595,279 | def find_windows_executable(bin_path, exe_name):
"""Given an executable name, search the given location for an executable"""
requested_path = get_windows_path(bin_path, exe_name)
if os.path.isfile(requested_path):
return requested_path
try:
pathext = os.environ["PATHEXT"]
except KeyError:
pass
else:
for ext in pathext.split(os.pathsep):
path = get_windows_path(bin_path, exe_name + ext.strip().lower())
if os.path.isfile(path):
return path
return find_executable(exe_name) | [
"def",
"find_windows_executable",
"(",
"bin_path",
",",
"exe_name",
")",
":",
"requested_path",
"=",
"get_windows_path",
"(",
"bin_path",
",",
"exe_name",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"requested_path",
")",
":",
"return",
"requested_path",
"try",
":",
"pathext",
"=",
"os",
".",
"environ",
"[",
"\"PATHEXT\"",
"]",
"except",
"KeyError",
":",
"pass",
"else",
":",
"for",
"ext",
"in",
"pathext",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
":",
"path",
"=",
"get_windows_path",
"(",
"bin_path",
",",
"exe_name",
"+",
"ext",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"return",
"path",
"return",
"find_executable",
"(",
"exe_name",
")"
] | python | Given an executable name, search the given location for an executable | false |
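find_windows_executable above mirrors how Windows resolves extension-less commands: try the bare name first, then append each suffix listed in PATHEXT. A sketch of that suffix expansion; the bin directory and command name are invented, and on systems without PATHEXT only the bare name is produced.

# Sketch of the PATHEXT expansion used above.
import os

def candidate_paths(bin_dir, exe_name):
    paths = [os.path.join(bin_dir, exe_name)]  # bare name is tried first
    for ext in os.environ.get("PATHEXT", "").split(os.pathsep):
        if ext:
            paths.append(os.path.join(bin_dir, exe_name + ext.strip().lower()))
    return paths

for path in candidate_paths("C:\\venv\\Scripts", "pip"):
    print(path)  # pip, then pip.com / pip.exe / ... when PATHEXT is set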
2,462,624 | def fileprep(self):
"""Decompress and concatenate .fastq files"""
# Create and start threads
for i in range(self.cpus):
# Send the threads to the appropriate destination function
threads = Thread(target=self.prep, args=())
# Set the daemon to true - something to do with thread management
threads.setDaemon(True)
# Start the threading
threads.start()
for sample in self.metadata:
# Set the name of the decompressed, combined .fastq file
sample.general.combined = os.path.join(sample.general.outputdirectory, '{sample_name}_combined.fastq'
.format(sample_name=sample.name))
self.queue.put(sample)
self.queue.join() | [
"def",
"fileprep",
"(",
"self",
")",
":",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"cpus",
")",
":",
"threads",
"=",
"Thread",
"(",
"target",
"=",
"self",
".",
"prep",
",",
"args",
"=",
"(",
")",
")",
"threads",
".",
"setDaemon",
"(",
"True",
")",
"threads",
".",
"start",
"(",
")",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"sample",
".",
"general",
".",
"combined",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sample",
".",
"general",
".",
"outputdirectory",
",",
"'{sample_name}_combined.fastq'",
".",
"format",
"(",
"sample_name",
"=",
"sample",
".",
"name",
")",
")",
"self",
".",
"queue",
".",
"put",
"(",
"sample",
")",
"self",
".",
"queue",
".",
"join",
"(",
")"
] | python | Decompress and concatenate .fastq files | false |
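fileprep above uses the standard daemon-thread worker pattern: start a pool of threads that pull from a queue.Queue, enqueue one item per sample, and block on queue.join(). A generic sketch of that pattern; process() is a stand-in for the per-sample decompress/concatenate work, and the pool size of 4 stands in for self.cpus.

# Generic daemon-thread + Queue pattern, as used by fileprep() above.
from queue import Queue
from threading import Thread

def process(item):
    print("processing", item)

def worker(queue):
    while True:
        item = queue.get()
        process(item)
        queue.task_done()  # lets queue.join() return once everything is handled

queue = Queue()
for _ in range(4):
    thread = Thread(target=worker, args=(queue,))
    thread.daemon = True  # the record above uses the older setDaemon(True) spelling
    thread.start()

for sample in ["sample_a", "sample_b", "sample_c"]:
    queue.put(sample)
queue.join()  # blocks until every queued item has been marked task_done()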
2,063,589 | def _ip_setter(self, ipaddr_name, ipaddrs_name, ips):
"""Setter for ip fields
Accept as input string or list of IP instances.
String case:
only ipvXaddr is going to be filled, that is enough to perform
host record search using ip
List of IP instances case:
ipvXaddrs is going to be filled with ips content,
        so create can be issued, since fully prepared IP objects are in place.
        ipXaddr is also filled to be able to perform a search on NIOS
and verify that no such host record exists yet.
"""
if isinstance(ips, six.string_types):
setattr(self, ipaddr_name, ips)
elif isinstance(ips, (list, tuple)) and isinstance(ips[0], IP):
setattr(self, ipaddr_name, ips[0].ip)
setattr(self, ipaddrs_name, ips)
elif isinstance(ips, IP):
setattr(self, ipaddr_name, ips.ip)
setattr(self, ipaddrs_name, [ips])
elif ips is None:
setattr(self, ipaddr_name, None)
setattr(self, ipaddrs_name, None)
else:
raise ValueError(
"Invalid format of ip passed in: %s."
"Should be string or list of NIOS IP objects." % ips) | [
"def",
"_ip_setter",
"(",
"self",
",",
"ipaddr_name",
",",
"ipaddrs_name",
",",
"ips",
")",
":",
"if",
"isinstance",
"(",
"ips",
",",
"six",
".",
"string_types",
")",
":",
"setattr",
"(",
"self",
",",
"ipaddr_name",
",",
"ips",
")",
"elif",
"isinstance",
"(",
"ips",
",",
"(",
"list",
",",
"tuple",
")",
")",
"and",
"isinstance",
"(",
"ips",
"[",
"0",
"]",
",",
"IP",
")",
":",
"setattr",
"(",
"self",
",",
"ipaddr_name",
",",
"ips",
"[",
"0",
"]",
".",
"ip",
")",
"setattr",
"(",
"self",
",",
"ipaddrs_name",
",",
"ips",
")",
"elif",
"isinstance",
"(",
"ips",
",",
"IP",
")",
":",
"setattr",
"(",
"self",
",",
"ipaddr_name",
",",
"ips",
".",
"ip",
")",
"setattr",
"(",
"self",
",",
"ipaddrs_name",
",",
"[",
"ips",
"]",
")",
"elif",
"ips",
"is",
"None",
":",
"setattr",
"(",
"self",
",",
"ipaddr_name",
",",
"None",
")",
"setattr",
"(",
"self",
",",
"ipaddrs_name",
",",
"None",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid format of ip passed in: %s.\"",
"\"Should be string or list of NIOS IP objects.\"",
"%",
"ips",
")"
] | python | Setter for ip fields
Accept as input string or list of IP instances.
String case:
only ipvXaddr is going to be filled, that is enough to perform
host record search using ip
List of IP instances case:
ipvXaddrs is going to be filled with ips content,
so create can be issued, since fully prepared IP objects are in place.
ipXaddr is also filled to be able to perform a search on NIOS
and verify that no such host record exists yet. | false |
1,732,879 | def __init__(self,
block_manager,
transaction_committed,
get_committed_batch_by_id,
get_committed_batch_by_txn_id,
get_chain_head,
gossip,
cache_keep_time=1200,
cache_purge_frequency=30,
requested_keep_time=300):
"""
:param block_manager (BlockManager) An object for getting and storing
blocks safely
:param transaction_committed (fn(transaction_id) -> bool) A function to
determine if a transaction is committed.
:param batch_committed (fn(batch_id) -> bool) A function to
determine if a batch is committed.
:param get_committed_batch_by_txn_id
(fn(transaction_id) -> Batch) A function for retrieving a committed
batch from a committed transction id.
:param get_chain_head (fn() -> Block) A function for getting the
current chain head.
:param gossip (gossip.Gossip) Broadcasts block and batch request to
peers
:param cache_keep_time (float) Time in seconds to keep values in
TimedCaches.
:param cache_purge_frequency (float) Time between purging the
TimedCaches.
:param requested_keep_time (float) Time in seconds to keep the ids
of requested objects. WARNING this time should always be less than
cache_keep_time or the validator can get into a state where it
fails to make progress because it thinks it has already requested
something that it is missing.
"""
self._gossip = gossip
self._batch_cache = TimedCache(cache_keep_time, cache_purge_frequency)
self._block_manager = block_manager
self._transaction_committed = transaction_committed
self._get_committed_batch_by_id = get_committed_batch_by_id
self._get_committed_batch_by_txn_id = get_committed_batch_by_txn_id
self._get_chain_head = get_chain_head
self._seen_txns = TimedCache(cache_keep_time, cache_purge_frequency)
self._incomplete_batches = TimedCache(cache_keep_time,
cache_purge_frequency)
self._incomplete_blocks = TimedCache(cache_keep_time,
cache_purge_frequency)
self._requested = TimedCache(requested_keep_time,
cache_purge_frequency)
self._on_block_received = None
self._on_batch_received = None
self.lock = RLock()
# Tracks how many times an unsatisfied dependency is found
self._unsatisfied_dependency_count = COLLECTOR.counter(
'unsatisfied_dependency_count', instance=self)
# Tracks the length of the completer's _seen_txns
self._seen_txns_length = COLLECTOR.gauge(
'seen_txns_length', instance=self)
self._seen_txns_length.set_value(0)
# Tracks the length of the completer's _incomplete_blocks
self._incomplete_blocks_length = COLLECTOR.gauge(
'incomplete_blocks_length', instance=self)
self._incomplete_blocks_length.set_value(0)
# Tracks the length of the completer's _incomplete_batches
self._incomplete_batches_length = COLLECTOR.gauge(
'incomplete_batches_length', instance=self)
self._incomplete_batches_length.set_value(0) | [
"def",
"__init__",
"(",
"self",
",",
"block_manager",
",",
"transaction_committed",
",",
"get_committed_batch_by_id",
",",
"get_committed_batch_by_txn_id",
",",
"get_chain_head",
",",
"gossip",
",",
"cache_keep_time",
"=",
"1200",
",",
"cache_purge_frequency",
"=",
"30",
",",
"requested_keep_time",
"=",
"300",
")",
":",
"self",
".",
"_gossip",
"=",
"gossip",
"self",
".",
"_batch_cache",
"=",
"TimedCache",
"(",
"cache_keep_time",
",",
"cache_purge_frequency",
")",
"self",
".",
"_block_manager",
"=",
"block_manager",
"self",
".",
"_transaction_committed",
"=",
"transaction_committed",
"self",
".",
"_get_committed_batch_by_id",
"=",
"get_committed_batch_by_id",
"self",
".",
"_get_committed_batch_by_txn_id",
"=",
"get_committed_batch_by_txn_id",
"self",
".",
"_get_chain_head",
"=",
"get_chain_head",
"self",
".",
"_seen_txns",
"=",
"TimedCache",
"(",
"cache_keep_time",
",",
"cache_purge_frequency",
")",
"self",
".",
"_incomplete_batches",
"=",
"TimedCache",
"(",
"cache_keep_time",
",",
"cache_purge_frequency",
")",
"self",
".",
"_incomplete_blocks",
"=",
"TimedCache",
"(",
"cache_keep_time",
",",
"cache_purge_frequency",
")",
"self",
".",
"_requested",
"=",
"TimedCache",
"(",
"requested_keep_time",
",",
"cache_purge_frequency",
")",
"self",
".",
"_on_block_received",
"=",
"None",
"self",
".",
"_on_batch_received",
"=",
"None",
"self",
".",
"lock",
"=",
"RLock",
"(",
")",
"self",
".",
"_unsatisfied_dependency_count",
"=",
"COLLECTOR",
".",
"counter",
"(",
"'unsatisfied_dependency_count'",
",",
"instance",
"=",
"self",
")",
"self",
".",
"_seen_txns_length",
"=",
"COLLECTOR",
".",
"gauge",
"(",
"'seen_txns_length'",
",",
"instance",
"=",
"self",
")",
"self",
".",
"_seen_txns_length",
".",
"set_value",
"(",
"0",
")",
"self",
".",
"_incomplete_blocks_length",
"=",
"COLLECTOR",
".",
"gauge",
"(",
"'incomplete_blocks_length'",
",",
"instance",
"=",
"self",
")",
"self",
".",
"_incomplete_blocks_length",
".",
"set_value",
"(",
"0",
")",
"self",
".",
"_incomplete_batches_length",
"=",
"COLLECTOR",
".",
"gauge",
"(",
"'incomplete_batches_length'",
",",
"instance",
"=",
"self",
")",
"self",
".",
"_incomplete_batches_length",
".",
"set_value",
"(",
"0",
")"
] | python | :param block_manager (BlockManager) An object for getting and storing
blocks safely
:param transaction_committed (fn(transaction_id) -> bool) A function to
determine if a transaction is committed.
:param batch_committed (fn(batch_id) -> bool) A function to
determine if a batch is committed.
:param get_committed_batch_by_txn_id
(fn(transaction_id) -> Batch) A function for retrieving a committed
batch from a committed transction id.
:param get_chain_head (fn() -> Block) A function for getting the
current chain head.
:param gossip (gossip.Gossip) Broadcasts block and batch request to
peers
:param cache_keep_time (float) Time in seconds to keep values in
TimedCaches.
:param cache_purge_frequency (float) Time between purging the
TimedCaches.
:param requested_keep_time (float) Time in seconds to keep the ids
of requested objects. WARNING this time should always be less than
cache_keep_time or the validator can get into a state where it
fails to make progress because it thinks it has already requested
something that it is missing. | false |
2,199,816 | def gen_age(output, ascii_props=False, append=False, prefix=""):
"""Generate `age` property."""
obj = {}
all_chars = ALL_ASCII if ascii_props else ALL_CHARS
with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, 'DerivedAge.txt'), 'r', 'utf-8') as uf:
for line in uf:
if not line.startswith('#'):
data = line.split('#')[0].split(';')
if len(data) < 2:
continue
span = create_span([int(i, 16) for i in data[0].strip().split('..')], is_bytes=ascii_props)
name = format_name(data[1])
if name not in obj:
obj[name] = []
if span is None:
continue
obj[name].extend(span)
unassigned = set()
for x in obj.values():
unassigned |= set(x)
obj['na'] = list(all_chars - unassigned)
for name in list(obj.keys()):
s = set(obj[name])
obj[name] = sorted(s)
# Convert characters values to ranges
char2range(obj, is_bytes=ascii_props)
with codecs.open(output, 'a' if append else 'w', 'utf-8') as f:
if not append:
f.write(HEADER)
# Write out the Unicode properties
f.write('%s_age = {\n' % prefix)
count = len(obj) - 1
i = 0
for k1, v1 in sorted(obj.items()):
f.write(' "%s": "%s"' % (k1, v1))
if i == count:
f.write('\n}\n')
else:
f.write(',\n')
i += 1 | [
"def",
"gen_age",
"(",
"output",
",",
"ascii_props",
"=",
"False",
",",
"append",
"=",
"False",
",",
"prefix",
"=",
"\"\"",
")",
":",
"obj",
"=",
"{",
"}",
"all_chars",
"=",
"ALL_ASCII",
"if",
"ascii_props",
"else",
"ALL_CHARS",
"with",
"codecs",
".",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"HOME",
",",
"'unicodedata'",
",",
"UNIVERSION",
",",
"'DerivedAge.txt'",
")",
",",
"'r'",
",",
"'utf-8'",
")",
"as",
"uf",
":",
"for",
"line",
"in",
"uf",
":",
"if",
"not",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"data",
"=",
"line",
".",
"split",
"(",
"'#'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"';'",
")",
"if",
"len",
"(",
"data",
")",
"<",
"2",
":",
"continue",
"span",
"=",
"create_span",
"(",
"[",
"int",
"(",
"i",
",",
"16",
")",
"for",
"i",
"in",
"data",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'..'",
")",
"]",
",",
"is_bytes",
"=",
"ascii_props",
")",
"name",
"=",
"format_name",
"(",
"data",
"[",
"1",
"]",
")",
"if",
"name",
"not",
"in",
"obj",
":",
"obj",
"[",
"name",
"]",
"=",
"[",
"]",
"if",
"span",
"is",
"None",
":",
"continue",
"obj",
"[",
"name",
"]",
".",
"extend",
"(",
"span",
")",
"unassigned",
"=",
"set",
"(",
")",
"for",
"x",
"in",
"obj",
".",
"values",
"(",
")",
":",
"unassigned",
"|=",
"set",
"(",
"x",
")",
"obj",
"[",
"'na'",
"]",
"=",
"list",
"(",
"all_chars",
"-",
"unassigned",
")",
"for",
"name",
"in",
"list",
"(",
"obj",
".",
"keys",
"(",
")",
")",
":",
"s",
"=",
"set",
"(",
"obj",
"[",
"name",
"]",
")",
"obj",
"[",
"name",
"]",
"=",
"sorted",
"(",
"s",
")",
"char2range",
"(",
"obj",
",",
"is_bytes",
"=",
"ascii_props",
")",
"with",
"codecs",
".",
"open",
"(",
"output",
",",
"'a'",
"if",
"append",
"else",
"'w'",
",",
"'utf-8'",
")",
"as",
"f",
":",
"if",
"not",
"append",
":",
"f",
".",
"write",
"(",
"HEADER",
")",
"f",
".",
"write",
"(",
"'%s_age = {\\n'",
"%",
"prefix",
")",
"count",
"=",
"len",
"(",
"obj",
")",
"-",
"1",
"i",
"=",
"0",
"for",
"k1",
",",
"v1",
"in",
"sorted",
"(",
"obj",
".",
"items",
"(",
")",
")",
":",
"f",
".",
"write",
"(",
"' \"%s\": \"%s\"'",
"%",
"(",
"k1",
",",
"v1",
")",
")",
"if",
"i",
"==",
"count",
":",
"f",
".",
"write",
"(",
"'\\n}\\n'",
")",
"else",
":",
"f",
".",
"write",
"(",
"',\\n'",
")",
"i",
"+=",
"1"
] | python | Generate `age` property. | false |
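gen_age above parses DerivedAge.txt lines of the form "start..end ; value # comment". A tiny sketch of handling that line format; the sample lines are illustrative rather than an authoritative copy of the Unicode data file.

# Parse "start..end ; value  # comment" lines the way gen_age() does.
sample = [
    "# Derived Property: Age",
    "0041..005A    ; 1.1 #  [26] LATIN CAPITAL LETTER A..LATIN CAPITAL LETTER Z",
    "00C0          ; 1.1 #       LATIN CAPITAL LETTER A WITH GRAVE",
]

for line in sample:
    if line.startswith("#"):
        continue
    data = line.split("#")[0].split(";")
    if len(data) < 2:
        continue
    codepoints = [int(i, 16) for i in data[0].strip().split("..")]
    version = data[1].strip()
    print(codepoints, version)  # [65, 90] 1.1  then  [192] 1.1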
2,085,590 | def __init__(self, source=None):
''' Constructor. If source specified, object will be initialized
with the contents of source. Otherwise the object will be empty.
@param source source for initialization
(file name of HEX file, file object, addr dict or
other IntelHex object)
'''
# public members
self.padding = 0x0FF
# Start Address
self.start_addr = None
# private members
self._buf = {}
self._offset = 0
if source is not None:
if isinstance(source, StrType) or getattr(source, "read", None):
# load hex file
self.loadhex(source)
elif isinstance(source, dict):
self.fromdict(source)
elif isinstance(source, IntelHex):
self.padding = source.padding
if source.start_addr:
self.start_addr = source.start_addr.copy()
self._buf = source._buf.copy()
else:
raise ValueError("source: bad initializer type") | [
"def",
"__init__",
"(",
"self",
",",
"source",
"=",
"None",
")",
":",
"self",
".",
"padding",
"=",
"0x0FF",
"self",
".",
"start_addr",
"=",
"None",
"self",
".",
"_buf",
"=",
"{",
"}",
"self",
".",
"_offset",
"=",
"0",
"if",
"source",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"source",
",",
"StrType",
")",
"or",
"getattr",
"(",
"source",
",",
"\"read\"",
",",
"None",
")",
":",
"self",
".",
"loadhex",
"(",
"source",
")",
"elif",
"isinstance",
"(",
"source",
",",
"dict",
")",
":",
"self",
".",
"fromdict",
"(",
"source",
")",
"elif",
"isinstance",
"(",
"source",
",",
"IntelHex",
")",
":",
"self",
".",
"padding",
"=",
"source",
".",
"padding",
"if",
"source",
".",
"start_addr",
":",
"self",
".",
"start_addr",
"=",
"source",
".",
"start_addr",
".",
"copy",
"(",
")",
"self",
".",
"_buf",
"=",
"source",
".",
"_buf",
".",
"copy",
"(",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"source: bad initializer type\"",
")"
] | python | Constructor. If source specified, object will be initialized
with the contents of source. Otherwise the object will be empty.
@param source source for initialization
(file name of HEX file, file object, addr dict or
other IntelHex object) | false |
2,272,589 | def aggregate(self, reducer, seed=default, result_selector=identity):
'''Apply a function over a sequence to produce a single result.
Apply a binary function cumulatively to the elements of the source
sequence so as to reduce the iterable to a single value.
Note: This method uses immediate execution.
Args:
reducer: A binary function the first positional argument of which
is an accumulated value and the second is the update value from
the source sequence. The return value should be the new
accumulated value after the update value has been incorporated.
seed: An optional value used to initialise the accumulator before
                iteration over the source sequence. If seed is omitted
and the source sequence contains only one item, then that item
is returned.
result_selector: An optional unary function applied to the final
accumulator value to produce the result. If omitted, defaults
to the identity function.
Raises:
ValueError: If called on an empty sequence with no seed value.
TypeError: If reducer is not callable.
TypeError: If result_selector is not callable.
'''
if self.closed():
raise ValueError("Attempt to call aggregate() on a "
"closed Queryable.")
if not is_callable(reducer):
raise TypeError("aggregate() parameter reducer={0} is "
"not callable".format(repr(reducer)))
if not is_callable(result_selector):
raise TypeError("aggregate() parameter result_selector={0} is "
"not callable".format(repr(result_selector)))
if seed is default:
try:
return result_selector(fold(reducer, self))
except TypeError as e:
if 'empty sequence' in str(e):
raise ValueError("Cannot aggregate() empty sequence with "
"no seed value")
return result_selector(fold(reducer, self, seed)) | [
"def",
"aggregate",
"(",
"self",
",",
"reducer",
",",
"seed",
"=",
"default",
",",
"result_selector",
"=",
"identity",
")",
":",
"if",
"self",
".",
"closed",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Attempt to call aggregate() on a \"",
"\"closed Queryable.\"",
")",
"if",
"not",
"is_callable",
"(",
"reducer",
")",
":",
"raise",
"TypeError",
"(",
"\"aggregate() parameter reducer={0} is \"",
"\"not callable\"",
".",
"format",
"(",
"repr",
"(",
"reducer",
")",
")",
")",
"if",
"not",
"is_callable",
"(",
"result_selector",
")",
":",
"raise",
"TypeError",
"(",
"\"aggregate() parameter result_selector={0} is \"",
"\"not callable\"",
".",
"format",
"(",
"repr",
"(",
"result_selector",
")",
")",
")",
"if",
"seed",
"is",
"default",
":",
"try",
":",
"return",
"result_selector",
"(",
"fold",
"(",
"reducer",
",",
"self",
")",
")",
"except",
"TypeError",
"as",
"e",
":",
"if",
"'empty sequence'",
"in",
"str",
"(",
"e",
")",
":",
"raise",
"ValueError",
"(",
"\"Cannot aggregate() empty sequence with \"",
"\"no seed value\"",
")",
"return",
"result_selector",
"(",
"fold",
"(",
"reducer",
",",
"self",
",",
"seed",
")",
")"
] | python | Apply a function over a sequence to produce a single result.
Apply a binary function cumulatively to the elements of the source
sequence so as to reduce the iterable to a single value.
Note: This method uses immediate execution.
Args:
reducer: A binary function the first positional argument of which
is an accumulated value and the second is the update value from
the source sequence. The return value should be the new
accumulated value after the update value has been incorporated.
seed: An optional value used to initialise the accumulator before
iteration over the source sequence. If seed is omitted
and the source sequence contains only one item, then that item
is returned.
result_selector: An optional unary function applied to the final
accumulator value to produce the result. If omitted, defaults
to the identity function.
Raises:
ValueError: If called on an empty sequence with no seed value.
TypeError: If reducer is not callable.
TypeError: If result_selector is not callable. | false |
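aggregate above is a seeded fold followed by a result selector, which functools.reduce expresses directly. A sketch of the equivalent semantics, using None in place of the library's default sentinel; this mirrors the behaviour rather than reproducing the asq implementation.

# aggregate()-style fold sketched with functools.reduce.
from functools import reduce

def aggregate(iterable, reducer, seed=None, result_selector=lambda x: x):
    if seed is None:
        return result_selector(reduce(reducer, iterable))
    return result_selector(reduce(reducer, iterable, seed))

print(aggregate([1, 2, 3, 4], lambda acc, x: acc + x))        # 10
print(aggregate([1, 2, 3], lambda acc, x: acc * x, seed=10))  # 60
print(aggregate(["a", "b"], lambda acc, x: acc + x, result_selector=str.upper))  # AB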
2,642,194 | def get(self, part):
"""
Retrieves a part of the model from redis and stores it.
:param part: The part of the model to retrieve.
:raises RedisORMException: If the redis type is different from string
or list (the only two supported types at this time.)
"""
redis_key = ':'.join([self.namespace, self.key, part])
objectType = self.conn.type(redis_key)
if objectType == "string":
self._data[part] = self.conn.get(redis_key)
elif objectType == "list":
self._data[part] = RedisList(redis_key, self.conn)
else:
raise RedisORMException("Other types besides string and list are unsupported at this time.") | [
"def",
"get",
"(",
"self",
",",
"part",
")",
":",
"redis_key",
"=",
"':'",
".",
"join",
"(",
"[",
"self",
".",
"namespace",
",",
"self",
".",
"key",
",",
"part",
"]",
")",
"objectType",
"=",
"self",
".",
"conn",
".",
"type",
"(",
"redis_key",
")",
"if",
"objectType",
"==",
"\"string\"",
":",
"self",
".",
"_data",
"[",
"part",
"]",
"=",
"self",
".",
"conn",
".",
"get",
"(",
"redis_key",
")",
"elif",
"objectType",
"==",
"\"list\"",
":",
"self",
".",
"_data",
"[",
"part",
"]",
"=",
"RedisList",
"(",
"redis_key",
",",
"self",
".",
"conn",
")",
"else",
":",
"raise",
"RedisORMException",
"(",
"\"Other types besides string and list are unsupported at this time.\"",
")"
] | python | Retrieves a part of the model from redis and stores it.
:param part: The part of the model to retrieve.
:raises RedisORMException: If the redis type is different from string
or list (the only two supported types at this time.) | false |
2,441,378 | def wait(self, condition, interval, *args):
"""
:Description: Create an interval in vm.window, will clear interval after condition met.
:param condition: Condition in javascript to pass to interval.
:example: '$el.innerText == "cheesecake"'
:example: '$el[0].disabled && $el[1].disabled'
:type condition: string
:param interval: Time in milliseconds to execute interval.
:type interval: int or float
:param *args: WebElement or selector of condition element.
:type *args: tuple
:return: string
"""
hid = lambda: '$' + str(uuid.uuid1())[:8]
handle = hid()
if len(args):
element_handle = hid()
self.browser.execute_script(
'window["{}"] = [];'.format(element_handle)
) # create element container in window scope
for el in args:
if isinstance(el, string_types):
# assume selector
self.browser.execute_script('window["{}"].push({});'.format(
element_handle, 'function() { return document.querySelector("%s") }' % el))
else:
# assume web element
self.browser.execute_script(
'window["{}"].push(arguments[0]);'.format(element_handle), el)
if len(args) == 1:
condition = condition.replace('$el', 'window["{}"][0]{}'.format(
element_handle, '()' if isinstance(args[0], string_types) else ''))
else:
regex = r'(\$el\[([0-9]{0,3})\])'
results = re.findall(regex, condition) # [('$el[0]', '0'), ('$el[1]', '1'), ...]
for result in results:
pos = eval(result[1])
if pos + 1 <= len(args):
condition = condition.replace(result[0], 'window["{}"][{}]{}'.format(
element_handle, pos, '()' if isinstance(args[pos], string_types) else ''))
self.browser.execute_script(
'window["%s"]=window.setInterval(function(){if(%s){ \
(window.clearInterval(window["%s"])||true)&&(window["%s"]=-1); \
delete window["%s"];}}, %s)' % (handle, condition, handle, handle, \
element_handle, interval)) # create interval
else:
self.browser.execute_script(
'window["%s"]=window.setInterval(function(){if(%s){ \
(window.clearInterval(window["%s"])||true)&&(window["%s"]=-1);}}, %s)' % (
handle, condition, handle, handle, interval)) # create interval
return handle | [
"def",
"wait",
"(",
"self",
",",
"condition",
",",
"interval",
",",
"*",
"args",
")",
":",
"hid",
"=",
"lambda",
":",
"'$'",
"+",
"str",
"(",
"uuid",
".",
"uuid1",
"(",
")",
")",
"[",
":",
"8",
"]",
"handle",
"=",
"hid",
"(",
")",
"if",
"len",
"(",
"args",
")",
":",
"element_handle",
"=",
"hid",
"(",
")",
"self",
".",
"browser",
".",
"execute_script",
"(",
"'window[\"{}\"] = [];'",
".",
"format",
"(",
"element_handle",
")",
")",
"for",
"el",
"in",
"args",
":",
"if",
"isinstance",
"(",
"el",
",",
"string_types",
")",
":",
"self",
".",
"browser",
".",
"execute_script",
"(",
"'window[\"{}\"].push({});'",
".",
"format",
"(",
"element_handle",
",",
"'function() { return document.querySelector(\"%s\") }'",
"%",
"el",
")",
")",
"else",
":",
"self",
".",
"browser",
".",
"execute_script",
"(",
"'window[\"{}\"].push(arguments[0]);'",
".",
"format",
"(",
"element_handle",
")",
",",
"el",
")",
"if",
"len",
"(",
"args",
")",
"==",
"1",
":",
"condition",
"=",
"condition",
".",
"replace",
"(",
"'$el'",
",",
"'window[\"{}\"][0]{}'",
".",
"format",
"(",
"element_handle",
",",
"'()'",
"if",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"string_types",
")",
"else",
"''",
")",
")",
"else",
":",
"regex",
"=",
"r'(\\$el\\[([0-9]{0,3})\\])'",
"results",
"=",
"re",
".",
"findall",
"(",
"regex",
",",
"condition",
")",
"for",
"result",
"in",
"results",
":",
"pos",
"=",
"eval",
"(",
"result",
"[",
"1",
"]",
")",
"if",
"pos",
"+",
"1",
"<=",
"len",
"(",
"args",
")",
":",
"condition",
"=",
"condition",
".",
"replace",
"(",
"result",
"[",
"0",
"]",
",",
"'window[\"{}\"][{}]{}'",
".",
"format",
"(",
"element_handle",
",",
"pos",
",",
"'()'",
"if",
"isinstance",
"(",
"args",
"[",
"pos",
"]",
",",
"string_types",
")",
"else",
"''",
")",
")",
"self",
".",
"browser",
".",
"execute_script",
"(",
"'window[\"%s\"]=window.setInterval(function(){if(%s){ \\\n (window.clearInterval(window[\"%s\"])||true)&&(window[\"%s\"]=-1); \\\n delete window[\"%s\"];}}, %s)'",
"%",
"(",
"handle",
",",
"condition",
",",
"handle",
",",
"handle",
",",
"element_handle",
",",
"interval",
")",
")",
"else",
":",
"self",
".",
"browser",
".",
"execute_script",
"(",
"'window[\"%s\"]=window.setInterval(function(){if(%s){ \\\n (window.clearInterval(window[\"%s\"])||true)&&(window[\"%s\"]=-1);}}, %s)'",
"%",
"(",
"handle",
",",
"condition",
",",
"handle",
",",
"handle",
",",
"interval",
")",
")",
"return",
"handle"
] | python | :Description: Create an interval in vm.window, will clear interval after condition met.
:param condition: Condition in javascript to pass to interval.
:example: '$el.innerText == "cheesecake"'
:example: '$el[0].disabled && $el[1].disabled'
:type condition: string
:param interval: Time in milliseconds to execute interval.
:type interval: int or float
:param *args: WebElement or selector of condition element.
:type *args: tuple
:return: string | false |
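A minimal usage sketch for the wait() helper above. The page object, selector, and poll loop are hypothetical illustrations, not part of the original code; they assume `page` is whatever object exposes this method together with the same Selenium `browser`:

handle = page.wait('$el.innerText == "cheesecake"', 500, '#dessert-label')
# The interval stores -1 in window[handle] once the condition is met, so a
# caller could poll for completion roughly like this:
import time
for _ in range(20):
    if page.browser.execute_script('return window["%s"];' % handle) == -1:
        break
    time.sleep(0.5)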
2,274,815 | def node(self, n):
"""Process each node."""
if n.id not in self.node_ids:
return
try:
self.nodes[n.id] =\
Node(n.id,
n.location.lon,
n.location.lat,
{t.k: t.v for t in n.tags})
except o.InvalidLocationError:
logging.debug('InvalidLocationError at node %s', n.id) | [
"def",
"node",
"(",
"self",
",",
"n",
")",
":",
"if",
"n",
".",
"id",
"not",
"in",
"self",
".",
"node_ids",
":",
"return",
"try",
":",
"self",
".",
"nodes",
"[",
"n",
".",
"id",
"]",
"=",
"Node",
"(",
"n",
".",
"id",
",",
"n",
".",
"location",
".",
"lon",
",",
"n",
".",
"location",
".",
"lat",
",",
"{",
"t",
".",
"k",
":",
"t",
".",
"v",
"for",
"t",
"in",
"n",
".",
"tags",
"}",
")",
"except",
"o",
".",
"InvalidLocationError",
":",
"logging",
".",
"debug",
"(",
"'InvalidLocationError at node %s'",
",",
"n",
".",
"id",
")"
] | python | Process each node. | false |
1,986,245 | def append(self, node):
"Append a new subnode"
if not isinstance(node, self.__class__):
raise TypeError('Expected Node instance, got %r' % node)
self.nodes.append(node) | [
"def",
"append",
"(",
"self",
",",
"node",
")",
":",
"if",
"not",
"isinstance",
"(",
"node",
",",
"self",
".",
"__class__",
")",
":",
"raise",
"TypeError",
"(",
"'Expected Node instance, got %r'",
"%",
"node",
")",
"self",
".",
"nodes",
".",
"append",
"(",
"node",
")"
] | python | Append a new subnode | false |
1,814,935 | def magfit(logfile):
'''find best magnetometer offset fit to a log file'''
print("Processing log %s" % filename)
# open the log file
mlog = mavutil.mavlink_connection(filename, notimestamps=args.notimestamps)
data = []
mag = None
offsets = Vector3(0,0,0)
# now gather all the data
while True:
# get the next MAVLink message in the log
m = mlog.recv_match(condition=args.condition)
if m is None:
break
if m.get_type() == "SENSOR_OFFSETS":
# update offsets that were used during this flight
offsets = Vector3(m.mag_ofs_x, m.mag_ofs_y, m.mag_ofs_z)
if m.get_type() == "RAW_IMU" and offsets != None:
# extract one mag vector, removing the offsets that were
# used during that flight to get the raw sensor values
mag = Vector3(m.xmag, m.ymag, m.zmag) - offsets
data.append(mag)
print("Extracted %u data points" % len(data))
print("Current offsets: %s" % offsets)
# run the fitting algorithm
ofs = offsets
ofs = Vector3(0,0,0)
for r in range(args.repeat):
ofs = find_offsets(data, ofs)
print('Loop %u offsets %s' % (r, ofs))
sys.stdout.flush()
print("New offsets: %s" % ofs) | [
"def",
"magfit",
"(",
"logfile",
")",
":",
"print",
"(",
"\"Processing log %s\"",
"%",
"filename",
")",
"mlog",
"=",
"mavutil",
".",
"mavlink_connection",
"(",
"filename",
",",
"notimestamps",
"=",
"args",
".",
"notimestamps",
")",
"data",
"=",
"[",
"]",
"mag",
"=",
"None",
"offsets",
"=",
"Vector3",
"(",
"0",
",",
"0",
",",
"0",
")",
"while",
"True",
":",
"m",
"=",
"mlog",
".",
"recv_match",
"(",
"condition",
"=",
"args",
".",
"condition",
")",
"if",
"m",
"is",
"None",
":",
"break",
"if",
"m",
".",
"get_type",
"(",
")",
"==",
"\"SENSOR_OFFSETS\"",
":",
"offsets",
"=",
"Vector3",
"(",
"m",
".",
"mag_ofs_x",
",",
"m",
".",
"mag_ofs_y",
",",
"m",
".",
"mag_ofs_z",
")",
"if",
"m",
".",
"get_type",
"(",
")",
"==",
"\"RAW_IMU\"",
"and",
"offsets",
"!=",
"None",
":",
"mag",
"=",
"Vector3",
"(",
"m",
".",
"xmag",
",",
"m",
".",
"ymag",
",",
"m",
".",
"zmag",
")",
"-",
"offsets",
"data",
".",
"append",
"(",
"mag",
")",
"print",
"(",
"\"Extracted %u data points\"",
"%",
"len",
"(",
"data",
")",
")",
"print",
"(",
"\"Current offsets: %s\"",
"%",
"offsets",
")",
"ofs",
"=",
"offsets",
"ofs",
"=",
"Vector3",
"(",
"0",
",",
"0",
",",
"0",
")",
"for",
"r",
"in",
"range",
"(",
"args",
".",
"repeat",
")",
":",
"ofs",
"=",
"find_offsets",
"(",
"data",
",",
"ofs",
")",
"print",
"(",
"'Loop %u offsets %s'",
"%",
"(",
"r",
",",
"ofs",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"print",
"(",
"\"New offsets: %s\"",
"%",
"ofs",
")"
] | python | find best magnetometer offset fit to a log file | false |
2,474,941 | def split_into(max_num_chunks, list_to_chunk):
"""
Yields the list with a max total size of max_num_chunks
"""
max_chunk_size = math.ceil(len(list_to_chunk) / max_num_chunks)
return chunks_of(max_chunk_size, list_to_chunk) | [
"def",
"split_into",
"(",
"max_num_chunks",
",",
"list_to_chunk",
")",
":",
"max_chunk_size",
"=",
"math",
".",
"ceil",
"(",
"len",
"(",
"list_to_chunk",
")",
"/",
"max_num_chunks",
")",
"return",
"chunks_of",
"(",
"max_chunk_size",
",",
"list_to_chunk",
")"
] | python | Yields the list with a max total size of max_num_chunks | false |
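As an illustration of split_into (assuming the chunks_of helper, which is not shown here, yields successive slices of the given size):

split_into(3, [1, 2, 3, 4, 5, 6, 7])
# max_chunk_size = ceil(7 / 3) = 3, so the chunks would be
# [1, 2, 3], [4, 5, 6], [7]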
2,169,772 | def fetchChildren(self):
""" Creates child items and returns them.
Opens the tree item first if it's not yet open.
"""
assert self._canFetchChildren, "canFetchChildren must be True"
try:
self.clearException()
if not self.isOpen:
self.open() # Will set self._exception in case of failure
if not self.isOpen:
logger.warn("Opening item failed during fetch (aborted)")
return [] # no need to continue if opening failed.
childItems = []
try:
childItems = self._fetchAllChildren()
assert is_a_sequence(childItems), "ChildItems must be a sequence"
except Exception as ex:
# This can happen, for example, when a NCDF/HDF5 file contains data types that
# are not supported by the Python library that is used to read them.
if DEBUGGING:
raise
logger.error("Unable fetch tree item children: {}".format(ex))
self.setException(ex)
return childItems
finally:
self._canFetchChildren = False | [
"def",
"fetchChildren",
"(",
"self",
")",
":",
"assert",
"self",
".",
"_canFetchChildren",
",",
"\"canFetchChildren must be True\"",
"try",
":",
"self",
".",
"clearException",
"(",
")",
"if",
"not",
"self",
".",
"isOpen",
":",
"self",
".",
"open",
"(",
")",
"if",
"not",
"self",
".",
"isOpen",
":",
"logger",
".",
"warn",
"(",
"\"Opening item failed during fetch (aborted)\"",
")",
"return",
"[",
"]",
"childItems",
"=",
"[",
"]",
"try",
":",
"childItems",
"=",
"self",
".",
"_fetchAllChildren",
"(",
")",
"assert",
"is_a_sequence",
"(",
"childItems",
")",
",",
"\"ChildItems must be a sequence\"",
"except",
"Exception",
"as",
"ex",
":",
"if",
"DEBUGGING",
":",
"raise",
"logger",
".",
"error",
"(",
"\"Unable fetch tree item children: {}\"",
".",
"format",
"(",
"ex",
")",
")",
"self",
".",
"setException",
"(",
"ex",
")",
"return",
"childItems",
"finally",
":",
"self",
".",
"_canFetchChildren",
"=",
"False"
] | python | Creates child items and returns them.
Opens the tree item first if it's not yet open. | false |
2,013,995 | def linkify(text, shorten=False, extra_params="",
require_protocol=False, permitted_protocols=["http", "https"]):
"""Converts plain text into HTML with links.
For example: ``linkify("Hello http://tornadoweb.org!")`` would return
``Hello <a href="http://tornadoweb.org">http://tornadoweb.org</a>!``
Parameters:
* ``shorten``: Long urls will be shortened for display.
* ``extra_params``: Extra text to include in the link tag, or a callable
taking the link as an argument and returning the extra text
e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``,
or::
def extra_params_cb(url):
if url.startswith("http://example.com"):
return 'class="internal"'
else:
return 'class="external" rel="nofollow"'
linkify(text, extra_params=extra_params_cb)
* ``require_protocol``: Only linkify urls which include a protocol. If
this is False, urls such as www.facebook.com will also be linkified.
* ``permitted_protocols``: List (or set) of protocols which should be
linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp",
"mailto"])``. It is very unsafe to include protocols such as
``javascript``.
"""
if extra_params and not callable(extra_params):
extra_params = " " + extra_params.strip()
def make_link(m):
url = m.group(1)
proto = m.group(2)
if require_protocol and not proto:
return url # not protocol, no linkify
if proto and proto not in permitted_protocols:
return url # bad protocol, no linkify
href = m.group(1)
if not proto:
href = "http://" + href # no proto specified, use http
if callable(extra_params):
params = " " + extra_params(href).strip()
else:
params = extra_params
# clip long urls. max_len is just an approximation
max_len = 30
if shorten and len(url) > max_len:
before_clip = url
if proto:
proto_len = len(proto) + 1 + len(m.group(3) or "") # +1 for :
else:
proto_len = 0
parts = url[proto_len:].split("/")
if len(parts) > 1:
# Grab the whole host part plus the first bit of the path
# The path is usually not that interesting once shortened
# (no more slug, etc), so it really just provides a little
# extra indication of shortening.
url = url[:proto_len] + parts[0] + "/" + \
parts[1][:8].split('?')[0].split('.')[0]
if len(url) > max_len * 1.5: # still too long
url = url[:max_len]
if url != before_clip:
amp = url.rfind('&')
# avoid splitting html char entities
if amp > max_len - 5:
url = url[:amp]
url += "..."
if len(url) >= len(before_clip):
url = before_clip
else:
# full url is visible on mouse-over (for those who don't
# have a status bar, such as Safari by default)
params += ' title="%s"' % href
return u('<a href="%s"%s>%s</a>') % (href, params, url)
# First HTML-escape so that our strings are all safe.
# The regex is modified to avoid character entities other than &amp; so
# that we won't pick up &quot;, etc.
text = _unicode(xhtml_escape(text))
return _URL_RE.sub(make_link, text) | [
"def",
"linkify",
"(",
"text",
",",
"shorten",
"=",
"False",
",",
"extra_params",
"=",
"\"\"",
",",
"require_protocol",
"=",
"False",
",",
"permitted_protocols",
"=",
"[",
"\"http\"",
",",
"\"https\"",
"]",
")",
":",
"if",
"extra_params",
"and",
"not",
"callable",
"(",
"extra_params",
")",
":",
"extra_params",
"=",
"\" \"",
"+",
"extra_params",
".",
"strip",
"(",
")",
"def",
"make_link",
"(",
"m",
")",
":",
"url",
"=",
"m",
".",
"group",
"(",
"1",
")",
"proto",
"=",
"m",
".",
"group",
"(",
"2",
")",
"if",
"require_protocol",
"and",
"not",
"proto",
":",
"return",
"url",
"if",
"proto",
"and",
"proto",
"not",
"in",
"permitted_protocols",
":",
"return",
"url",
"href",
"=",
"m",
".",
"group",
"(",
"1",
")",
"if",
"not",
"proto",
":",
"href",
"=",
"\"http://\"",
"+",
"href",
"if",
"callable",
"(",
"extra_params",
")",
":",
"params",
"=",
"\" \"",
"+",
"extra_params",
"(",
"href",
")",
".",
"strip",
"(",
")",
"else",
":",
"params",
"=",
"extra_params",
"max_len",
"=",
"30",
"if",
"shorten",
"and",
"len",
"(",
"url",
")",
">",
"max_len",
":",
"before_clip",
"=",
"url",
"if",
"proto",
":",
"proto_len",
"=",
"len",
"(",
"proto",
")",
"+",
"1",
"+",
"len",
"(",
"m",
".",
"group",
"(",
"3",
")",
"or",
"\"\"",
")",
"else",
":",
"proto_len",
"=",
"0",
"parts",
"=",
"url",
"[",
"proto_len",
":",
"]",
".",
"split",
"(",
"\"/\"",
")",
"if",
"len",
"(",
"parts",
")",
">",
"1",
":",
"url",
"=",
"url",
"[",
":",
"proto_len",
"]",
"+",
"parts",
"[",
"0",
"]",
"+",
"\"/\"",
"+",
"parts",
"[",
"1",
"]",
"[",
":",
"8",
"]",
".",
"split",
"(",
"'?'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"if",
"len",
"(",
"url",
")",
">",
"max_len",
"*",
"1.5",
":",
"url",
"=",
"url",
"[",
":",
"max_len",
"]",
"if",
"url",
"!=",
"before_clip",
":",
"amp",
"=",
"url",
".",
"rfind",
"(",
"'&'",
")",
"if",
"amp",
">",
"max_len",
"-",
"5",
":",
"url",
"=",
"url",
"[",
":",
"amp",
"]",
"url",
"+=",
"\"...\"",
"if",
"len",
"(",
"url",
")",
">=",
"len",
"(",
"before_clip",
")",
":",
"url",
"=",
"before_clip",
"else",
":",
"params",
"+=",
"' title=\"%s\"'",
"%",
"href",
"return",
"u",
"(",
"'<a href=\"%s\"%s>%s</a>'",
")",
"%",
"(",
"href",
",",
"params",
",",
"url",
")",
"text",
"=",
"_unicode",
"(",
"xhtml_escape",
"(",
"text",
")",
")",
"return",
"_URL_RE",
".",
"sub",
"(",
"make_link",
",",
"text",
")"
] | python | Converts plain text into HTML with links.
For example: ``linkify("Hello http://tornadoweb.org!")`` would return
``Hello <a href="http://tornadoweb.org">http://tornadoweb.org</a>!``
Parameters:
* ``shorten``: Long urls will be shortened for display.
* ``extra_params``: Extra text to include in the link tag, or a callable
taking the link as an argument and returning the extra text
e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``,
or::
def extra_params_cb(url):
if url.startswith("http://example.com"):
return 'class="internal"'
else:
return 'class="external" rel="nofollow"'
linkify(text, extra_params=extra_params_cb)
* ``require_protocol``: Only linkify urls which include a protocol. If
this is False, urls such as www.facebook.com will also be linkified.
* ``permitted_protocols``: List (or set) of protocols which should be
linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp",
"mailto"])``. It is very unsafe to include protocols such as
``javascript``. | false |
1,935,424 | def read(self, count=None, block=None, last_id=None):
"""
Monitor the stream for new messages within the context of the parent
:py:class:`ConsumerGroup`.
:param int count: limit number of messages returned
:param int block: milliseconds to block, 0 for indefinitely.
:param str last_id: optional last ID, by default uses the special
token ">", which reads the oldest unread message.
:returns: a list of (message id, data) 2-tuples.
"""
key = {self.key: '>' if last_id is None else last_id}
resp = self.database.xreadgroup(self.group, self._consumer, key, count,
block)
return resp[0][1] if resp else [] | [
"def",
"read",
"(",
"self",
",",
"count",
"=",
"None",
",",
"block",
"=",
"None",
",",
"last_id",
"=",
"None",
")",
":",
"key",
"=",
"{",
"self",
".",
"key",
":",
"'>'",
"if",
"last_id",
"is",
"None",
"else",
"last_id",
"}",
"resp",
"=",
"self",
".",
"database",
".",
"xreadgroup",
"(",
"self",
".",
"group",
",",
"self",
".",
"_consumer",
",",
"key",
",",
"count",
",",
"block",
")",
"return",
"resp",
"[",
"0",
"]",
"[",
"1",
"]",
"if",
"resp",
"else",
"[",
"]"
] | python | Monitor the stream for new messages within the context of the parent
:py:class:`ConsumerGroup`.
:param int count: limit number of messages returned
:param int block: milliseconds to block, 0 for indefinitely.
:param str last_id: optional last ID, by default uses the special
token ">", which reads the oldest unread message.
:returns: a list of (message id, data) 2-tuples. | false |
2,501,785 | def score_candidates(nodes):
"""Given a list of potential nodes, find some initial scores to start"""
MIN_HIT_LENTH = 25
candidates = {}
for node in nodes:
logger.debug("* Scoring candidate %s %r", node.tag, node.attrib)
# if the node has no parent it knows of then it ends up creating a
# body & html tag to parent the html fragment
parent = node.getparent()
if parent is None:
logger.debug("Skipping candidate - parent node is 'None'.")
continue
grand = parent.getparent()
if grand is None:
logger.debug("Skipping candidate - grand parent node is 'None'.")
continue
# if paragraph is < `MIN_HIT_LENTH` characters don't even count it
inner_text = node.text_content().strip()
if len(inner_text) < MIN_HIT_LENTH:
logger.debug(
"Skipping candidate - inner text < %d characters.",
MIN_HIT_LENTH)
continue
# initialize readability data for the parent
# add parent node if it isn't in the candidate list
if parent not in candidates:
candidates[parent] = ScoredNode(parent)
if grand not in candidates:
candidates[grand] = ScoredNode(grand)
# add a point for the paragraph itself as a base
content_score = 1
if inner_text:
# add 0.25 points for any commas within this paragraph
commas_count = inner_text.count(",")
content_score += commas_count * 0.25
logger.debug("Bonus points for %d commas.", commas_count)
# subtract 0.5 points for each double quote within this paragraph
double_quotes_count = inner_text.count('"')
content_score += double_quotes_count * -0.5
logger.debug(
"Penalty points for %d double-quotes.", double_quotes_count)
# for every 100 characters in this paragraph, add another point
# up to 3 points
length_points = len(inner_text) / 100
content_score += min(length_points, 3.0)
logger.debug("Bonus points for length of text: %f", length_points)
# add the score to the parent
logger.debug(
"Bonus points for parent %s %r with score %f: %f",
parent.tag, parent.attrib, candidates[parent].content_score,
content_score)
candidates[parent].content_score += content_score
# the grand node gets half
logger.debug(
"Bonus points for grand %s %r with score %f: %f",
grand.tag, grand.attrib, candidates[grand].content_score,
content_score / 2.0)
candidates[grand].content_score += content_score / 2.0
if node not in candidates:
candidates[node] = ScoredNode(node)
candidates[node].content_score += content_score
for candidate in candidates.values():
adjustment = 1.0 - get_link_density(candidate.node)
candidate.content_score *= adjustment
logger.debug(
"Link density adjustment for %s %r: %f",
candidate.node.tag, candidate.node.attrib, adjustment)
return candidates | [
"def",
"score_candidates",
"(",
"nodes",
")",
":",
"MIN_HIT_LENTH",
"=",
"25",
"candidates",
"=",
"{",
"}",
"for",
"node",
"in",
"nodes",
":",
"logger",
".",
"debug",
"(",
"\"* Scoring candidate %s %r\"",
",",
"node",
".",
"tag",
",",
"node",
".",
"attrib",
")",
"parent",
"=",
"node",
".",
"getparent",
"(",
")",
"if",
"parent",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"\"Skipping candidate - parent node is 'None'.\"",
")",
"continue",
"grand",
"=",
"parent",
".",
"getparent",
"(",
")",
"if",
"grand",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"\"Skipping candidate - grand parent node is 'None'.\"",
")",
"continue",
"inner_text",
"=",
"node",
".",
"text_content",
"(",
")",
".",
"strip",
"(",
")",
"if",
"len",
"(",
"inner_text",
")",
"<",
"MIN_HIT_LENTH",
":",
"logger",
".",
"debug",
"(",
"\"Skipping candidate - inner text < %d characters.\"",
",",
"MIN_HIT_LENTH",
")",
"continue",
"if",
"parent",
"not",
"in",
"candidates",
":",
"candidates",
"[",
"parent",
"]",
"=",
"ScoredNode",
"(",
"parent",
")",
"if",
"grand",
"not",
"in",
"candidates",
":",
"candidates",
"[",
"grand",
"]",
"=",
"ScoredNode",
"(",
"grand",
")",
"content_score",
"=",
"1",
"if",
"inner_text",
":",
"commas_count",
"=",
"inner_text",
".",
"count",
"(",
"\",\"",
")",
"content_score",
"+=",
"commas_count",
"*",
"0.25",
"logger",
".",
"debug",
"(",
"\"Bonus points for %d commas.\"",
",",
"commas_count",
")",
"double_quotes_count",
"=",
"inner_text",
".",
"count",
"(",
"'\"'",
")",
"content_score",
"+=",
"double_quotes_count",
"*",
"-",
"0.5",
"logger",
".",
"debug",
"(",
"\"Penalty points for %d double-quotes.\"",
",",
"double_quotes_count",
")",
"length_points",
"=",
"len",
"(",
"inner_text",
")",
"/",
"100",
"content_score",
"+=",
"min",
"(",
"length_points",
",",
"3.0",
")",
"logger",
".",
"debug",
"(",
"\"Bonus points for length of text: %f\"",
",",
"length_points",
")",
"logger",
".",
"debug",
"(",
"\"Bonus points for parent %s %r with score %f: %f\"",
",",
"parent",
".",
"tag",
",",
"parent",
".",
"attrib",
",",
"candidates",
"[",
"parent",
"]",
".",
"content_score",
",",
"content_score",
")",
"candidates",
"[",
"parent",
"]",
".",
"content_score",
"+=",
"content_score",
"logger",
".",
"debug",
"(",
"\"Bonus points for grand %s %r with score %f: %f\"",
",",
"grand",
".",
"tag",
",",
"grand",
".",
"attrib",
",",
"candidates",
"[",
"grand",
"]",
".",
"content_score",
",",
"content_score",
"/",
"2.0",
")",
"candidates",
"[",
"grand",
"]",
".",
"content_score",
"+=",
"content_score",
"/",
"2.0",
"if",
"node",
"not",
"in",
"candidates",
":",
"candidates",
"[",
"node",
"]",
"=",
"ScoredNode",
"(",
"node",
")",
"candidates",
"[",
"node",
"]",
".",
"content_score",
"+=",
"content_score",
"for",
"candidate",
"in",
"candidates",
".",
"values",
"(",
")",
":",
"adjustment",
"=",
"1.0",
"-",
"get_link_density",
"(",
"candidate",
".",
"node",
")",
"candidate",
".",
"content_score",
"*=",
"adjustment",
"logger",
".",
"debug",
"(",
"\"Link density adjustment for %s %r: %f\"",
",",
"candidate",
".",
"node",
".",
"tag",
",",
"candidate",
".",
"node",
".",
"attrib",
",",
"adjustment",
")",
"return",
"candidates"
] | python | Given a list of potential nodes, find some initial scores to start | false |
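A small worked example of the per-paragraph arithmetic implemented above (the numbers are invented for illustration and assume Python 3 division):

# A 250-character paragraph with 4 commas and 1 double quote contributes
content_score = 1 + 4 * 0.25 + 1 * -0.5 + min(250 / 100, 3.0)
# = 1 + 1.0 - 0.5 + 2.5 = 4.0 points to its parent and 2.0 to its grandparent,
# with all candidate scores later scaled by (1 - link_density) of their node.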
2,479,782 | def encode_coin_link(copper, silver=0, gold=0):
"""Encode a chat link for an amount of coins.
"""
return encode_chat_link(gw2api.TYPE_COIN, copper=copper, silver=silver,
gold=gold) | [
"def",
"encode_coin_link",
"(",
"copper",
",",
"silver",
"=",
"0",
",",
"gold",
"=",
"0",
")",
":",
"return",
"encode_chat_link",
"(",
"gw2api",
".",
"TYPE_COIN",
",",
"copper",
"=",
"copper",
",",
"silver",
"=",
"silver",
",",
"gold",
"=",
"gold",
")"
] | python | Encode a chat link for an amount of coins. | false |
2,376,463 | def call(self, itemMethod):
"""
Invoke the given bound item method in the batch process.
Return a Deferred which fires when the method has been invoked.
"""
item = itemMethod.im_self
method = itemMethod.im_func.func_name
return self.batchController.getProcess().addCallback(
CallItemMethod(storepath=item.store.dbdir,
storeid=item.storeID,
method=method).do) | [
"def",
"call",
"(",
"self",
",",
"itemMethod",
")",
":",
"item",
"=",
"itemMethod",
".",
"im_self",
"method",
"=",
"itemMethod",
".",
"im_func",
".",
"func_name",
"return",
"self",
".",
"batchController",
".",
"getProcess",
"(",
")",
".",
"addCallback",
"(",
"CallItemMethod",
"(",
"storepath",
"=",
"item",
".",
"store",
".",
"dbdir",
",",
"storeid",
"=",
"item",
".",
"storeID",
",",
"method",
"=",
"method",
")",
".",
"do",
")"
] | python | Invoke the given bound item method in the batch process.
Return a Deferred which fires when the method has been invoked. | false |
2,587,870 | def do_scan(self, line):
"""
scan [:tablename] [--batch=#] [-{max}] [+filter_attribute=filter_value] [attributes,...]
filter_attribute is either the field name to filter on or a field name with a conditional, as specified in boto's documentation,
in the form of {name}__{conditional} where conditional is:
eq (equal value)
ne {value} (not equal value)
lte (less than or equal to value)
lt (less than value)
gte (greater than or equal to value)
gt (greater than value)
null (value is null / does not exist - pass true/false)
contains (contains value)
ncontains (does not contain value)
beginswith (attribute begins with value)
in (value in range)
between (between value1 and value2 - use: between=value1,value2)
"""
table, line = self.get_table_params(line)
args = self.getargs(line)
scan_filter = {}
#count = False
as_array = False
max_size = None
batch_size = None
start = None
cond = None
while args:
if args[0].startswith('+'):
arg = args.pop(0)
filter_name, filter_value = arg[1:].split('=', 1)
if "__" not in filter_name:
filter_name += "__eq"
if filter_name.endswith("__null"):
scan_filter[filter_name] = filter_value == "true"
else:
scan_filter[filter_name] = self.get_typed_value(filter_name, filter_value)
elif args[0].startswith('--batch='):
arg = args.pop(0)
batch_size = int(arg[8:])
elif args[0].startswith('--max='):
arg = args.pop(0)
max_size = int(arg[6:])
elif args[0].startswith('--start='):
arg = args.pop(0)
start = (arg[8:], )
elif args[0] == "--and":
args.pop(0)
cond = "AND"
elif args[0] == "--or":
args.pop(0)
cond = "OR"
elif args[0] == '--next':
arg = args.pop(0)
if self.next_key:
start = self.next_key
else:
print "no next"
return
elif args[0] == '-a' or args[0] == '--array':
as_array = True
args.pop(0)
elif args[0].startswith('-'):
arg = args.pop(0)
#if arg == '-c' or arg == '--count':
# count = True
if arg[0] == '-' and arg[1:].isdigit():
max_size = int(arg[1:])
elif arg == '--':
break
else:
print "invalid argument: %s" % arg
break
else:
break
attr_keys = args[0].split(",") if args else None
attrs = list(set(attr_keys)) if attr_keys else None
result = table.scan(limit=max_size, max_page_size=batch_size, attributes=attrs, conditional_operator=cond, exclusive_start_key=start, **scan_filter)
#
# enable this if you want to see when pages are fetched
#
if False:
_fetch_more = result.fetch_more
def fetch_more():
print "==== fetch page ===="
_fetch_more()
result.fetch_more = fetch_more
if False: # count:
print "count: %s/%s" % (result.scanned_count, result.count)
self.next_key = None
else:
if as_array and attr_keys:
self.print_iterator_array(result, attr_keys)
else:
self.print_iterator(result)
self.next_key = result._last_key_seen
if self.consumed:
print "consumed units:", result.consumed_units | [
"def",
"do_scan",
"(",
"self",
",",
"line",
")",
":",
"table",
",",
"line",
"=",
"self",
".",
"get_table_params",
"(",
"line",
")",
"args",
"=",
"self",
".",
"getargs",
"(",
"line",
")",
"scan_filter",
"=",
"{",
"}",
"as_array",
"=",
"False",
"max_size",
"=",
"None",
"batch_size",
"=",
"None",
"start",
"=",
"None",
"cond",
"=",
"None",
"while",
"args",
":",
"if",
"args",
"[",
"0",
"]",
".",
"startswith",
"(",
"'+'",
")",
":",
"arg",
"=",
"args",
".",
"pop",
"(",
"0",
")",
"filter_name",
",",
"filter_value",
"=",
"arg",
"[",
"1",
":",
"]",
".",
"split",
"(",
"'='",
",",
"1",
")",
"if",
"\"__\"",
"not",
"in",
"filter_name",
":",
"filter_name",
"+=",
"\"__eq\"",
"if",
"filter_name",
".",
"endswith",
"(",
"\"__null\"",
")",
":",
"scan_filter",
"[",
"filter_name",
"]",
"=",
"filter_value",
"==",
"\"true\"",
"else",
":",
"scan_filter",
"[",
"filter_name",
"]",
"=",
"self",
".",
"get_typed_value",
"(",
"filter_name",
",",
"filter_value",
")",
"elif",
"args",
"[",
"0",
"]",
".",
"startswith",
"(",
"'--batch='",
")",
":",
"arg",
"=",
"args",
".",
"pop",
"(",
"0",
")",
"batch_size",
"=",
"int",
"(",
"arg",
"[",
"8",
":",
"]",
")",
"elif",
"args",
"[",
"0",
"]",
".",
"startswith",
"(",
"'--max='",
")",
":",
"arg",
"=",
"args",
".",
"pop",
"(",
"0",
")",
"max_size",
"=",
"int",
"(",
"arg",
"[",
"6",
":",
"]",
")",
"elif",
"args",
"[",
"0",
"]",
".",
"startswith",
"(",
"'--start='",
")",
":",
"arg",
"=",
"args",
".",
"pop",
"(",
"0",
")",
"start",
"=",
"(",
"arg",
"[",
"8",
":",
"]",
",",
")",
"elif",
"args",
"[",
"0",
"]",
"==",
"\"--and\"",
":",
"args",
".",
"pop",
"(",
"0",
")",
"cond",
"=",
"\"AND\"",
"elif",
"args",
"[",
"0",
"]",
"==",
"\"--or\"",
":",
"args",
".",
"pop",
"(",
"0",
")",
"cond",
"=",
"\"OR\"",
"elif",
"args",
"[",
"0",
"]",
"==",
"'--next'",
":",
"arg",
"=",
"args",
".",
"pop",
"(",
"0",
")",
"if",
"self",
".",
"next_key",
":",
"start",
"=",
"self",
".",
"next_key",
"else",
":",
"print",
"\"no next\"",
"return",
"elif",
"args",
"[",
"0",
"]",
"==",
"'-a'",
"or",
"args",
"[",
"0",
"]",
"==",
"'--array'",
":",
"as_array",
"=",
"True",
"args",
".",
"pop",
"(",
"0",
")",
"elif",
"args",
"[",
"0",
"]",
".",
"startswith",
"(",
"'-'",
")",
":",
"arg",
"=",
"args",
".",
"pop",
"(",
"0",
")",
"if",
"arg",
"[",
"0",
"]",
"==",
"'-'",
"and",
"arg",
"[",
"1",
":",
"]",
".",
"isdigit",
"(",
")",
":",
"max_size",
"=",
"int",
"(",
"arg",
"[",
"1",
":",
"]",
")",
"elif",
"arg",
"==",
"'--'",
":",
"break",
"else",
":",
"print",
"\"invalid argument: %s\"",
"%",
"arg",
"break",
"else",
":",
"break",
"attr_keys",
"=",
"args",
"[",
"0",
"]",
".",
"split",
"(",
"\",\"",
")",
"if",
"args",
"else",
"None",
"attrs",
"=",
"list",
"(",
"set",
"(",
"attr_keys",
")",
")",
"if",
"attr_keys",
"else",
"None",
"result",
"=",
"table",
".",
"scan",
"(",
"limit",
"=",
"max_size",
",",
"max_page_size",
"=",
"batch_size",
",",
"attributes",
"=",
"attrs",
",",
"conditional_operator",
"=",
"cond",
",",
"exclusive_start_key",
"=",
"start",
",",
"**",
"scan_filter",
")",
"if",
"False",
":",
"_fetch_more",
"=",
"result",
".",
"fetch_more",
"def",
"fetch_more",
"(",
")",
":",
"print",
"\"==== fetch page ====\"",
"_fetch_more",
"(",
")",
"result",
".",
"fetch_more",
"=",
"fetch_more",
"if",
"False",
":",
"print",
"\"count: %s/%s\"",
"%",
"(",
"result",
".",
"scanned_count",
",",
"result",
".",
"count",
")",
"self",
".",
"next_key",
"=",
"None",
"else",
":",
"if",
"as_array",
"and",
"attr_keys",
":",
"self",
".",
"print_iterator_array",
"(",
"result",
",",
"attr_keys",
")",
"else",
":",
"self",
".",
"print_iterator",
"(",
"result",
")",
"self",
".",
"next_key",
"=",
"result",
".",
"_last_key_seen",
"if",
"self",
".",
"consumed",
":",
"print",
"\"consumed units:\"",
",",
"result",
".",
"consumed_units"
] | python | scan [:tablename] [--batch=#] [-{max}] [+filter_attribute=filter_value] [attributes,...]
filter_attribute is either the field name to filter on or a field name with a conditional, as specified in boto's documentation,
in the form of {name}__{conditional} where conditional is:
eq (equal value)
ne {value} (not equal value)
lte (less than or equal to value)
lt (less than value)
gte (greater than or equal to value)
gt (greater than value)
null (value is null / does not exist - pass true/false)
contains (contains value)
ncontains (does not contain value)
beginswith (attribute begins with value)
in (value in range)
between (between value1 and value2 - use: between=value1,value2) | false |
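Illustrative invocations of the scan syntax documented above (the table, attribute, and value names are made up):

scan :users --max=10 +status__eq=active +age__between=18,30 name,email
scan :orders --batch=100 --and +total__gt=50 +archived__null=true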
2,385,278 | def machineCurrents(self, Xg, U):
""" Based on MachineCurrents.m from MatDyn by Stijn Cole, developed at
Katholieke Universiteit Leuven. See U{http://www.esat.kuleuven.be/
electa/teaching/matdyn/} for more information.
@param Xg: Generator state variables.
@param U: Generator voltages.
@rtype: tuple
@return: Currents and electric power of generators.
"""
generators = self.dyn_generators
# Initialise.
ng = len(generators)
Id = zeros(ng)
Iq = zeros(ng)
Pe = zeros(ng)
typ1 = [g._i for g in generators if g.model == CLASSICAL]
typ2 = [g._i for g in generators if g.model == FOURTH_ORDER]
# Generator type 1: classical model
delta = Xg[typ1, 0]
Eq_tr = Xg[typ1, 2]
xd = array([g.xd for g in generators])
Pe[typ1] = \
1 / xd * abs(U[typ1]) * abs(Eq_tr) * sin(delta - angle(U[typ1]))
# Generator type 2: 4th order model
delta = Xg[typ1, 0]
Eq_tr = Xg[typ1, 2]
Ed_tr = Xg[typ1, 3]
xd_tr = array([g.xd_tr for g in generators])
xq_tr = array([g.xq_tr for g in generators])
theta = angle(U)
# Transform U to rotor frame of reference.
vd = -abs(U[typ2]) * sin(delta - theta[typ2])
vq = abs(U[typ2]) * cos(delta - theta[typ2])
Id[typ2] = (vq - Eq_tr) / xd_tr
Iq[typ2] = -(vd - Ed_tr) / xq_tr
Pe[typ2] = \
Eq_tr * Iq[typ2] + Ed_tr * Id[typ2] + \
(xd_tr - xq_tr) * Id[typ2] * Iq[typ2]
return Id, Iq, Pe | [
"def",
"machineCurrents",
"(",
"self",
",",
"Xg",
",",
"U",
")",
":",
"generators",
"=",
"self",
".",
"dyn_generators",
"ng",
"=",
"len",
"(",
"generators",
")",
"Id",
"=",
"zeros",
"(",
"ng",
")",
"Iq",
"=",
"zeros",
"(",
"ng",
")",
"Pe",
"=",
"zeros",
"(",
"ng",
")",
"typ1",
"=",
"[",
"g",
".",
"_i",
"for",
"g",
"in",
"generators",
"if",
"g",
".",
"model",
"==",
"CLASSICAL",
"]",
"typ2",
"=",
"[",
"g",
".",
"_i",
"for",
"g",
"in",
"generators",
"if",
"g",
".",
"model",
"==",
"FOURTH_ORDER",
"]",
"delta",
"=",
"Xg",
"[",
"typ1",
",",
"0",
"]",
"Eq_tr",
"=",
"Xg",
"[",
"typ1",
",",
"2",
"]",
"xd",
"=",
"array",
"(",
"[",
"g",
".",
"xd",
"for",
"g",
"in",
"generators",
"]",
")",
"Pe",
"[",
"typ1",
"]",
"=",
"1",
"/",
"xd",
"*",
"abs",
"(",
"U",
"[",
"typ1",
"]",
")",
"*",
"abs",
"(",
"Eq_tr",
")",
"*",
"sin",
"(",
"delta",
"-",
"angle",
"(",
"U",
"[",
"typ1",
"]",
")",
")",
"delta",
"=",
"Xg",
"[",
"typ1",
",",
"0",
"]",
"Eq_tr",
"=",
"Xg",
"[",
"typ1",
",",
"2",
"]",
"Ed_tr",
"=",
"Xg",
"[",
"typ1",
",",
"3",
"]",
"xd_tr",
"=",
"array",
"(",
"[",
"g",
".",
"xd_tr",
"for",
"g",
"in",
"generators",
"]",
")",
"xq_tr",
"=",
"array",
"(",
"[",
"g",
".",
"xq_tr",
"for",
"g",
"in",
"generators",
"]",
")",
"theta",
"=",
"angle",
"(",
"U",
")",
"vd",
"=",
"-",
"abs",
"(",
"U",
"[",
"typ2",
"]",
")",
"*",
"sin",
"(",
"delta",
"-",
"theta",
"[",
"typ2",
"]",
")",
"vq",
"=",
"abs",
"(",
"U",
"[",
"typ2",
"]",
")",
"*",
"cos",
"(",
"delta",
"-",
"theta",
"[",
"typ2",
"]",
")",
"Id",
"[",
"typ2",
"]",
"=",
"(",
"vq",
"-",
"Eq_tr",
")",
"/",
"xd_tr",
"Iq",
"[",
"typ2",
"]",
"=",
"-",
"(",
"vd",
"-",
"Ed_tr",
")",
"/",
"xq_tr",
"Pe",
"[",
"typ2",
"]",
"=",
"Eq_tr",
"*",
"Iq",
"[",
"typ2",
"]",
"+",
"Ed_tr",
"*",
"Id",
"[",
"typ2",
"]",
"+",
"(",
"xd_tr",
"-",
"xq_tr",
")",
"*",
"Id",
"[",
"typ2",
"]",
"*",
"Iq",
"[",
"typ2",
"]",
"return",
"Id",
",",
"Iq",
",",
"Pe"
] | python | Based on MachineCurrents.m from MatDyn by Stijn Cole, developed at
Katholieke Universiteit Leuven. See U{http://www.esat.kuleuven.be/
electa/teaching/matdyn/} for more information.
@param Xg: Generator state variables.
@param U: Generator voltages.
@rtype: tuple
@return: Currents and electric power of generators. | false |
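Restated as equations, the quantities computed above are (notation follows the code; this is only a transcription of the formulas the two branches implement):

Classical model:    P_e = |U| |E_q'| \sin(\delta - \theta) / x_d
Fourth-order model: v_d = -|U| \sin(\delta - \theta),  v_q = |U| \cos(\delta - \theta)
                    I_d = (v_q - E_q') / x_d',  I_q = -(v_d - E_d') / x_q'
                    P_e = E_q' I_q + E_d' I_d + (x_d' - x_q') I_d I_q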
2,556,753 | def _assembled_out_file_name(self):
"""Checks file name is set for assembled output.
Returns absolute path."""
if self.Parameters['-s'].isOn():
assembled_reads = self._absolute(str(self.Parameters['-s'].Value))
else:
raise ValueError(
"No assembled-reads (flag -s) output path specified")
return assembled_reads | [
"def",
"_assembled_out_file_name",
"(",
"self",
")",
":",
"if",
"self",
".",
"Parameters",
"[",
"'-s'",
"]",
".",
"isOn",
"(",
")",
":",
"assembled_reads",
"=",
"self",
".",
"_absolute",
"(",
"str",
"(",
"self",
".",
"Parameters",
"[",
"'-s'",
"]",
".",
"Value",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"No assembled-reads (flag -s) output path specified\"",
")",
"return",
"assembled_reads"
] | python | Checks file name is set for assembled output.
Returns absolute path. | false |
1,581,034 | def get_longest_non_repeat_v2(string):
"""
Find the length of the longest substring
without repeating characters.
Uses alternative algorithm.
Return max_len and the substring as a tuple
"""
if string is None:
return 0, ''
sub_string = ''
start, max_len = 0, 0
used_char = {}
for index, char in enumerate(string):
if char in used_char and start <= used_char[char]:
start = used_char[char] + 1
else:
if index - start + 1 > max_len:
max_len = index - start + 1
sub_string = string[start: index + 1]
used_char[char] = index
return max_len, sub_string | [
"def",
"get_longest_non_repeat_v2",
"(",
"string",
")",
":",
"if",
"string",
"is",
"None",
":",
"return",
"0",
",",
"''",
"sub_string",
"=",
"''",
"start",
",",
"max_len",
"=",
"0",
",",
"0",
"used_char",
"=",
"{",
"}",
"for",
"index",
",",
"char",
"in",
"enumerate",
"(",
"string",
")",
":",
"if",
"char",
"in",
"used_char",
"and",
"start",
"<=",
"used_char",
"[",
"char",
"]",
":",
"start",
"=",
"used_char",
"[",
"char",
"]",
"+",
"1",
"else",
":",
"if",
"index",
"-",
"start",
"+",
"1",
">",
"max_len",
":",
"max_len",
"=",
"index",
"-",
"start",
"+",
"1",
"sub_string",
"=",
"string",
"[",
"start",
":",
"index",
"+",
"1",
"]",
"used_char",
"[",
"char",
"]",
"=",
"index",
"return",
"max_len",
",",
"sub_string"
] | python | Find the length of the longest substring
without repeating characters.
Uses alternative algorithm.
Return max_len and the substring as a tuple | false |
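For example (traced by hand against the code above):

get_longest_non_repeat_v2("abcabcbb")  # returns (3, 'abc')
get_longest_non_repeat_v2(None)        # returns (0, '')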
1,988,802 | def writeSamples(self, data_list, digital = False):
"""
Writes physical samples (uV, mA, Ohm) from data belonging to all signals.
The physical samples will be converted to digital samples using the values
of physical maximum, physical minimum, digital maximum and digital minimum.
If the sample frequency of all signals is equal, the data can be
saved into a matrix with the size (N, signals). If the sample frequencies
differ, then sample_freq is a vector containing all the different
sample frequencies. The data is saved as a list; each list entry contains
a vector with the data of one signal.
If digital is True, digital signals (as directly from the ADC) will be expected
(e.g. int16 from 0 to 2048).
All parameters must already be written into the bdf/edf-file.
"""
if (len(data_list) != len(self.channels)):
raise WrongInputSize(len(data_list))
if digital:
if any([not np.issubdtype(a.dtype, np.integer) for a in data_list]):
raise TypeError('Digital = True requires all signals in int')
ind = []
notAtEnd = True
for i in np.arange(len(data_list)):
ind.append(0)
sampleLength = 0
sampleRates = np.zeros(len(data_list), dtype=np.int)
for i in np.arange(len(data_list)):
sampleRates[i] = self.channels[i]['sample_rate']
if (np.size(data_list[i]) < ind[i] + self.channels[i]['sample_rate']):
notAtEnd = False
sampleLength += self.channels[i]['sample_rate']
dataOfOneSecond = np.array([], dtype=np.int if digital else None)
while notAtEnd:
# dataOfOneSecondInd = 0
del dataOfOneSecond
dataOfOneSecond = np.array([], dtype=np.int if digital else None)
for i in np.arange(len(data_list)):
# dataOfOneSecond[dataOfOneSecondInd:dataOfOneSecondInd+self.channels[i]['sample_rate']] = data_list[i].ravel()[int(ind[i]):int(ind[i]+self.channels[i]['sample_rate'])]
dataOfOneSecond = np.append(dataOfOneSecond,data_list[i].ravel()[int(ind[i]):int(ind[i]+sampleRates[i])])
# self.writePhysicalSamples(data_list[i].ravel()[int(ind[i]):int(ind[i]+self.channels[i]['sample_rate'])])
ind[i] += sampleRates[i]
# dataOfOneSecondInd += sampleRates[i]
if digital:
self.blockWriteDigitalSamples(dataOfOneSecond)
else:
self.blockWritePhysicalSamples(dataOfOneSecond)
for i in np.arange(len(data_list)):
if (np.size(data_list[i]) < ind[i] + sampleRates[i]):
notAtEnd = False
# dataOfOneSecondInd = 0
for i in np.arange(len(data_list)):
lastSamples = np.zeros(sampleRates[i], dtype=np.int if digital else None)
lastSampleInd = int(np.max(data_list[i].shape) - ind[i])
lastSampleInd = int(np.min((lastSampleInd,sampleRates[i])))
if lastSampleInd > 0:
lastSamples[:lastSampleInd] = data_list[i].ravel()[-lastSampleInd:]
# dataOfOneSecond[dataOfOneSecondInd:dataOfOneSecondInd+self.channels[i]['sample_rate']] = lastSamples
# dataOfOneSecondInd += self.channels[i]['sample_rate']
if digital:
self.writeDigitalSamples(lastSamples)
else:
self.writePhysicalSamples(lastSamples) | [
"def",
"writeSamples",
"(",
"self",
",",
"data_list",
",",
"digital",
"=",
"False",
")",
":",
"if",
"(",
"len",
"(",
"data_list",
")",
"!=",
"len",
"(",
"self",
".",
"channels",
")",
")",
":",
"raise",
"WrongInputSize",
"(",
"len",
"(",
"data_list",
")",
")",
"if",
"digital",
":",
"if",
"any",
"(",
"[",
"not",
"np",
".",
"issubdtype",
"(",
"a",
".",
"dtype",
",",
"np",
".",
"integer",
")",
"for",
"a",
"in",
"data_list",
"]",
")",
":",
"raise",
"TypeError",
"(",
"'Digital = True requires all signals in int'",
")",
"ind",
"=",
"[",
"]",
"notAtEnd",
"=",
"True",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"len",
"(",
"data_list",
")",
")",
":",
"ind",
".",
"append",
"(",
"0",
")",
"sampleLength",
"=",
"0",
"sampleRates",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"data_list",
")",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"len",
"(",
"data_list",
")",
")",
":",
"sampleRates",
"[",
"i",
"]",
"=",
"self",
".",
"channels",
"[",
"i",
"]",
"[",
"'sample_rate'",
"]",
"if",
"(",
"np",
".",
"size",
"(",
"data_list",
"[",
"i",
"]",
")",
"<",
"ind",
"[",
"i",
"]",
"+",
"self",
".",
"channels",
"[",
"i",
"]",
"[",
"'sample_rate'",
"]",
")",
":",
"notAtEnd",
"=",
"False",
"sampleLength",
"+=",
"self",
".",
"channels",
"[",
"i",
"]",
"[",
"'sample_rate'",
"]",
"dataOfOneSecond",
"=",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"np",
".",
"int",
"if",
"digital",
"else",
"None",
")",
"while",
"notAtEnd",
":",
"del",
"dataOfOneSecond",
"dataOfOneSecond",
"=",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"np",
".",
"int",
"if",
"digital",
"else",
"None",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"len",
"(",
"data_list",
")",
")",
":",
"dataOfOneSecond",
"=",
"np",
".",
"append",
"(",
"dataOfOneSecond",
",",
"data_list",
"[",
"i",
"]",
".",
"ravel",
"(",
")",
"[",
"int",
"(",
"ind",
"[",
"i",
"]",
")",
":",
"int",
"(",
"ind",
"[",
"i",
"]",
"+",
"sampleRates",
"[",
"i",
"]",
")",
"]",
")",
"ind",
"[",
"i",
"]",
"+=",
"sampleRates",
"[",
"i",
"]",
"if",
"digital",
":",
"self",
".",
"blockWriteDigitalSamples",
"(",
"dataOfOneSecond",
")",
"else",
":",
"self",
".",
"blockWritePhysicalSamples",
"(",
"dataOfOneSecond",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"len",
"(",
"data_list",
")",
")",
":",
"if",
"(",
"np",
".",
"size",
"(",
"data_list",
"[",
"i",
"]",
")",
"<",
"ind",
"[",
"i",
"]",
"+",
"sampleRates",
"[",
"i",
"]",
")",
":",
"notAtEnd",
"=",
"False",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"len",
"(",
"data_list",
")",
")",
":",
"lastSamples",
"=",
"np",
".",
"zeros",
"(",
"sampleRates",
"[",
"i",
"]",
",",
"dtype",
"=",
"np",
".",
"int",
"if",
"digital",
"else",
"None",
")",
"lastSampleInd",
"=",
"int",
"(",
"np",
".",
"max",
"(",
"data_list",
"[",
"i",
"]",
".",
"shape",
")",
"-",
"ind",
"[",
"i",
"]",
")",
"lastSampleInd",
"=",
"int",
"(",
"np",
".",
"min",
"(",
"(",
"lastSampleInd",
",",
"sampleRates",
"[",
"i",
"]",
")",
")",
")",
"if",
"lastSampleInd",
">",
"0",
":",
"lastSamples",
"[",
":",
"lastSampleInd",
"]",
"=",
"data_list",
"[",
"i",
"]",
".",
"ravel",
"(",
")",
"[",
"-",
"lastSampleInd",
":",
"]",
"if",
"digital",
":",
"self",
".",
"writeDigitalSamples",
"(",
"lastSamples",
")",
"else",
":",
"self",
".",
"writePhysicalSamples",
"(",
"lastSamples",
")"
] | python | Writes physical samples (uV, mA, Ohm) from data belonging to all signals.
The physical samples will be converted to digital samples using the values
of physical maximum, physical minimum, digital maximum and digital minimum.
If the sample frequency of all signals is equal, the data can be
saved into a matrix with the size (N, signals). If the sample frequencies
differ, then sample_freq is a vector containing all the different
sample frequencies. The data is saved as a list; each list entry contains
a vector with the data of one signal.
If digital is True, digital signals (as directly from the ADC) will be expected
(e.g. int16 from 0 to 2048).
All parameters must already be written into the bdf/edf-file. | false |
2,097,463 | def cos_1(a=1):
r"""Fourier cosine transform pair cos_1 ([Ande75]_)."""
def lhs(x):
return np.exp(-a**2*x**2)
def rhs(b):
return np.sqrt(np.pi)*np.exp(-b**2/(4*a**2))/(2*a)
return Ghosh('cos', lhs, rhs) | [
"def",
"cos_1",
"(",
"a",
"=",
"1",
")",
":",
"def",
"lhs",
"(",
"x",
")",
":",
"return",
"np",
".",
"exp",
"(",
"-",
"a",
"**",
"2",
"*",
"x",
"**",
"2",
")",
"def",
"rhs",
"(",
"b",
")",
":",
"return",
"np",
".",
"sqrt",
"(",
"np",
".",
"pi",
")",
"*",
"np",
".",
"exp",
"(",
"-",
"b",
"**",
"2",
"/",
"(",
"4",
"*",
"a",
"**",
"2",
")",
")",
"/",
"(",
"2",
"*",
"a",
")",
"return",
"Ghosh",
"(",
"'cos'",
",",
"lhs",
",",
"rhs",
")"
] | python | r"""Fourier cosine transform pair cos_1 ([Ande75]_). | false |
2,570,690 | def avatar_url_from_openid(openid, size=64, default='retro', dns=False):
"""
Our own implementation since fas doesn't support this nicely yet.
"""
if dns:
# This makes an extra DNS SRV query, which can slow down our webapps.
# It is necessary for libravatar federation, though.
import libravatar
return libravatar.libravatar_url(
openid=openid,
size=size,
default=default,
)
else:
params = _ordered_query_params([('s', size), ('d', default)])
query = parse.urlencode(params)
hash = sha256(openid.encode('utf-8')).hexdigest()
return "https://seccdn.libravatar.org/avatar/%s?%s" % (hash, query) | [
"def",
"avatar_url_from_openid",
"(",
"openid",
",",
"size",
"=",
"64",
",",
"default",
"=",
"'retro'",
",",
"dns",
"=",
"False",
")",
":",
"if",
"dns",
":",
"import",
"libravatar",
"return",
"libravatar",
".",
"libravatar_url",
"(",
"openid",
"=",
"openid",
",",
"size",
"=",
"size",
",",
"default",
"=",
"default",
",",
")",
"else",
":",
"params",
"=",
"_ordered_query_params",
"(",
"[",
"(",
"'s'",
",",
"size",
")",
",",
"(",
"'d'",
",",
"default",
")",
"]",
")",
"query",
"=",
"parse",
".",
"urlencode",
"(",
"params",
")",
"hash",
"=",
"sha256",
"(",
"openid",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")",
"return",
"\"https://seccdn.libravatar.org/avatar/%s?%s\"",
"%",
"(",
"hash",
",",
"query",
")"
] | python | Our own implementation since fas doesn't support this nicely yet. | false |
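A sketch of what the non-DNS branch above produces (the openid value is made up, and it assumes _ordered_query_params simply preserves the given parameter order):

import hashlib
openid = 'http://jdoe.id.fedoraproject.org/'
digest = hashlib.sha256(openid.encode('utf-8')).hexdigest()
url = "https://seccdn.libravatar.org/avatar/%s?s=64&d=retro" % digest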
2,699,894 | async def _handle_response(self, response: aiohttp.client_reqrep.ClientResponse, await_final_result: bool) -> dict:
"""
Handles the response returned from the CloudStack API. Some CloudStack APIs are implemented asynchronously, which
means that the API call returns just a job id. The actual API response is postponed, and a specific
asyncJobResults API has to be polled using the job id to get the final result once the API call has been
processed.
:param response: The response returned by the aiohttp call.
:type response: aiohttp.client_reqrep.ClientResponse
:param await_final_result: Specifier that indicates whether the function should poll the asyncJobResult API
until the asynchronous API call has been processed
:type await_final_result: bool
:return: Dictionary containing the JSON response of the API call
:rtype: dict
"""
try:
data = await response.json()
except aiohttp.client_exceptions.ContentTypeError:
text = await response.text()
logging.debug('Content returned by server not of type "application/json"\n Content: {}'.format(text))
raise CloudStackClientException(message="Could not decode content. Server did not return json content!")
else:
data = self._transform_data(data)
if response.status != 200:
raise CloudStackClientException(message="Async CloudStack call failed!",
error_code=data.get("errorcode", response.status),
error_text=data.get("errortext"),
response=data)
while await_final_result and ('jobid' in data):
await asyncio.sleep(self.async_poll_latency)
data = await self.queryAsyncJobResult(jobid=data['jobid'])
if data['jobstatus']: # jobstatus is 0 for pending async CloudStack calls
if not data['jobresultcode']: # exit code is zero
try:
return data['jobresult']
except KeyError:
pass
logging.debug("Async CloudStack call returned {}".format(str(data)))
raise CloudStackClientException(message="Async CloudStack call failed!",
error_code=data.get("errorcode"),
error_text=data.get("errortext"),
response=data)
return data | [
"async",
"def",
"_handle_response",
"(",
"self",
",",
"response",
":",
"aiohttp",
".",
"client_reqrep",
".",
"ClientResponse",
",",
"await_final_result",
":",
"bool",
")",
"->",
"dict",
":",
"try",
":",
"data",
"=",
"await",
"response",
".",
"json",
"(",
")",
"except",
"aiohttp",
".",
"client_exceptions",
".",
"ContentTypeError",
":",
"text",
"=",
"await",
"response",
".",
"text",
"(",
")",
"logging",
".",
"debug",
"(",
"'Content returned by server not of type \"application/json\"\\n Content: {}'",
".",
"format",
"(",
"text",
")",
")",
"raise",
"CloudStackClientException",
"(",
"message",
"=",
"\"Could not decode content. Server did not return json content!\"",
")",
"else",
":",
"data",
"=",
"self",
".",
"_transform_data",
"(",
"data",
")",
"if",
"response",
".",
"status",
"!=",
"200",
":",
"raise",
"CloudStackClientException",
"(",
"message",
"=",
"\"Async CloudStack call failed!\"",
",",
"error_code",
"=",
"data",
".",
"get",
"(",
"\"errorcode\"",
",",
"response",
".",
"status",
")",
",",
"error_text",
"=",
"data",
".",
"get",
"(",
"\"errortext\"",
")",
",",
"response",
"=",
"data",
")",
"while",
"await_final_result",
"and",
"(",
"'jobid'",
"in",
"data",
")",
":",
"await",
"asyncio",
".",
"sleep",
"(",
"self",
".",
"async_poll_latency",
")",
"data",
"=",
"await",
"self",
".",
"queryAsyncJobResult",
"(",
"jobid",
"=",
"data",
"[",
"'jobid'",
"]",
")",
"if",
"data",
"[",
"'jobstatus'",
"]",
":",
"if",
"not",
"data",
"[",
"'jobresultcode'",
"]",
":",
"try",
":",
"return",
"data",
"[",
"'jobresult'",
"]",
"except",
"KeyError",
":",
"pass",
"logging",
".",
"debug",
"(",
"\"Async CloudStack call returned {}\"",
".",
"format",
"(",
"str",
"(",
"data",
")",
")",
")",
"raise",
"CloudStackClientException",
"(",
"message",
"=",
"\"Async CloudStack call failed!\"",
",",
"error_code",
"=",
"data",
".",
"get",
"(",
"\"errorcode\"",
")",
",",
"error_text",
"=",
"data",
".",
"get",
"(",
"\"errortext\"",
")",
",",
"response",
"=",
"data",
")",
"return",
"data"
] | python | Handles the response returned from the CloudStack API. Some CloudStack APIs are implemented asynchronously, which
means that the API call returns just a job id. The actual API response is postponed, and a specific
asyncJobResults API has to be polled using the job id to get the final result once the API call has been
processed.
:param response: The response returned by the aiohttp call.
:type response: aiohttp.client_reqrep.ClientResponse
:param await_final_result: Specifier that indicates whether the function should poll the asyncJobResult API
until the asynchronous API call has been processed
:type await_final_result: bool
:return: Dictionary containing the JSON response of the API call
:rtype: dict | false |
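An illustrative trace of the asynchronous path handled above (all field values are invented):

# 1. initial response body: {'jobid': 'f2a4...'}
# 2. queryAsyncJobResult(jobid=...) is polled while jobstatus == 0 (pending)
# 3. final poll: {'jobstatus': 1, 'jobresultcode': 0, 'jobresult': {...}}
#    -> _handle_response returns the 'jobresult' dictionary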
2,198,373 | def hacking_assert_greater_less(logical_line, noqa):
r"""Check that self.assert{Greater,Less}[Equal] are used.
Okay: self.assertGreater(x, y)
Okay: self.assertGreaterEqual(x, y)
Okay: self.assertLess(x, y)
Okay: self.assertLessEqual(x, y)
H205: self.assertTrue(x > y)
H205: self.assertTrue(x >= y)
H205: self.assertTrue(x < y)
H205: self.assertTrue(x <= y)
"""
if noqa:
return
methods = ['assertTrue', 'assertFalse']
for method in methods:
start = logical_line.find('.%s' % method) + 1
if start != 0:
break
else:
return
comparisons = [ast.Gt, ast.GtE, ast.Lt, ast.LtE]
checker = AssertTrueFalseChecker(methods, comparisons)
checker.visit(ast.parse(logical_line))
if checker.error:
yield start, 'H205: Use assert{Greater,Less}[Equal]' | [
"def",
"hacking_assert_greater_less",
"(",
"logical_line",
",",
"noqa",
")",
":",
"if",
"noqa",
":",
"return",
"methods",
"=",
"[",
"'assertTrue'",
",",
"'assertFalse'",
"]",
"for",
"method",
"in",
"methods",
":",
"start",
"=",
"logical_line",
".",
"find",
"(",
"'.%s'",
"%",
"method",
")",
"+",
"1",
"if",
"start",
"!=",
"0",
":",
"break",
"else",
":",
"return",
"comparisons",
"=",
"[",
"ast",
".",
"Gt",
",",
"ast",
".",
"GtE",
",",
"ast",
".",
"Lt",
",",
"ast",
".",
"LtE",
"]",
"checker",
"=",
"AssertTrueFalseChecker",
"(",
"methods",
",",
"comparisons",
")",
"checker",
".",
"visit",
"(",
"ast",
".",
"parse",
"(",
"logical_line",
")",
")",
"if",
"checker",
".",
"error",
":",
"yield",
"start",
",",
"'H205: Use assert{Greater,Less}[Equal]'"
] | python | r"""Check that self.assert{Greater,Less}[Equal] are used.
Okay: self.assertGreater(x, y)
Okay: self.assertGreaterEqual(x, y)
Okay: self.assertLess(x, y)
Okay: self.assertLessEqual(x, y)
H205: self.assertTrue(x > y)
H205: self.assertTrue(x >= y)
H205: self.assertTrue(x < y)
H205: self.assertTrue(x <= y) | false |
1,880,463 | def shortInterestDF(symbol, date=None, token='', version=''):
'''The consolidated market short interest positions in all IEX-listed securities are included in the IEX Short Interest Report.
The report data will be published daily at 4:00pm ET.
https://iexcloud.io/docs/api/#listed-short-interest-list-in-dev
Args:
symbol (string); Ticker to request
date (datetime); Effective Datetime
token (string); Access token
version (string); API version
Returns:
DataFrame: result
'''
df = pd.DataFrame(shortInterest(symbol, date, token, version))
_toDatetime(df)
return df | [
"def",
"shortInterestDF",
"(",
"symbol",
",",
"date",
"=",
"None",
",",
"token",
"=",
"''",
",",
"version",
"=",
"''",
")",
":",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"shortInterest",
"(",
"symbol",
",",
"date",
",",
"token",
",",
"version",
")",
")",
"_toDatetime",
"(",
"df",
")",
"return",
"df"
] | python | The consolidated market short interest positions in all IEX-listed securities are included in the IEX Short Interest Report.
The report data will be published daily at 4:00pm ET.
https://iexcloud.io/docs/api/#listed-short-interest-list-in-dev
Args:
symbol (string); Ticker to request
date (datetime); Effective Datetime
token (string); Access token
version (string); API version
Returns:
DataFrame: result | false |
2,700,944 | def current(self):
"""
Display the current database revision
"""
config = self.alembic_config()
script = ScriptDirectory.from_config(config)
revision = 'base'
def display_version(rev, context):
for rev in script.get_all_current(rev):
nonlocal revision
revision = rev.cmd_format(False)
return []
with EnvironmentContext(config, script, fn=display_version):
script.run_env()
return revision | [
"def",
"current",
"(",
"self",
")",
":",
"config",
"=",
"self",
".",
"alembic_config",
"(",
")",
"script",
"=",
"ScriptDirectory",
".",
"from_config",
"(",
"config",
")",
"revision",
"=",
"'base'",
"def",
"display_version",
"(",
"rev",
",",
"context",
")",
":",
"for",
"rev",
"in",
"script",
".",
"get_all_current",
"(",
"rev",
")",
":",
"nonlocal",
"revision",
"revision",
"=",
"rev",
".",
"cmd_format",
"(",
"False",
")",
"return",
"[",
"]",
"with",
"EnvironmentContext",
"(",
"config",
",",
"script",
",",
"fn",
"=",
"display_version",
")",
":",
"script",
".",
"run_env",
"(",
")",
"return",
"revision"
] | python | Display the current database revision | false |
1,988,122 | def register(self, table):
"""Adds a shared table to the catalog.
Args:
table (SymbolTable): A non-system, shared symbol table.
"""
if table.table_type.is_system:
raise ValueError('Cannot add system table to catalog')
if not table.table_type.is_shared:
raise ValueError('Cannot add local table to catalog')
if table.is_substitute:
raise ValueError('Cannot add substitute table to catalog')
versions = self.__tables.get(table.name)
if versions is None:
versions = {}
self.__tables[table.name] = versions
versions[table.version] = table | [
"def",
"register",
"(",
"self",
",",
"table",
")",
":",
"if",
"table",
".",
"table_type",
".",
"is_system",
":",
"raise",
"ValueError",
"(",
"'Cannot add system table to catalog'",
")",
"if",
"not",
"table",
".",
"table_type",
".",
"is_shared",
":",
"raise",
"ValueError",
"(",
"'Cannot add local table to catalog'",
")",
"if",
"table",
".",
"is_substitute",
":",
"raise",
"ValueError",
"(",
"'Cannot add substitute table to catalog'",
")",
"versions",
"=",
"self",
".",
"__tables",
".",
"get",
"(",
"table",
".",
"name",
")",
"if",
"versions",
"is",
"None",
":",
"versions",
"=",
"{",
"}",
"self",
".",
"__tables",
"[",
"table",
".",
"name",
"]",
"=",
"versions",
"versions",
"[",
"table",
".",
"version",
"]",
"=",
"table"
] | python | Adds a shared table to the catalog.
Args:
table (SymbolTable): A non-system, shared symbol table. | false |
2,097,313 | def disable_invalid_filters(env):
"""It analyzes all the existing active filters to check if they are still
correct. If not, they are disabled to avoid errors when clicking on
them, or worse, if they are default filters when opening the model/action.
To be run in the base end-migration script so as to have a general scope. Only
assured to work on > v8.
:param env: Environment parameter.
"""
try:
from odoo.tools.safe_eval import safe_eval
except ImportError:
from openerp.tools.safe_eval import safe_eval
import time
try:
basestring
except: # For Python 3 compatibility
basestring = str
def format_message(f):
msg = "FILTER DISABLED: "
if f.user_id:
msg += "Filter '%s' for user '%s'" % (f.name, f.user_id.name)
else:
msg += "Global Filter '%s'" % f.name
msg += " for model '%s' has been disabled " % f.model_id
return msg
filters = env['ir.filters'].search([('domain', '!=', '[]')])
for f in filters:
if f.model_id not in env:
continue # Obsolete or invalid model
model = env[f.model_id]
columns = (
getattr(model, '_columns', False) or getattr(model, '_fields')
)
# DOMAIN
try:
# Strange artifact found in a filter
domain = f.domain.replace('%%', '%')
model.search(
safe_eval(domain, {'time': time, 'uid': env.uid}),
limit=1,
)
except Exception:
logger.warning(
format_message(f) + "as it contains an invalid domain."
)
f.active = False
continue
# CONTEXT GROUP BY
try:
context = safe_eval(f.context, {'time': time, 'uid': env.uid})
except Exception:
logger.warning(
format_message(f) + "as it contains an invalid context %s.",
f.context
)
f.active = False
continue
keys = ['group_by', 'col_group_by']
for key in keys:
if not context.get(key):
continue
g = context[key]
if not g:
continue
if isinstance(g, basestring):
g = [g]
for field_expr in g:
field = field_expr.split(':')[0] # Remove date specifiers
if not columns.get(field):
logger.warning(
format_message(f) +
"as it contains an invalid %s." % key
)
f.active = False
break | [
"def",
"disable_invalid_filters",
"(",
"env",
")",
":",
"try",
":",
"from",
"odoo",
".",
"tools",
".",
"safe_eval",
"import",
"safe_eval",
"except",
"ImportError",
":",
"from",
"openerp",
".",
"tools",
".",
"safe_eval",
"import",
"safe_eval",
"import",
"time",
"try",
":",
"basestring",
"except",
":",
"basestring",
"=",
"str",
"def",
"format_message",
"(",
"f",
")",
":",
"msg",
"=",
"\"FILTER DISABLED: \"",
"if",
"f",
".",
"user_id",
":",
"msg",
"+=",
"\"Filter '%s' for user '%s'\"",
"%",
"(",
"f",
".",
"name",
",",
"f",
".",
"user_id",
".",
"name",
")",
"else",
":",
"msg",
"+=",
"\"Global Filter '%s'\"",
"%",
"f",
".",
"name",
"msg",
"+=",
"\" for model '%s' has been disabled \"",
"%",
"f",
".",
"model_id",
"return",
"msg",
"filters",
"=",
"env",
"[",
"'ir.filters'",
"]",
".",
"search",
"(",
"[",
"(",
"'domain'",
",",
"'!='",
",",
"'[]'",
")",
"]",
")",
"for",
"f",
"in",
"filters",
":",
"if",
"f",
".",
"model_id",
"not",
"in",
"env",
":",
"continue",
"model",
"=",
"env",
"[",
"f",
".",
"model_id",
"]",
"columns",
"=",
"(",
"getattr",
"(",
"model",
",",
"'_columns'",
",",
"False",
")",
"or",
"getattr",
"(",
"model",
",",
"'_fields'",
")",
")",
"try",
":",
"domain",
"=",
"f",
".",
"domain",
".",
"replace",
"(",
"'%%'",
",",
"'%'",
")",
"model",
".",
"search",
"(",
"safe_eval",
"(",
"domain",
",",
"{",
"'time'",
":",
"time",
",",
"'uid'",
":",
"env",
".",
"uid",
"}",
")",
",",
"limit",
"=",
"1",
",",
")",
"except",
"Exception",
":",
"logger",
".",
"warning",
"(",
"format_message",
"(",
"f",
")",
"+",
"\"as it contains an invalid domain.\"",
")",
"f",
".",
"active",
"=",
"False",
"continue",
"try",
":",
"context",
"=",
"safe_eval",
"(",
"f",
".",
"context",
",",
"{",
"'time'",
":",
"time",
",",
"'uid'",
":",
"env",
".",
"uid",
"}",
")",
"except",
"Exception",
":",
"logger",
".",
"warning",
"(",
"format_message",
"(",
"f",
")",
"+",
"\"as it contains an invalid context %s.\"",
",",
"f",
".",
"context",
")",
"f",
".",
"active",
"=",
"False",
"continue",
"keys",
"=",
"[",
"'group_by'",
",",
"'col_group_by'",
"]",
"for",
"key",
"in",
"keys",
":",
"if",
"not",
"context",
".",
"get",
"(",
"key",
")",
":",
"continue",
"g",
"=",
"context",
"[",
"key",
"]",
"if",
"not",
"g",
":",
"continue",
"if",
"isinstance",
"(",
"g",
",",
"basestring",
")",
":",
"g",
"=",
"[",
"g",
"]",
"for",
"field_expr",
"in",
"g",
":",
"field",
"=",
"field_expr",
".",
"split",
"(",
"':'",
")",
"[",
"0",
"]",
"if",
"not",
"columns",
".",
"get",
"(",
"field",
")",
":",
"logger",
".",
"warning",
"(",
"format_message",
"(",
"f",
")",
"+",
"\"as it contains an invalid %s.\"",
"%",
"key",
")",
"f",
".",
"active",
"=",
"False",
"break"
] | python | It analyzes all the existing active filters to check if they are still
correct. If not, they are disabled to avoid errors when clicking on
them, or worse, if they are default filters when opening the model/action.
To be run in the base end-migration script so as to have a general scope. Only
assured to work on > v8.
:param env: Environment parameter. | false |