Unnamed: 0
int64 0
10k
| repository_name
stringlengths 7
54
| func_path_in_repository
stringlengths 5
223
| func_name
stringlengths 1
134
| whole_func_string
stringlengths 100
30.3k
| language
stringclasses 1
value | func_code_string
stringlengths 100
30.3k
| func_code_tokens
stringlengths 138
33.2k
| func_documentation_string
stringlengths 1
15k
| func_documentation_tokens
stringlengths 5
5.14k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
|
---|---|---|---|---|---|---|---|---|---|---|---|
300 | Cognexa/cxflow | cxflow/cli/ls.py | _ls_print_listing | def _ls_print_listing(dir_: str, recursive: bool, all_: bool, long: bool) -> List[Tuple[str, dict, TrainingTrace]]:
"""
Print names of the train dirs contained in the given dir.
:param dir_: dir to be listed
:param recursive: walk recursively in sub-directories, stop at train dirs (--recursive option)
:param all_: include train dirs with no epochs done (--all option)
:param long: list more details including model name, model and dataset classes,
age, duration and epochs done (--long option)
:return: list of found training tuples (train_dir, configuration dict, trace)
"""
all_trainings = []
for root_dir, train_dirs in walk_train_dirs(dir_):
if train_dirs:
if recursive:
print(root_dir + ':')
trainings = [(train_dir,
load_config(path.join(train_dir, CXF_CONFIG_FILE), []),
TrainingTrace.from_file(path.join(train_dir, CXF_TRACE_FILE)))
for train_dir
in [os.path.join(root_dir, train_dir) for train_dir in train_dirs]]
if not all_:
trainings = [train_dir for train_dir in trainings if train_dir[2][TrainingTraceKeys.EPOCHS_DONE]]
if long:
print('total {}'.format(len(trainings)))
_print_trainings_long(trainings)
else:
for train_dir, _, _ in trainings:
print(path.basename(train_dir))
all_trainings.extend(trainings)
if recursive:
print()
if not recursive:
break
return all_trainings | python | def _ls_print_listing(dir_: str, recursive: bool, all_: bool, long: bool) -> List[Tuple[str, dict, TrainingTrace]]:
"""
Print names of the train dirs contained in the given dir.
:param dir_: dir to be listed
:param recursive: walk recursively in sub-directories, stop at train dirs (--recursive option)
:param all_: include train dirs with no epochs done (--all option)
:param long: list more details including model name, model and dataset classes,
age, duration and epochs done (--long option)
:return: list of found training tuples (train_dir, configuration dict, trace)
"""
all_trainings = []
for root_dir, train_dirs in walk_train_dirs(dir_):
if train_dirs:
if recursive:
print(root_dir + ':')
trainings = [(train_dir,
load_config(path.join(train_dir, CXF_CONFIG_FILE), []),
TrainingTrace.from_file(path.join(train_dir, CXF_TRACE_FILE)))
for train_dir
in [os.path.join(root_dir, train_dir) for train_dir in train_dirs]]
if not all_:
trainings = [train_dir for train_dir in trainings if train_dir[2][TrainingTraceKeys.EPOCHS_DONE]]
if long:
print('total {}'.format(len(trainings)))
_print_trainings_long(trainings)
else:
for train_dir, _, _ in trainings:
print(path.basename(train_dir))
all_trainings.extend(trainings)
if recursive:
print()
if not recursive:
break
return all_trainings | ['def', '_ls_print_listing', '(', 'dir_', ':', 'str', ',', 'recursive', ':', 'bool', ',', 'all_', ':', 'bool', ',', 'long', ':', 'bool', ')', '->', 'List', '[', 'Tuple', '[', 'str', ',', 'dict', ',', 'TrainingTrace', ']', ']', ':', 'all_trainings', '=', '[', ']', 'for', 'root_dir', ',', 'train_dirs', 'in', 'walk_train_dirs', '(', 'dir_', ')', ':', 'if', 'train_dirs', ':', 'if', 'recursive', ':', 'print', '(', 'root_dir', '+', "':'", ')', 'trainings', '=', '[', '(', 'train_dir', ',', 'load_config', '(', 'path', '.', 'join', '(', 'train_dir', ',', 'CXF_CONFIG_FILE', ')', ',', '[', ']', ')', ',', 'TrainingTrace', '.', 'from_file', '(', 'path', '.', 'join', '(', 'train_dir', ',', 'CXF_TRACE_FILE', ')', ')', ')', 'for', 'train_dir', 'in', '[', 'os', '.', 'path', '.', 'join', '(', 'root_dir', ',', 'train_dir', ')', 'for', 'train_dir', 'in', 'train_dirs', ']', ']', 'if', 'not', 'all_', ':', 'trainings', '=', '[', 'train_dir', 'for', 'train_dir', 'in', 'trainings', 'if', 'train_dir', '[', '2', ']', '[', 'TrainingTraceKeys', '.', 'EPOCHS_DONE', ']', ']', 'if', 'long', ':', 'print', '(', "'total {}'", '.', 'format', '(', 'len', '(', 'trainings', ')', ')', ')', '_print_trainings_long', '(', 'trainings', ')', 'else', ':', 'for', 'train_dir', ',', '_', ',', '_', 'in', 'trainings', ':', 'print', '(', 'path', '.', 'basename', '(', 'train_dir', ')', ')', 'all_trainings', '.', 'extend', '(', 'trainings', ')', 'if', 'recursive', ':', 'print', '(', ')', 'if', 'not', 'recursive', ':', 'break', 'return', 'all_trainings'] | Print names of the train dirs contained in the given dir.
:param dir_: dir to be listed
:param recursive: walk recursively in sub-directories, stop at train dirs (--recursive option)
:param all_: include train dirs with no epochs done (--all option)
:param long: list more details including model name, model and dataset classes,
age, duration and epochs done (--long option)
:return: list of found training tuples (train_dir, configuration dict, trace) | ['Print', 'names', 'of', 'the', 'train', 'dirs', 'contained', 'in', 'the', 'given', 'dir', '.'] | train | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/ls.py#L117-L152 |
301 | potash/drain | drain/util.py | to_float | def to_float(*args):
"""
cast numpy arrays to float32
if there's more than one, return an array
"""
floats = [np.array(a, dtype=np.float32) for a in args]
return floats[0] if len(floats) == 1 else floats | python | def to_float(*args):
"""
cast numpy arrays to float32
if there's more than one, return an array
"""
floats = [np.array(a, dtype=np.float32) for a in args]
return floats[0] if len(floats) == 1 else floats | ['def', 'to_float', '(', '*', 'args', ')', ':', 'floats', '=', '[', 'np', '.', 'array', '(', 'a', ',', 'dtype', '=', 'np', '.', 'float32', ')', 'for', 'a', 'in', 'args', ']', 'return', 'floats', '[', '0', ']', 'if', 'len', '(', 'floats', ')', '==', '1', 'else', 'floats'] | cast numpy arrays to float32
if there's more than one, return an array | ['cast', 'numpy', 'arrays', 'to', 'float32', 'if', 'there', 's', 'more', 'than', 'one', 'return', 'an', 'array'] | train | https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/util.py#L83-L89 |
302 | pymoca/pymoca | src/pymoca/backends/xml/parser.py | ModelListener.call | def call(self, tag_name: str, *args, **kwargs):
"""Convenience method for calling methods with walker."""
if hasattr(self, tag_name):
getattr(self, tag_name)(*args, **kwargs) | python | def call(self, tag_name: str, *args, **kwargs):
"""Convenience method for calling methods with walker."""
if hasattr(self, tag_name):
getattr(self, tag_name)(*args, **kwargs) | ['def', 'call', '(', 'self', ',', 'tag_name', ':', 'str', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'if', 'hasattr', '(', 'self', ',', 'tag_name', ')', ':', 'getattr', '(', 'self', ',', 'tag_name', ')', '(', '*', 'args', ',', '*', '*', 'kwargs', ')'] | Convenience method for calling methods with walker. | ['Convenience', 'method', 'for', 'calling', 'methods', 'with', 'walker', '.'] | train | https://github.com/pymoca/pymoca/blob/14b5eb7425e96689de6cc5c10f400895d586a978/src/pymoca/backends/xml/parser.py#L85-L88 |
303 | cloud-custodian/cloud-custodian | c7n/utils.py | reformat_schema | def reformat_schema(model):
""" Reformat schema to be in a more displayable format. """
if not hasattr(model, 'schema'):
return "Model '{}' does not have a schema".format(model)
if 'properties' not in model.schema:
return "Schema in unexpected format."
ret = copy.deepcopy(model.schema['properties'])
if 'type' in ret:
del(ret['type'])
for key in model.schema.get('required', []):
if key in ret:
ret[key]['required'] = True
return ret | python | def reformat_schema(model):
""" Reformat schema to be in a more displayable format. """
if not hasattr(model, 'schema'):
return "Model '{}' does not have a schema".format(model)
if 'properties' not in model.schema:
return "Schema in unexpected format."
ret = copy.deepcopy(model.schema['properties'])
if 'type' in ret:
del(ret['type'])
for key in model.schema.get('required', []):
if key in ret:
ret[key]['required'] = True
return ret | ['def', 'reformat_schema', '(', 'model', ')', ':', 'if', 'not', 'hasattr', '(', 'model', ',', "'schema'", ')', ':', 'return', '"Model \'{}\' does not have a schema"', '.', 'format', '(', 'model', ')', 'if', "'properties'", 'not', 'in', 'model', '.', 'schema', ':', 'return', '"Schema in unexpected format."', 'ret', '=', 'copy', '.', 'deepcopy', '(', 'model', '.', 'schema', '[', "'properties'", ']', ')', 'if', "'type'", 'in', 'ret', ':', 'del', '(', 'ret', '[', "'type'", ']', ')', 'for', 'key', 'in', 'model', '.', 'schema', '.', 'get', '(', "'required'", ',', '[', ']', ')', ':', 'if', 'key', 'in', 'ret', ':', 'ret', '[', 'key', ']', '[', "'required'", ']', '=', 'True', 'return', 'ret'] | Reformat schema to be in a more displayable format. | ['Reformat', 'schema', 'to', 'be', 'in', 'a', 'more', 'displayable', 'format', '.'] | train | https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/utils.py#L449-L466 |
304 | MisterY/gnucash-portfolio | gnucash_portfolio/securitiesaggregate.py | SecuritiesAggregate.get_by_symbol | def get_by_symbol(self, symbol: str) -> Commodity:
"""
Returns the commodity with the given symbol.
If more are found, an exception will be thrown.
"""
# handle namespace. Accept GnuCash and Yahoo-style symbols.
full_symbol = self.__parse_gc_symbol(symbol)
query = (
self.query
.filter(Commodity.mnemonic == full_symbol["mnemonic"])
)
if full_symbol["namespace"]:
query = query.filter(Commodity.namespace == full_symbol["namespace"])
return query.first() | python | def get_by_symbol(self, symbol: str) -> Commodity:
"""
Returns the commodity with the given symbol.
If more are found, an exception will be thrown.
"""
# handle namespace. Accept GnuCash and Yahoo-style symbols.
full_symbol = self.__parse_gc_symbol(symbol)
query = (
self.query
.filter(Commodity.mnemonic == full_symbol["mnemonic"])
)
if full_symbol["namespace"]:
query = query.filter(Commodity.namespace == full_symbol["namespace"])
return query.first() | ['def', 'get_by_symbol', '(', 'self', ',', 'symbol', ':', 'str', ')', '->', 'Commodity', ':', '# handle namespace. Accept GnuCash and Yahoo-style symbols.', 'full_symbol', '=', 'self', '.', '__parse_gc_symbol', '(', 'symbol', ')', 'query', '=', '(', 'self', '.', 'query', '.', 'filter', '(', 'Commodity', '.', 'mnemonic', '==', 'full_symbol', '[', '"mnemonic"', ']', ')', ')', 'if', 'full_symbol', '[', '"namespace"', ']', ':', 'query', '=', 'query', '.', 'filter', '(', 'Commodity', '.', 'namespace', '==', 'full_symbol', '[', '"namespace"', ']', ')', 'return', 'query', '.', 'first', '(', ')'] | Returns the commodity with the given symbol.
If more are found, an exception will be thrown. | ['Returns', 'the', 'commodity', 'with', 'the', 'given', 'symbol', '.', 'If', 'more', 'are', 'found', 'an', 'exception', 'will', 'be', 'thrown', '.'] | train | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L387-L402 |
305 | O365/python-o365 | O365/mailbox.py | Folder.copy_folder | def copy_folder(self, to_folder):
""" Copy this folder and it's contents to into another folder
:param to_folder: the destination Folder/folder_id to copy into
:type to_folder: mailbox.Folder or str
:return: The new folder after copying
:rtype: mailbox.Folder or None
"""
to_folder_id = to_folder.folder_id if isinstance(to_folder,
Folder) else to_folder
if self.root or not self.folder_id or not to_folder_id:
return None
url = self.build_url(
self._endpoints.get('copy_folder').format(id=self.folder_id))
response = self.con.post(url,
data={self._cc('destinationId'): to_folder_id})
if not response:
return None
folder = response.json()
self_class = getattr(self, 'folder_constructor', type(self))
# Everything received from cloud must be passed as self._cloud_data_key
return self_class(con=self.con, main_resource=self.main_resource,
**{self._cloud_data_key: folder}) | python | def copy_folder(self, to_folder):
""" Copy this folder and it's contents to into another folder
:param to_folder: the destination Folder/folder_id to copy into
:type to_folder: mailbox.Folder or str
:return: The new folder after copying
:rtype: mailbox.Folder or None
"""
to_folder_id = to_folder.folder_id if isinstance(to_folder,
Folder) else to_folder
if self.root or not self.folder_id or not to_folder_id:
return None
url = self.build_url(
self._endpoints.get('copy_folder').format(id=self.folder_id))
response = self.con.post(url,
data={self._cc('destinationId'): to_folder_id})
if not response:
return None
folder = response.json()
self_class = getattr(self, 'folder_constructor', type(self))
# Everything received from cloud must be passed as self._cloud_data_key
return self_class(con=self.con, main_resource=self.main_resource,
**{self._cloud_data_key: folder}) | ['def', 'copy_folder', '(', 'self', ',', 'to_folder', ')', ':', 'to_folder_id', '=', 'to_folder', '.', 'folder_id', 'if', 'isinstance', '(', 'to_folder', ',', 'Folder', ')', 'else', 'to_folder', 'if', 'self', '.', 'root', 'or', 'not', 'self', '.', 'folder_id', 'or', 'not', 'to_folder_id', ':', 'return', 'None', 'url', '=', 'self', '.', 'build_url', '(', 'self', '.', '_endpoints', '.', 'get', '(', "'copy_folder'", ')', '.', 'format', '(', 'id', '=', 'self', '.', 'folder_id', ')', ')', 'response', '=', 'self', '.', 'con', '.', 'post', '(', 'url', ',', 'data', '=', '{', 'self', '.', '_cc', '(', "'destinationId'", ')', ':', 'to_folder_id', '}', ')', 'if', 'not', 'response', ':', 'return', 'None', 'folder', '=', 'response', '.', 'json', '(', ')', 'self_class', '=', 'getattr', '(', 'self', ',', "'folder_constructor'", ',', 'type', '(', 'self', ')', ')', '# Everything received from cloud must be passed as self._cloud_data_key', 'return', 'self_class', '(', 'con', '=', 'self', '.', 'con', ',', 'main_resource', '=', 'self', '.', 'main_resource', ',', '*', '*', '{', 'self', '.', '_cloud_data_key', ':', 'folder', '}', ')'] | Copy this folder and it's contents to into another folder
:param to_folder: the destination Folder/folder_id to copy into
:type to_folder: mailbox.Folder or str
:return: The new folder after copying
:rtype: mailbox.Folder or None | ['Copy', 'this', 'folder', 'and', 'it', 's', 'contents', 'to', 'into', 'another', 'folder'] | train | https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/mailbox.py#L405-L432 |
306 | geertj/gruvi | lib/gruvi/fibers.py | spawn | def spawn(func, *args, **kwargs):
"""Spawn a new fiber.
A new :class:`Fiber` is created with main function *func* and positional
arguments *args*. The keyword arguments are passed to the :class:`Fiber`
constructor, not to the main function. The fiber is then scheduled to start
by calling its :meth:`~Fiber.start` method.
The fiber instance is returned.
"""
fiber = Fiber(func, args, **kwargs)
fiber.start()
return fiber | python | def spawn(func, *args, **kwargs):
"""Spawn a new fiber.
A new :class:`Fiber` is created with main function *func* and positional
arguments *args*. The keyword arguments are passed to the :class:`Fiber`
constructor, not to the main function. The fiber is then scheduled to start
by calling its :meth:`~Fiber.start` method.
The fiber instance is returned.
"""
fiber = Fiber(func, args, **kwargs)
fiber.start()
return fiber | ['def', 'spawn', '(', 'func', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'fiber', '=', 'Fiber', '(', 'func', ',', 'args', ',', '*', '*', 'kwargs', ')', 'fiber', '.', 'start', '(', ')', 'return', 'fiber'] | Spawn a new fiber.
A new :class:`Fiber` is created with main function *func* and positional
arguments *args*. The keyword arguments are passed to the :class:`Fiber`
constructor, not to the main function. The fiber is then scheduled to start
by calling its :meth:`~Fiber.start` method.
The fiber instance is returned. | ['Spawn', 'a', 'new', 'fiber', '.'] | train | https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/fibers.py#L150-L162 |
307 | MalongTech/productai-python-sdk | productai/__init__.py | ProductSetAPI.get_products | def get_products(self, product_ids):
"""
This function (and backend API) is being obsoleted. Don't use it anymore.
"""
if self.product_set_id is None:
raise ValueError('product_set_id must be specified')
data = {'ids': product_ids}
return self.client.get(self.base_url + '/products', json=data) | python | def get_products(self, product_ids):
"""
This function (and backend API) is being obsoleted. Don't use it anymore.
"""
if self.product_set_id is None:
raise ValueError('product_set_id must be specified')
data = {'ids': product_ids}
return self.client.get(self.base_url + '/products', json=data) | ['def', 'get_products', '(', 'self', ',', 'product_ids', ')', ':', 'if', 'self', '.', 'product_set_id', 'is', 'None', ':', 'raise', 'ValueError', '(', "'product_set_id must be specified'", ')', 'data', '=', '{', "'ids'", ':', 'product_ids', '}', 'return', 'self', '.', 'client', '.', 'get', '(', 'self', '.', 'base_url', '+', "'/products'", ',', 'json', '=', 'data', ')'] | This function (and backend API) is being obsoleted. Don't use it anymore. | ['This', 'function', '(', 'and', 'backend', 'API', ')', 'is', 'being', 'obsoleted', '.', 'Don', 't', 'use', 'it', 'anymore', '.'] | train | https://github.com/MalongTech/productai-python-sdk/blob/2227783dbef4ce8e94613c08e67d65d6eecee21c/productai/__init__.py#L544-L551 |
308 | singingwolfboy/flask-dance | flask_dance/contrib/reddit.py | make_reddit_blueprint | def make_reddit_blueprint(
client_id=None,
client_secret=None,
scope="identity",
permanent=False,
redirect_url=None,
redirect_to=None,
login_url=None,
authorized_url=None,
session_class=None,
storage=None,
user_agent=None,
):
"""
Make a blueprint for authenticating with Reddit using OAuth 2. This requires
a client ID and client secret from Reddit. You should either pass them to
this constructor, or make sure that your Flask application config defines
them, using the variables :envvar:`REDDIT_OAUTH_CLIENT_ID` and
:envvar:`REDDIT_OAUTH_CLIENT_SECRET`.
Args:
client_id (str): The client ID for your application on Reddit.
client_secret (str): The client secret for your application on Reddit
scope (str, optional): space-separated list of scopes for the OAuth token
Defaults to ``identity``
permanent (bool, optional): Whether to request permanent access token.
Defaults to False, access will be valid for 1 hour
redirect_url (str): the URL to redirect to after the authentication
dance is complete
redirect_to (str): if ``redirect_url`` is not defined, the name of the
view to redirect to after the authentication dance is complete.
The actual URL will be determined by :func:`flask.url_for`
login_url (str, optional): the URL path for the ``login`` view.
Defaults to ``/reddit``
authorized_url (str, optional): the URL path for the ``authorized`` view.
Defaults to ``/reddit/authorized``.
session_class (class, optional): The class to use for creating a
Requests session. Defaults to
:class:`~flask_dance.contrib.reddit.RedditOAuth2Session`.
storage: A token storage class, or an instance of a token storage
class, to use for this blueprint. Defaults to
:class:`~flask_dance.consumer.storage.session.SessionStorage`.
user_agent (str, optional): User agent for the requests to Reddit API.
Defaults to ``Flask-Dance/{{version}}``
:rtype: :class:`~flask_dance.consumer.OAuth2ConsumerBlueprint`
:returns: A :ref:`blueprint <flask:blueprints>` to attach to your Flask app.
"""
authorization_url_params = {}
if permanent:
authorization_url_params["duration"] = "permanent"
reddit_bp = OAuth2ConsumerBlueprint(
"reddit",
__name__,
client_id=client_id,
client_secret=client_secret,
scope=scope,
base_url="https://oauth.reddit.com/",
authorization_url="https://www.reddit.com/api/v1/authorize",
authorization_url_params=authorization_url_params,
token_url="https://www.reddit.com/api/v1/access_token",
auto_refresh_url="https://www.reddit.com/api/v1/access_token",
redirect_url=redirect_url,
redirect_to=redirect_to,
login_url=login_url,
authorized_url=authorized_url,
session_class=session_class or RedditOAuth2Session,
storage=storage,
)
reddit_bp.from_config["client_id"] = "REDDIT_OAUTH_CLIENT_ID"
reddit_bp.from_config["client_secret"] = "REDDIT_OAUTH_CLIENT_SECRET"
reddit_bp.user_agent = user_agent
@reddit_bp.before_app_request
def set_applocal_session():
ctx = stack.top
ctx.reddit_oauth = reddit_bp.session
return reddit_bp | python | def make_reddit_blueprint(
client_id=None,
client_secret=None,
scope="identity",
permanent=False,
redirect_url=None,
redirect_to=None,
login_url=None,
authorized_url=None,
session_class=None,
storage=None,
user_agent=None,
):
"""
Make a blueprint for authenticating with Reddit using OAuth 2. This requires
a client ID and client secret from Reddit. You should either pass them to
this constructor, or make sure that your Flask application config defines
them, using the variables :envvar:`REDDIT_OAUTH_CLIENT_ID` and
:envvar:`REDDIT_OAUTH_CLIENT_SECRET`.
Args:
client_id (str): The client ID for your application on Reddit.
client_secret (str): The client secret for your application on Reddit
scope (str, optional): space-separated list of scopes for the OAuth token
Defaults to ``identity``
permanent (bool, optional): Whether to request permanent access token.
Defaults to False, access will be valid for 1 hour
redirect_url (str): the URL to redirect to after the authentication
dance is complete
redirect_to (str): if ``redirect_url`` is not defined, the name of the
view to redirect to after the authentication dance is complete.
The actual URL will be determined by :func:`flask.url_for`
login_url (str, optional): the URL path for the ``login`` view.
Defaults to ``/reddit``
authorized_url (str, optional): the URL path for the ``authorized`` view.
Defaults to ``/reddit/authorized``.
session_class (class, optional): The class to use for creating a
Requests session. Defaults to
:class:`~flask_dance.contrib.reddit.RedditOAuth2Session`.
storage: A token storage class, or an instance of a token storage
class, to use for this blueprint. Defaults to
:class:`~flask_dance.consumer.storage.session.SessionStorage`.
user_agent (str, optional): User agent for the requests to Reddit API.
Defaults to ``Flask-Dance/{{version}}``
:rtype: :class:`~flask_dance.consumer.OAuth2ConsumerBlueprint`
:returns: A :ref:`blueprint <flask:blueprints>` to attach to your Flask app.
"""
authorization_url_params = {}
if permanent:
authorization_url_params["duration"] = "permanent"
reddit_bp = OAuth2ConsumerBlueprint(
"reddit",
__name__,
client_id=client_id,
client_secret=client_secret,
scope=scope,
base_url="https://oauth.reddit.com/",
authorization_url="https://www.reddit.com/api/v1/authorize",
authorization_url_params=authorization_url_params,
token_url="https://www.reddit.com/api/v1/access_token",
auto_refresh_url="https://www.reddit.com/api/v1/access_token",
redirect_url=redirect_url,
redirect_to=redirect_to,
login_url=login_url,
authorized_url=authorized_url,
session_class=session_class or RedditOAuth2Session,
storage=storage,
)
reddit_bp.from_config["client_id"] = "REDDIT_OAUTH_CLIENT_ID"
reddit_bp.from_config["client_secret"] = "REDDIT_OAUTH_CLIENT_SECRET"
reddit_bp.user_agent = user_agent
@reddit_bp.before_app_request
def set_applocal_session():
ctx = stack.top
ctx.reddit_oauth = reddit_bp.session
return reddit_bp | ['def', 'make_reddit_blueprint', '(', 'client_id', '=', 'None', ',', 'client_secret', '=', 'None', ',', 'scope', '=', '"identity"', ',', 'permanent', '=', 'False', ',', 'redirect_url', '=', 'None', ',', 'redirect_to', '=', 'None', ',', 'login_url', '=', 'None', ',', 'authorized_url', '=', 'None', ',', 'session_class', '=', 'None', ',', 'storage', '=', 'None', ',', 'user_agent', '=', 'None', ',', ')', ':', 'authorization_url_params', '=', '{', '}', 'if', 'permanent', ':', 'authorization_url_params', '[', '"duration"', ']', '=', '"permanent"', 'reddit_bp', '=', 'OAuth2ConsumerBlueprint', '(', '"reddit"', ',', '__name__', ',', 'client_id', '=', 'client_id', ',', 'client_secret', '=', 'client_secret', ',', 'scope', '=', 'scope', ',', 'base_url', '=', '"https://oauth.reddit.com/"', ',', 'authorization_url', '=', '"https://www.reddit.com/api/v1/authorize"', ',', 'authorization_url_params', '=', 'authorization_url_params', ',', 'token_url', '=', '"https://www.reddit.com/api/v1/access_token"', ',', 'auto_refresh_url', '=', '"https://www.reddit.com/api/v1/access_token"', ',', 'redirect_url', '=', 'redirect_url', ',', 'redirect_to', '=', 'redirect_to', ',', 'login_url', '=', 'login_url', ',', 'authorized_url', '=', 'authorized_url', ',', 'session_class', '=', 'session_class', 'or', 'RedditOAuth2Session', ',', 'storage', '=', 'storage', ',', ')', 'reddit_bp', '.', 'from_config', '[', '"client_id"', ']', '=', '"REDDIT_OAUTH_CLIENT_ID"', 'reddit_bp', '.', 'from_config', '[', '"client_secret"', ']', '=', '"REDDIT_OAUTH_CLIENT_SECRET"', 'reddit_bp', '.', 'user_agent', '=', 'user_agent', '@', 'reddit_bp', '.', 'before_app_request', 'def', 'set_applocal_session', '(', ')', ':', 'ctx', '=', 'stack', '.', 'top', 'ctx', '.', 'reddit_oauth', '=', 'reddit_bp', '.', 'session', 'return', 'reddit_bp'] | Make a blueprint for authenticating with Reddit using OAuth 2. This requires
a client ID and client secret from Reddit. You should either pass them to
this constructor, or make sure that your Flask application config defines
them, using the variables :envvar:`REDDIT_OAUTH_CLIENT_ID` and
:envvar:`REDDIT_OAUTH_CLIENT_SECRET`.
Args:
client_id (str): The client ID for your application on Reddit.
client_secret (str): The client secret for your application on Reddit
scope (str, optional): space-separated list of scopes for the OAuth token
Defaults to ``identity``
permanent (bool, optional): Whether to request permanent access token.
Defaults to False, access will be valid for 1 hour
redirect_url (str): the URL to redirect to after the authentication
dance is complete
redirect_to (str): if ``redirect_url`` is not defined, the name of the
view to redirect to after the authentication dance is complete.
The actual URL will be determined by :func:`flask.url_for`
login_url (str, optional): the URL path for the ``login`` view.
Defaults to ``/reddit``
authorized_url (str, optional): the URL path for the ``authorized`` view.
Defaults to ``/reddit/authorized``.
session_class (class, optional): The class to use for creating a
Requests session. Defaults to
:class:`~flask_dance.contrib.reddit.RedditOAuth2Session`.
storage: A token storage class, or an instance of a token storage
class, to use for this blueprint. Defaults to
:class:`~flask_dance.consumer.storage.session.SessionStorage`.
user_agent (str, optional): User agent for the requests to Reddit API.
Defaults to ``Flask-Dance/{{version}}``
:rtype: :class:`~flask_dance.consumer.OAuth2ConsumerBlueprint`
:returns: A :ref:`blueprint <flask:blueprints>` to attach to your Flask app. | ['Make', 'a', 'blueprint', 'for', 'authenticating', 'with', 'Reddit', 'using', 'OAuth', '2', '.', 'This', 'requires', 'a', 'client', 'ID', 'and', 'client', 'secret', 'from', 'Reddit', '.', 'You', 'should', 'either', 'pass', 'them', 'to', 'this', 'constructor', 'or', 'make', 'sure', 'that', 'your', 'Flask', 'application', 'config', 'defines', 'them', 'using', 'the', 'variables', ':', 'envvar', ':', 'REDDIT_OAUTH_CLIENT_ID', 'and', ':', 'envvar', ':', 'REDDIT_OAUTH_CLIENT_SECRET', '.'] | train | https://github.com/singingwolfboy/flask-dance/blob/87d45328bbdaff833559a6d3da71461fe4579592/flask_dance/contrib/reddit.py#L34-L115 |
309 | Capitains/MyCapytain | MyCapytain/common/utils/_generic.py | nested_get | def nested_get(dictionary, keys):
""" Get value in dictionary for dictionary[keys[0]][keys[1]][keys[..n]]
:param dictionary: An input dictionary
:param keys: Keys where to store data
:return:
"""
return reduce(lambda d, k: d[k], keys, dictionary) | python | def nested_get(dictionary, keys):
""" Get value in dictionary for dictionary[keys[0]][keys[1]][keys[..n]]
:param dictionary: An input dictionary
:param keys: Keys where to store data
:return:
"""
return reduce(lambda d, k: d[k], keys, dictionary) | ['def', 'nested_get', '(', 'dictionary', ',', 'keys', ')', ':', 'return', 'reduce', '(', 'lambda', 'd', ',', 'k', ':', 'd', '[', 'k', ']', ',', 'keys', ',', 'dictionary', ')'] | Get value in dictionary for dictionary[keys[0]][keys[1]][keys[..n]]
:param dictionary: An input dictionary
:param keys: Keys where to store data
:return: | ['Get', 'value', 'in', 'dictionary', 'for', 'dictionary', '[', 'keys', '[', '0', ']]', '[', 'keys', '[', '1', ']]', '[', 'keys', '[', '..', 'n', ']]'] | train | https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/utils/_generic.py#L32-L39 |
310 | singularityhub/singularity-python | singularity/package/clone.py | package_node | def package_node(root=None, name=None):
'''package node aims to package a (present working node) for a user into
a container. This assumes that the node is a single partition.
:param root: the root of the node to package, default is /
:param name: the name for the image. If not specified, will use machine's
psutil.disk_partitions()
'''
if name is None:
name = platform.node()
if root is None:
root = "/"
tmpdir = tempfile.mkdtemp()
image = "%s/%s.tgz" %(tmpdir,name)
print("Preparing to package root %s into %s" %(root,name))
cmd = ["tar","--one-file-system","-czvSf", image, root,"--exclude",image]
output = run_command(cmd)
return image | python | def package_node(root=None, name=None):
'''package node aims to package a (present working node) for a user into
a container. This assumes that the node is a single partition.
:param root: the root of the node to package, default is /
:param name: the name for the image. If not specified, will use machine's
psutil.disk_partitions()
'''
if name is None:
name = platform.node()
if root is None:
root = "/"
tmpdir = tempfile.mkdtemp()
image = "%s/%s.tgz" %(tmpdir,name)
print("Preparing to package root %s into %s" %(root,name))
cmd = ["tar","--one-file-system","-czvSf", image, root,"--exclude",image]
output = run_command(cmd)
return image | ['def', 'package_node', '(', 'root', '=', 'None', ',', 'name', '=', 'None', ')', ':', 'if', 'name', 'is', 'None', ':', 'name', '=', 'platform', '.', 'node', '(', ')', 'if', 'root', 'is', 'None', ':', 'root', '=', '"/"', 'tmpdir', '=', 'tempfile', '.', 'mkdtemp', '(', ')', 'image', '=', '"%s/%s.tgz"', '%', '(', 'tmpdir', ',', 'name', ')', 'print', '(', '"Preparing to package root %s into %s"', '%', '(', 'root', ',', 'name', ')', ')', 'cmd', '=', '[', '"tar"', ',', '"--one-file-system"', ',', '"-czvSf"', ',', 'image', ',', 'root', ',', '"--exclude"', ',', 'image', ']', 'output', '=', 'run_command', '(', 'cmd', ')', 'return', 'image'] | package node aims to package a (present working node) for a user into
a container. This assumes that the node is a single partition.
:param root: the root of the node to package, default is /
:param name: the name for the image. If not specified, will use machine's
psutil.disk_partitions() | ['package', 'node', 'aims', 'to', 'package', 'a', '(', 'present', 'working', 'node', ')', 'for', 'a', 'user', 'into', 'a', 'container', '.', 'This', 'assumes', 'that', 'the', 'node', 'is', 'a', 'single', 'partition', '.', ':', 'param', 'root', ':', 'the', 'root', 'of', 'the', 'node', 'to', 'package', 'default', 'is', '/', ':', 'param', 'name', ':', 'the', 'name', 'for', 'the', 'image', '.', 'If', 'not', 'specified', 'will', 'use', 'machine', 's'] | train | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/package/clone.py#L30-L53 |
311 | glomex/gcdt | gcdt/yugen_core.py | _update_stage | def _update_stage(awsclient, api_id, stage_name, method_settings):
"""Helper to apply method_settings to stage
:param awsclient:
:param api_id:
:param stage_name:
:param method_settings:
:return:
"""
# settings docs in response: https://botocore.readthedocs.io/en/latest/reference/services/apigateway.html#APIGateway.Client.update_stage
client_api = awsclient.get_client('apigateway')
operations = _convert_method_settings_into_operations(method_settings)
if operations:
print('update method settings for stage')
_sleep()
response = client_api.update_stage(
restApiId=api_id,
stageName=stage_name,
patchOperations=operations) | python | def _update_stage(awsclient, api_id, stage_name, method_settings):
"""Helper to apply method_settings to stage
:param awsclient:
:param api_id:
:param stage_name:
:param method_settings:
:return:
"""
# settings docs in response: https://botocore.readthedocs.io/en/latest/reference/services/apigateway.html#APIGateway.Client.update_stage
client_api = awsclient.get_client('apigateway')
operations = _convert_method_settings_into_operations(method_settings)
if operations:
print('update method settings for stage')
_sleep()
response = client_api.update_stage(
restApiId=api_id,
stageName=stage_name,
patchOperations=operations) | ['def', '_update_stage', '(', 'awsclient', ',', 'api_id', ',', 'stage_name', ',', 'method_settings', ')', ':', '# settings docs in response: https://botocore.readthedocs.io/en/latest/reference/services/apigateway.html#APIGateway.Client.update_stage', 'client_api', '=', 'awsclient', '.', 'get_client', '(', "'apigateway'", ')', 'operations', '=', '_convert_method_settings_into_operations', '(', 'method_settings', ')', 'if', 'operations', ':', 'print', '(', "'update method settings for stage'", ')', '_sleep', '(', ')', 'response', '=', 'client_api', '.', 'update_stage', '(', 'restApiId', '=', 'api_id', ',', 'stageName', '=', 'stage_name', ',', 'patchOperations', '=', 'operations', ')'] | Helper to apply method_settings to stage
:param awsclient:
:param api_id:
:param stage_name:
:param method_settings:
:return: | ['Helper', 'to', 'apply', 'method_settings', 'to', 'stage'] | train | https://github.com/glomex/gcdt/blob/cd67cf416371337b83cb9ca3f696277125703339/gcdt/yugen_core.py#L461-L479 |
312 | pdkit/pdkit | pdkit/tremor_processor.py | TremorProcessor.amplitude_by_fft | def amplitude_by_fft(self, data_frame):
"""
This methods extract the fft components and sum the ones from lower to upper freq as per \
:cite:`Kassavetis2015`
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return ampl: the ampl
:rtype ampl: float
:return freq: the freq
:rtype freq: float
"""
signal_length = len(data_frame.filtered_signal)
normalised_transformed_signal = data_frame.transformed_signal.values / signal_length
k = np.arange(signal_length)
T = signal_length / self.sampling_frequency
f = k / T # two sides frequency range
f = f[range(int(signal_length / 2))] # one side frequency range
ts = normalised_transformed_signal[range(int(signal_length / 2))]
ampl = sum(abs(ts[(f > self.lower_frequency) & (f < self.upper_frequency)]))
freq = f[abs(ts).argmax(axis=0)]
logging.debug("tremor ampl calculated")
return ampl, freq | python | def amplitude_by_fft(self, data_frame):
"""
This methods extract the fft components and sum the ones from lower to upper freq as per \
:cite:`Kassavetis2015`
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return ampl: the ampl
:rtype ampl: float
:return freq: the freq
:rtype freq: float
"""
signal_length = len(data_frame.filtered_signal)
normalised_transformed_signal = data_frame.transformed_signal.values / signal_length
k = np.arange(signal_length)
T = signal_length / self.sampling_frequency
f = k / T # two sides frequency range
f = f[range(int(signal_length / 2))] # one side frequency range
ts = normalised_transformed_signal[range(int(signal_length / 2))]
ampl = sum(abs(ts[(f > self.lower_frequency) & (f < self.upper_frequency)]))
freq = f[abs(ts).argmax(axis=0)]
logging.debug("tremor ampl calculated")
return ampl, freq | ['def', 'amplitude_by_fft', '(', 'self', ',', 'data_frame', ')', ':', 'signal_length', '=', 'len', '(', 'data_frame', '.', 'filtered_signal', ')', 'normalised_transformed_signal', '=', 'data_frame', '.', 'transformed_signal', '.', 'values', '/', 'signal_length', 'k', '=', 'np', '.', 'arange', '(', 'signal_length', ')', 'T', '=', 'signal_length', '/', 'self', '.', 'sampling_frequency', 'f', '=', 'k', '/', 'T', '# two sides frequency range', 'f', '=', 'f', '[', 'range', '(', 'int', '(', 'signal_length', '/', '2', ')', ')', ']', '# one side frequency range', 'ts', '=', 'normalised_transformed_signal', '[', 'range', '(', 'int', '(', 'signal_length', '/', '2', ')', ')', ']', 'ampl', '=', 'sum', '(', 'abs', '(', 'ts', '[', '(', 'f', '>', 'self', '.', 'lower_frequency', ')', '&', '(', 'f', '<', 'self', '.', 'upper_frequency', ')', ']', ')', ')', 'freq', '=', 'f', '[', 'abs', '(', 'ts', ')', '.', 'argmax', '(', 'axis', '=', '0', ')', ']', 'logging', '.', 'debug', '(', '"tremor ampl calculated"', ')', 'return', 'ampl', ',', 'freq'] | This methods extract the fft components and sum the ones from lower to upper freq as per \
:cite:`Kassavetis2015`
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return ampl: the ampl
:rtype ampl: float
:return freq: the freq
:rtype freq: float | ['This', 'methods', 'extract', 'the', 'fft', 'components', 'and', 'sum', 'the', 'ones', 'from', 'lower', 'to', 'upper', 'freq', 'as', 'per', '\\', ':', 'cite', ':', 'Kassavetis2015'] | train | https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/tremor_processor.py#L144-L170 |
313 | Jaymon/prom | prom/config.py | Schema.required_fields | def required_fields(self):
"""The normal required fields (eg, no magic fields like _id are included)"""
return {f:v for f, v in self.normal_fields.items() if v.required} | python | def required_fields(self):
"""The normal required fields (eg, no magic fields like _id are included)"""
return {f:v for f, v in self.normal_fields.items() if v.required} | ['def', 'required_fields', '(', 'self', ')', ':', 'return', '{', 'f', ':', 'v', 'for', 'f', ',', 'v', 'in', 'self', '.', 'normal_fields', '.', 'items', '(', ')', 'if', 'v', '.', 'required', '}'] | The normal required fields (eg, no magic fields like _id are included) | ['The', 'normal', 'required', 'fields', '(', 'eg', 'no', 'magic', 'fields', 'like', '_id', 'are', 'included', ')'] | train | https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/config.py#L175-L177 |
314 | saltstack/salt | salt/states/boto_vpc.py | dhcp_options_present | def dhcp_options_present(name, dhcp_options_id=None, vpc_name=None, vpc_id=None,
domain_name=None, domain_name_servers=None, ntp_servers=None,
netbios_name_servers=None, netbios_node_type=None,
tags=None, region=None, key=None, keyid=None, profile=None):
'''
Ensure a set of DHCP options with the given settings exist.
Note that the current implementation only SETS values during option set
creation. It is unable to update option sets in place, and thus merely
verifies the set exists via the given name and/or dhcp_options_id param.
name
(string)
Name of the DHCP options.
vpc_name
(string)
Name of a VPC to which the options should be associated. Either
vpc_name or vpc_id must be provided.
vpc_id
(string)
Id of a VPC to which the options should be associated. Either
vpc_name or vpc_id must be provided.
domain_name
(string)
Domain name to be assiciated with this option set.
domain_name_servers
(list of strings)
The IP address(es) of up to four domain name servers.
ntp_servers
(list of strings)
The IP address(es) of up to four desired NTP servers.
netbios_name_servers
(list of strings)
The IP address(es) of up to four NetBIOS name servers.
netbios_node_type
(string)
The NetBIOS node type (1, 2, 4, or 8). For more information about
the allowed values, see RFC 2132. The recommended is 2 at this
time (broadcast and multicast are currently not supported).
tags
(dict of key:value pairs)
A set of tags to be added.
region
(string)
Region to connect to.
key
(string)
Secret key to be used.
keyid
(string)
Access key to be used.
profile
(various)
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
.. versionadded:: 2016.3.0
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}
}
_new = {'domain_name': domain_name,
'domain_name_servers': domain_name_servers,
'ntp_servers': ntp_servers,
'netbios_name_servers': netbios_name_servers,
'netbios_node_type': netbios_node_type
}
# boto provides no "update_dhcp_options()" functionality, and you can't delete it if
# it's attached, and you can't detach it if it's the only one, so just check if it's
# there or not, and make no effort to validate it's actual settings... :(
### TODO - add support for multiple sets of DHCP options, and then for "swapping out"
### sets by creating new, mapping, then deleting the old.
r = __salt__['boto_vpc.dhcp_options_exists'](dhcp_options_id=dhcp_options_id,
dhcp_options_name=name,
region=region, key=key, keyid=keyid,
profile=profile)
if 'error' in r:
ret['result'] = False
ret['comment'] = 'Failed to validate DHCP options: {0}.'.format(r['error']['message'])
return ret
if r.get('exists'):
ret['comment'] = 'DHCP options already present.'
return ret
else:
if __opts__['test']:
ret['comment'] = 'DHCP options {0} are set to be created.'.format(name)
ret['result'] = None
return ret
r = __salt__['boto_vpc.create_dhcp_options'](domain_name=domain_name,
domain_name_servers=domain_name_servers,
ntp_servers=ntp_servers,
netbios_name_servers=netbios_name_servers,
netbios_node_type=netbios_node_type,
dhcp_options_name=name, tags=tags,
vpc_id=vpc_id, vpc_name=vpc_name,
region=region, key=key, keyid=keyid,
profile=profile)
if not r.get('created'):
ret['result'] = False
ret['comment'] = 'Failed to create DHCP options: {0}'.format(r['error']['message'])
return ret
ret['changes']['old'] = {'dhcp_options': None}
ret['changes']['new'] = {'dhcp_options': _new}
ret['comment'] = 'DHCP options {0} created.'.format(name)
return ret | python | def dhcp_options_present(name, dhcp_options_id=None, vpc_name=None, vpc_id=None,
domain_name=None, domain_name_servers=None, ntp_servers=None,
netbios_name_servers=None, netbios_node_type=None,
tags=None, region=None, key=None, keyid=None, profile=None):
'''
Ensure a set of DHCP options with the given settings exist.
Note that the current implementation only SETS values during option set
creation. It is unable to update option sets in place, and thus merely
verifies the set exists via the given name and/or dhcp_options_id param.
name
(string)
Name of the DHCP options.
vpc_name
(string)
Name of a VPC to which the options should be associated. Either
vpc_name or vpc_id must be provided.
vpc_id
(string)
Id of a VPC to which the options should be associated. Either
vpc_name or vpc_id must be provided.
domain_name
(string)
Domain name to be assiciated with this option set.
domain_name_servers
(list of strings)
The IP address(es) of up to four domain name servers.
ntp_servers
(list of strings)
The IP address(es) of up to four desired NTP servers.
netbios_name_servers
(list of strings)
The IP address(es) of up to four NetBIOS name servers.
netbios_node_type
(string)
The NetBIOS node type (1, 2, 4, or 8). For more information about
the allowed values, see RFC 2132. The recommended is 2 at this
time (broadcast and multicast are currently not supported).
tags
(dict of key:value pairs)
A set of tags to be added.
region
(string)
Region to connect to.
key
(string)
Secret key to be used.
keyid
(string)
Access key to be used.
profile
(various)
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
.. versionadded:: 2016.3.0
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}
}
_new = {'domain_name': domain_name,
'domain_name_servers': domain_name_servers,
'ntp_servers': ntp_servers,
'netbios_name_servers': netbios_name_servers,
'netbios_node_type': netbios_node_type
}
# boto provides no "update_dhcp_options()" functionality, and you can't delete it if
# it's attached, and you can't detach it if it's the only one, so just check if it's
# there or not, and make no effort to validate it's actual settings... :(
### TODO - add support for multiple sets of DHCP options, and then for "swapping out"
### sets by creating new, mapping, then deleting the old.
r = __salt__['boto_vpc.dhcp_options_exists'](dhcp_options_id=dhcp_options_id,
dhcp_options_name=name,
region=region, key=key, keyid=keyid,
profile=profile)
if 'error' in r:
ret['result'] = False
ret['comment'] = 'Failed to validate DHCP options: {0}.'.format(r['error']['message'])
return ret
if r.get('exists'):
ret['comment'] = 'DHCP options already present.'
return ret
else:
if __opts__['test']:
ret['comment'] = 'DHCP options {0} are set to be created.'.format(name)
ret['result'] = None
return ret
r = __salt__['boto_vpc.create_dhcp_options'](domain_name=domain_name,
domain_name_servers=domain_name_servers,
ntp_servers=ntp_servers,
netbios_name_servers=netbios_name_servers,
netbios_node_type=netbios_node_type,
dhcp_options_name=name, tags=tags,
vpc_id=vpc_id, vpc_name=vpc_name,
region=region, key=key, keyid=keyid,
profile=profile)
if not r.get('created'):
ret['result'] = False
ret['comment'] = 'Failed to create DHCP options: {0}'.format(r['error']['message'])
return ret
ret['changes']['old'] = {'dhcp_options': None}
ret['changes']['new'] = {'dhcp_options': _new}
ret['comment'] = 'DHCP options {0} created.'.format(name)
return ret | ['def', 'dhcp_options_present', '(', 'name', ',', 'dhcp_options_id', '=', 'None', ',', 'vpc_name', '=', 'None', ',', 'vpc_id', '=', 'None', ',', 'domain_name', '=', 'None', ',', 'domain_name_servers', '=', 'None', ',', 'ntp_servers', '=', 'None', ',', 'netbios_name_servers', '=', 'None', ',', 'netbios_node_type', '=', 'None', ',', 'tags', '=', 'None', ',', 'region', '=', 'None', ',', 'key', '=', 'None', ',', 'keyid', '=', 'None', ',', 'profile', '=', 'None', ')', ':', 'ret', '=', '{', "'name'", ':', 'name', ',', "'result'", ':', 'True', ',', "'comment'", ':', "''", ',', "'changes'", ':', '{', '}', '}', '_new', '=', '{', "'domain_name'", ':', 'domain_name', ',', "'domain_name_servers'", ':', 'domain_name_servers', ',', "'ntp_servers'", ':', 'ntp_servers', ',', "'netbios_name_servers'", ':', 'netbios_name_servers', ',', "'netbios_node_type'", ':', 'netbios_node_type', '}', '# boto provides no "update_dhcp_options()" functionality, and you can\'t delete it if', "# it's attached, and you can't detach it if it's the only one, so just check if it's", "# there or not, and make no effort to validate it's actual settings... :(", '### TODO - add support for multiple sets of DHCP options, and then for "swapping out"', '### sets by creating new, mapping, then deleting the old.', 'r', '=', '__salt__', '[', "'boto_vpc.dhcp_options_exists'", ']', '(', 'dhcp_options_id', '=', 'dhcp_options_id', ',', 'dhcp_options_name', '=', 'name', ',', 'region', '=', 'region', ',', 'key', '=', 'key', ',', 'keyid', '=', 'keyid', ',', 'profile', '=', 'profile', ')', 'if', "'error'", 'in', 'r', ':', 'ret', '[', "'result'", ']', '=', 'False', 'ret', '[', "'comment'", ']', '=', "'Failed to validate DHCP options: {0}.'", '.', 'format', '(', 'r', '[', "'error'", ']', '[', "'message'", ']', ')', 'return', 'ret', 'if', 'r', '.', 'get', '(', "'exists'", ')', ':', 'ret', '[', "'comment'", ']', '=', "'DHCP options already present.'", 'return', 'ret', 'else', ':', 'if', '__opts__', '[', "'test'", ']', ':', 'ret', '[', "'comment'", ']', '=', "'DHCP options {0} are set to be created.'", '.', 'format', '(', 'name', ')', 'ret', '[', "'result'", ']', '=', 'None', 'return', 'ret', 'r', '=', '__salt__', '[', "'boto_vpc.create_dhcp_options'", ']', '(', 'domain_name', '=', 'domain_name', ',', 'domain_name_servers', '=', 'domain_name_servers', ',', 'ntp_servers', '=', 'ntp_servers', ',', 'netbios_name_servers', '=', 'netbios_name_servers', ',', 'netbios_node_type', '=', 'netbios_node_type', ',', 'dhcp_options_name', '=', 'name', ',', 'tags', '=', 'tags', ',', 'vpc_id', '=', 'vpc_id', ',', 'vpc_name', '=', 'vpc_name', ',', 'region', '=', 'region', ',', 'key', '=', 'key', ',', 'keyid', '=', 'keyid', ',', 'profile', '=', 'profile', ')', 'if', 'not', 'r', '.', 'get', '(', "'created'", ')', ':', 'ret', '[', "'result'", ']', '=', 'False', 'ret', '[', "'comment'", ']', '=', "'Failed to create DHCP options: {0}'", '.', 'format', '(', 'r', '[', "'error'", ']', '[', "'message'", ']', ')', 'return', 'ret', 'ret', '[', "'changes'", ']', '[', "'old'", ']', '=', '{', "'dhcp_options'", ':', 'None', '}', 'ret', '[', "'changes'", ']', '[', "'new'", ']', '=', '{', "'dhcp_options'", ':', '_new', '}', 'ret', '[', "'comment'", ']', '=', "'DHCP options {0} created.'", '.', 'format', '(', 'name', ')', 'return', 'ret'] | Ensure a set of DHCP options with the given settings exist.
Note that the current implementation only SETS values during option set
creation. It is unable to update option sets in place, and thus merely
verifies the set exists via the given name and/or dhcp_options_id param.
name
(string)
Name of the DHCP options.
vpc_name
(string)
Name of a VPC to which the options should be associated. Either
vpc_name or vpc_id must be provided.
vpc_id
(string)
Id of a VPC to which the options should be associated. Either
vpc_name or vpc_id must be provided.
domain_name
(string)
Domain name to be assiciated with this option set.
domain_name_servers
(list of strings)
The IP address(es) of up to four domain name servers.
ntp_servers
(list of strings)
The IP address(es) of up to four desired NTP servers.
netbios_name_servers
(list of strings)
The IP address(es) of up to four NetBIOS name servers.
netbios_node_type
(string)
The NetBIOS node type (1, 2, 4, or 8). For more information about
the allowed values, see RFC 2132. The recommended is 2 at this
time (broadcast and multicast are currently not supported).
tags
(dict of key:value pairs)
A set of tags to be added.
region
(string)
Region to connect to.
key
(string)
Secret key to be used.
keyid
(string)
Access key to be used.
profile
(various)
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
.. versionadded:: 2016.3.0 | ['Ensure', 'a', 'set', 'of', 'DHCP', 'options', 'with', 'the', 'given', 'settings', 'exist', '.', 'Note', 'that', 'the', 'current', 'implementation', 'only', 'SETS', 'values', 'during', 'option', 'set', 'creation', '.', 'It', 'is', 'unable', 'to', 'update', 'option', 'sets', 'in', 'place', 'and', 'thus', 'merely', 'verifies', 'the', 'set', 'exists', 'via', 'the', 'given', 'name', 'and', '/', 'or', 'dhcp_options_id', 'param', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_vpc.py#L307-L428 |
315 | dbrattli/OSlash | oslash/observable.py | Observable.bind | def bind(self, fn: Callable[[Any], 'Observable']) -> 'Observable':
r"""Chain continuation passing functions.
Haskell: m >>= k = Cont $ \c -> runCont m $ \a -> runCont (k a) c
"""
source = self
return Observable(lambda on_next: source.subscribe(lambda a: fn(a).subscribe(on_next))) | python | def bind(self, fn: Callable[[Any], 'Observable']) -> 'Observable':
r"""Chain continuation passing functions.
Haskell: m >>= k = Cont $ \c -> runCont m $ \a -> runCont (k a) c
"""
source = self
return Observable(lambda on_next: source.subscribe(lambda a: fn(a).subscribe(on_next))) | ['def', 'bind', '(', 'self', ',', 'fn', ':', 'Callable', '[', '[', 'Any', ']', ',', "'Observable'", ']', ')', '->', "'Observable'", ':', 'source', '=', 'self', 'return', 'Observable', '(', 'lambda', 'on_next', ':', 'source', '.', 'subscribe', '(', 'lambda', 'a', ':', 'fn', '(', 'a', ')', '.', 'subscribe', '(', 'on_next', ')', ')', ')'] | r"""Chain continuation passing functions.
Haskell: m >>= k = Cont $ \c -> runCont m $ \a -> runCont (k a) c | ['r', 'Chain', 'continuation', 'passing', 'functions', '.'] | train | https://github.com/dbrattli/OSlash/blob/ffdc714c5d454f7519f740254de89f70850929eb/oslash/observable.py#L47-L53 |
316 | apple/turicreate | src/unity/python/turicreate/extensions.py | ext_import | def ext_import(soname, module_subpath=""):
"""
Loads a turicreate toolkit module (a shared library) into the
tc.extensions namespace.
Toolkit module created via SDK can either be directly imported,
e.g. ``import example`` or via this function, e.g. ``turicreate.ext_import("example.so")``.
Use ``ext_import`` when you need more namespace control, or when
the shared library is not local, e.g. in http, s3 or hdfs.
Parameters
----------
soname : string
The filename of the shared library to load.
This can be a URL, or a HDFS location. For instance if soname is
somewhere/outthere/toolkit.so
The functions in toolkit.so will appear in tc.extensions.toolkit.*
module_subpath : string, optional
Any additional module paths to prepend to the toolkit module after
it is imported. For instance if soname is
somewhere/outthere/toolkit.so, by default
the functions in toolkit.so will appear in tc.extensions.toolkit.*.
However, if I module_subpath="somewhere.outthere", the functions
in toolkit.so will appear in tc.extensions.somewhere.outthere.toolkit.*
Returns
-------
out : a list of functions and classes loaded.
Examples
--------
For instance, given a module which implements the function "square_root",
.. code-block:: c++
#include <cmath>
#include <turicreate/sdk/toolkit_function_macros.hpp>
double square_root(double a) {
return sqrt(a);
}
BEGIN_FUNCTION_REGISTRATION
REGISTER_FUNCTION(square_root, "a");
END_FUNCTION_REGISTRATION
compiled into example.so
>>> turicreate.ext_import('example1.so')
['example1.square_root']
>>> turicreate.extensions.example1.square_root(9)
3.0
We can customize the import location with module_subpath which can be
used to avoid namespace conflicts when you have multiple toolkits with the
same filename.
>>> turicreate.ext_import('example1.so', 'math')
['math.example1.square_root']
>>> turicreate.extensions.math.example1.square_root(9)
3.0
The module can also be imported directly, but turicreate *must* be imported
first. turicreate will intercept the module loading process to load the
toolkit.
>>> import turicreate
>>> import example1 #searches for example1.so in all the python paths
>>> example1.square_root(9)
3.0
"""
unity = _get_unity()
import os
if os.path.exists(soname):
soname = os.path.abspath(soname)
else:
soname = _make_internal_url(soname)
ret = unity.load_toolkit(soname, module_subpath)
if len(ret) > 0:
raise RuntimeError(ret)
_publish()
# push the functions into the corresponding module namespace
return unity.list_toolkit_functions_in_dynamic_module(soname) + unity.list_toolkit_classes_in_dynamic_module(soname) | python | def ext_import(soname, module_subpath=""):
"""
Loads a turicreate toolkit module (a shared library) into the
tc.extensions namespace.
Toolkit module created via SDK can either be directly imported,
e.g. ``import example`` or via this function, e.g. ``turicreate.ext_import("example.so")``.
Use ``ext_import`` when you need more namespace control, or when
the shared library is not local, e.g. in http, s3 or hdfs.
Parameters
----------
soname : string
The filename of the shared library to load.
This can be a URL, or a HDFS location. For instance if soname is
somewhere/outthere/toolkit.so
The functions in toolkit.so will appear in tc.extensions.toolkit.*
module_subpath : string, optional
Any additional module paths to prepend to the toolkit module after
it is imported. For instance if soname is
somewhere/outthere/toolkit.so, by default
the functions in toolkit.so will appear in tc.extensions.toolkit.*.
However, if I module_subpath="somewhere.outthere", the functions
in toolkit.so will appear in tc.extensions.somewhere.outthere.toolkit.*
Returns
-------
out : a list of functions and classes loaded.
Examples
--------
For instance, given a module which implements the function "square_root",
.. code-block:: c++
#include <cmath>
#include <turicreate/sdk/toolkit_function_macros.hpp>
double square_root(double a) {
return sqrt(a);
}
BEGIN_FUNCTION_REGISTRATION
REGISTER_FUNCTION(square_root, "a");
END_FUNCTION_REGISTRATION
compiled into example.so
>>> turicreate.ext_import('example1.so')
['example1.square_root']
>>> turicreate.extensions.example1.square_root(9)
3.0
We can customize the import location with module_subpath which can be
used to avoid namespace conflicts when you have multiple toolkits with the
same filename.
>>> turicreate.ext_import('example1.so', 'math')
['math.example1.square_root']
>>> turicreate.extensions.math.example1.square_root(9)
3.0
The module can also be imported directly, but turicreate *must* be imported
first. turicreate will intercept the module loading process to load the
toolkit.
>>> import turicreate
>>> import example1 #searches for example1.so in all the python paths
>>> example1.square_root(9)
3.0
"""
unity = _get_unity()
import os
if os.path.exists(soname):
soname = os.path.abspath(soname)
else:
soname = _make_internal_url(soname)
ret = unity.load_toolkit(soname, module_subpath)
if len(ret) > 0:
raise RuntimeError(ret)
_publish()
# push the functions into the corresponding module namespace
return unity.list_toolkit_functions_in_dynamic_module(soname) + unity.list_toolkit_classes_in_dynamic_module(soname) | ['def', 'ext_import', '(', 'soname', ',', 'module_subpath', '=', '""', ')', ':', 'unity', '=', '_get_unity', '(', ')', 'import', 'os', 'if', 'os', '.', 'path', '.', 'exists', '(', 'soname', ')', ':', 'soname', '=', 'os', '.', 'path', '.', 'abspath', '(', 'soname', ')', 'else', ':', 'soname', '=', '_make_internal_url', '(', 'soname', ')', 'ret', '=', 'unity', '.', 'load_toolkit', '(', 'soname', ',', 'module_subpath', ')', 'if', 'len', '(', 'ret', ')', '>', '0', ':', 'raise', 'RuntimeError', '(', 'ret', ')', '_publish', '(', ')', '# push the functions into the corresponding module namespace', 'return', 'unity', '.', 'list_toolkit_functions_in_dynamic_module', '(', 'soname', ')', '+', 'unity', '.', 'list_toolkit_classes_in_dynamic_module', '(', 'soname', ')'] | Loads a turicreate toolkit module (a shared library) into the
tc.extensions namespace.
Toolkit module created via SDK can either be directly imported,
e.g. ``import example`` or via this function, e.g. ``turicreate.ext_import("example.so")``.
Use ``ext_import`` when you need more namespace control, or when
the shared library is not local, e.g. in http, s3 or hdfs.
Parameters
----------
soname : string
The filename of the shared library to load.
This can be a URL, or a HDFS location. For instance if soname is
somewhere/outthere/toolkit.so
The functions in toolkit.so will appear in tc.extensions.toolkit.*
module_subpath : string, optional
Any additional module paths to prepend to the toolkit module after
it is imported. For instance if soname is
somewhere/outthere/toolkit.so, by default
the functions in toolkit.so will appear in tc.extensions.toolkit.*.
However, if I module_subpath="somewhere.outthere", the functions
in toolkit.so will appear in tc.extensions.somewhere.outthere.toolkit.*
Returns
-------
out : a list of functions and classes loaded.
Examples
--------
For instance, given a module which implements the function "square_root",
.. code-block:: c++
#include <cmath>
#include <turicreate/sdk/toolkit_function_macros.hpp>
double square_root(double a) {
return sqrt(a);
}
BEGIN_FUNCTION_REGISTRATION
REGISTER_FUNCTION(square_root, "a");
END_FUNCTION_REGISTRATION
compiled into example.so
>>> turicreate.ext_import('example1.so')
['example1.square_root']
>>> turicreate.extensions.example1.square_root(9)
3.0
We can customize the import location with module_subpath which can be
used to avoid namespace conflicts when you have multiple toolkits with the
same filename.
>>> turicreate.ext_import('example1.so', 'math')
['math.example1.square_root']
>>> turicreate.extensions.math.example1.square_root(9)
3.0
The module can also be imported directly, but turicreate *must* be imported
first. turicreate will intercept the module loading process to load the
toolkit.
>>> import turicreate
>>> import example1 #searches for example1.so in all the python paths
>>> example1.square_root(9)
3.0 | ['Loads', 'a', 'turicreate', 'toolkit', 'module', '(', 'a', 'shared', 'library', ')', 'into', 'the', 'tc', '.', 'extensions', 'namespace', '.'] | train | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/extensions.py#L501-L584 |
317 | gwastro/pycbc | pycbc/tmpltbank/lattice_utils.py | generate_anstar_3d_lattice | def generate_anstar_3d_lattice(maxv1, minv1, maxv2, minv2, maxv3, minv3, \
mindist):
"""
This function calls into LAL routines to generate a 3-dimensional array
of points using the An^* lattice.
Parameters
-----------
maxv1 : float
Largest value in the 1st dimension to cover
minv1 : float
Smallest value in the 1st dimension to cover
maxv2 : float
Largest value in the 2nd dimension to cover
minv2 : float
Smallest value in the 2nd dimension to cover
maxv3 : float
Largest value in the 3rd dimension to cover
minv3 : float
Smallest value in the 3rd dimension to cover
mindist : float
Maximum allowed mismatch between a point in the parameter space and the
generated bank of points.
Returns
--------
v1s : numpy.array
Array of positions in the first dimension
v2s : numpy.array
Array of positions in the second dimension
v3s : numpy.array
Array of positions in the second dimension
"""
# Lalpulsar not a requirement for the rest of pycbc, so check if we have it
# here in this function.
try:
import lalpulsar
except:
raise ImportError("A SWIG-wrapped install of lalpulsar is needed to use the anstar tiling functionality.")
tiling = lalpulsar.CreateLatticeTiling(3)
lalpulsar.SetLatticeTilingConstantBound(tiling, 0, minv1, maxv1)
lalpulsar.SetLatticeTilingConstantBound(tiling, 1, minv2, maxv2)
lalpulsar.SetLatticeTilingConstantBound(tiling, 2, minv3, maxv3)
# Make a 3x3 Euclidean lattice
a = lal.gsl_matrix(3,3)
a.data[0,0] = 1
a.data[1,1] = 1
a.data[2,2] = 1
try:
# old versions of lalpulsar used an enumeration
lattice = lalpulsar.TILING_LATTICE_ANSTAR
except AttributeError:
# newer versions of lalpulsar use a string
lattice = 'An-star'
lalpulsar.SetTilingLatticeAndMetric(tiling, lattice, a, mindist)
try:
iterator = lalpulsar.CreateLatticeTilingIterator(tiling, 3)
except TypeError:
# old versions of lalpulsar required the flags argument
# (set to 0 for defaults)
iterator = lalpulsar.CreateLatticeTilingIterator(tiling, 3, 0)
vs1 = []
vs2 = []
vs3 = []
curr_point = lal.gsl_vector(3)
while (lalpulsar.NextLatticeTilingPoint(iterator, curr_point) > 0):
vs1.append(curr_point.data[0])
vs2.append(curr_point.data[1])
vs3.append(curr_point.data[2])
return vs1, vs2, vs3 | python | def generate_anstar_3d_lattice(maxv1, minv1, maxv2, minv2, maxv3, minv3, \
mindist):
"""
This function calls into LAL routines to generate a 3-dimensional array
of points using the An^* lattice.
Parameters
-----------
maxv1 : float
Largest value in the 1st dimension to cover
minv1 : float
Smallest value in the 1st dimension to cover
maxv2 : float
Largest value in the 2nd dimension to cover
minv2 : float
Smallest value in the 2nd dimension to cover
maxv3 : float
Largest value in the 3rd dimension to cover
minv3 : float
Smallest value in the 3rd dimension to cover
mindist : float
Maximum allowed mismatch between a point in the parameter space and the
generated bank of points.
Returns
--------
v1s : numpy.array
Array of positions in the first dimension
v2s : numpy.array
Array of positions in the second dimension
v3s : numpy.array
Array of positions in the second dimension
"""
# Lalpulsar not a requirement for the rest of pycbc, so check if we have it
# here in this function.
try:
import lalpulsar
except:
raise ImportError("A SWIG-wrapped install of lalpulsar is needed to use the anstar tiling functionality.")
tiling = lalpulsar.CreateLatticeTiling(3)
lalpulsar.SetLatticeTilingConstantBound(tiling, 0, minv1, maxv1)
lalpulsar.SetLatticeTilingConstantBound(tiling, 1, minv2, maxv2)
lalpulsar.SetLatticeTilingConstantBound(tiling, 2, minv3, maxv3)
# Make a 3x3 Euclidean lattice
a = lal.gsl_matrix(3,3)
a.data[0,0] = 1
a.data[1,1] = 1
a.data[2,2] = 1
try:
# old versions of lalpulsar used an enumeration
lattice = lalpulsar.TILING_LATTICE_ANSTAR
except AttributeError:
# newer versions of lalpulsar use a string
lattice = 'An-star'
lalpulsar.SetTilingLatticeAndMetric(tiling, lattice, a, mindist)
try:
iterator = lalpulsar.CreateLatticeTilingIterator(tiling, 3)
except TypeError:
# old versions of lalpulsar required the flags argument
# (set to 0 for defaults)
iterator = lalpulsar.CreateLatticeTilingIterator(tiling, 3, 0)
vs1 = []
vs2 = []
vs3 = []
curr_point = lal.gsl_vector(3)
while (lalpulsar.NextLatticeTilingPoint(iterator, curr_point) > 0):
vs1.append(curr_point.data[0])
vs2.append(curr_point.data[1])
vs3.append(curr_point.data[2])
return vs1, vs2, vs3 | ['def', 'generate_anstar_3d_lattice', '(', 'maxv1', ',', 'minv1', ',', 'maxv2', ',', 'minv2', ',', 'maxv3', ',', 'minv3', ',', 'mindist', ')', ':', '# Lalpulsar not a requirement for the rest of pycbc, so check if we have it', '# here in this function.', 'try', ':', 'import', 'lalpulsar', 'except', ':', 'raise', 'ImportError', '(', '"A SWIG-wrapped install of lalpulsar is needed to use the anstar tiling functionality."', ')', 'tiling', '=', 'lalpulsar', '.', 'CreateLatticeTiling', '(', '3', ')', 'lalpulsar', '.', 'SetLatticeTilingConstantBound', '(', 'tiling', ',', '0', ',', 'minv1', ',', 'maxv1', ')', 'lalpulsar', '.', 'SetLatticeTilingConstantBound', '(', 'tiling', ',', '1', ',', 'minv2', ',', 'maxv2', ')', 'lalpulsar', '.', 'SetLatticeTilingConstantBound', '(', 'tiling', ',', '2', ',', 'minv3', ',', 'maxv3', ')', '# Make a 3x3 Euclidean lattice', 'a', '=', 'lal', '.', 'gsl_matrix', '(', '3', ',', '3', ')', 'a', '.', 'data', '[', '0', ',', '0', ']', '=', '1', 'a', '.', 'data', '[', '1', ',', '1', ']', '=', '1', 'a', '.', 'data', '[', '2', ',', '2', ']', '=', '1', 'try', ':', '# old versions of lalpulsar used an enumeration', 'lattice', '=', 'lalpulsar', '.', 'TILING_LATTICE_ANSTAR', 'except', 'AttributeError', ':', '# newer versions of lalpulsar use a string', 'lattice', '=', "'An-star'", 'lalpulsar', '.', 'SetTilingLatticeAndMetric', '(', 'tiling', ',', 'lattice', ',', 'a', ',', 'mindist', ')', 'try', ':', 'iterator', '=', 'lalpulsar', '.', 'CreateLatticeTilingIterator', '(', 'tiling', ',', '3', ')', 'except', 'TypeError', ':', '# old versions of lalpulsar required the flags argument', '# (set to 0 for defaults)', 'iterator', '=', 'lalpulsar', '.', 'CreateLatticeTilingIterator', '(', 'tiling', ',', '3', ',', '0', ')', 'vs1', '=', '[', ']', 'vs2', '=', '[', ']', 'vs3', '=', '[', ']', 'curr_point', '=', 'lal', '.', 'gsl_vector', '(', '3', ')', 'while', '(', 'lalpulsar', '.', 'NextLatticeTilingPoint', '(', 'iterator', ',', 'curr_point', ')', '>', '0', ')', ':', 'vs1', '.', 'append', '(', 'curr_point', '.', 'data', '[', '0', ']', ')', 'vs2', '.', 'append', '(', 'curr_point', '.', 'data', '[', '1', ']', ')', 'vs3', '.', 'append', '(', 'curr_point', '.', 'data', '[', '2', ']', ')', 'return', 'vs1', ',', 'vs2', ',', 'vs3'] | This function calls into LAL routines to generate a 3-dimensional array
of points using the An^* lattice.
Parameters
-----------
maxv1 : float
Largest value in the 1st dimension to cover
minv1 : float
Smallest value in the 1st dimension to cover
maxv2 : float
Largest value in the 2nd dimension to cover
minv2 : float
Smallest value in the 2nd dimension to cover
maxv3 : float
Largest value in the 3rd dimension to cover
minv3 : float
Smallest value in the 3rd dimension to cover
mindist : float
Maximum allowed mismatch between a point in the parameter space and the
generated bank of points.
Returns
--------
v1s : numpy.array
Array of positions in the first dimension
v2s : numpy.array
Array of positions in the second dimension
v3s : numpy.array
Array of positions in the second dimension | ['This', 'function', 'calls', 'into', 'LAL', 'routines', 'to', 'generate', 'a', '3', '-', 'dimensional', 'array', 'of', 'points', 'using', 'the', 'An^', '*', 'lattice', '.'] | train | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/tmpltbank/lattice_utils.py#L88-L159 |
318 | mitsei/dlkit | dlkit/json_/learning/objects.py | ObjectiveForm.clear_cognitive_process | def clear_cognitive_process(self):
"""Clears the cognitive process.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.clear_avatar_template
if (self.get_cognitive_process_metadata().is_read_only() or
self.get_cognitive_process_metadata().is_required()):
raise errors.NoAccess()
self._my_map['cognitiveProcessId'] = self._cognitive_process_default | python | def clear_cognitive_process(self):
"""Clears the cognitive process.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.clear_avatar_template
if (self.get_cognitive_process_metadata().is_read_only() or
self.get_cognitive_process_metadata().is_required()):
raise errors.NoAccess()
self._my_map['cognitiveProcessId'] = self._cognitive_process_default | ['def', 'clear_cognitive_process', '(', 'self', ')', ':', '# Implemented from template for osid.resource.ResourceForm.clear_avatar_template', 'if', '(', 'self', '.', 'get_cognitive_process_metadata', '(', ')', '.', 'is_read_only', '(', ')', 'or', 'self', '.', 'get_cognitive_process_metadata', '(', ')', '.', 'is_required', '(', ')', ')', ':', 'raise', 'errors', '.', 'NoAccess', '(', ')', 'self', '.', '_my_map', '[', "'cognitiveProcessId'", ']', '=', 'self', '.', '_cognitive_process_default'] | Clears the cognitive process.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.* | ['Clears', 'the', 'cognitive', 'process', '.'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/learning/objects.py#L375-L387 |
319 | TheRealLink/pylgtv | pylgtv/webos_client.py | WebOsClient.get_channels | def get_channels(self):
"""Get all tv channels."""
self.request(EP_GET_TV_CHANNELS)
return {} if self.last_response is None else self.last_response.get('payload').get('channelList') | python | def get_channels(self):
"""Get all tv channels."""
self.request(EP_GET_TV_CHANNELS)
return {} if self.last_response is None else self.last_response.get('payload').get('channelList') | ['def', 'get_channels', '(', 'self', ')', ':', 'self', '.', 'request', '(', 'EP_GET_TV_CHANNELS', ')', 'return', '{', '}', 'if', 'self', '.', 'last_response', 'is', 'None', 'else', 'self', '.', 'last_response', '.', 'get', '(', "'payload'", ')', '.', 'get', '(', "'channelList'", ')'] | Get all tv channels. | ['Get', 'all', 'tv', 'channels', '.'] | train | https://github.com/TheRealLink/pylgtv/blob/a7d9ad87ce47e77180fe9262da785465219f4ed6/pylgtv/webos_client.py#L343-L346 |
320 | olsoneric/pedemath | pedemath/vec3.py | point_to_line | def point_to_line(point, segment_start, segment_end):
"""Given a point and a line segment, return the vector from the point to
the closest point on the segment.
"""
# TODO: Needs unittests.
segment_vec = segment_end - segment_start
# t is distance along line
t = -(segment_start - point).dot(segment_vec) / (
segment_vec.length_squared())
closest_point = segment_start + scale_v3(segment_vec, t)
return point - closest_point | python | def point_to_line(point, segment_start, segment_end):
"""Given a point and a line segment, return the vector from the point to
the closest point on the segment.
"""
# TODO: Needs unittests.
segment_vec = segment_end - segment_start
# t is distance along line
t = -(segment_start - point).dot(segment_vec) / (
segment_vec.length_squared())
closest_point = segment_start + scale_v3(segment_vec, t)
return point - closest_point | ['def', 'point_to_line', '(', 'point', ',', 'segment_start', ',', 'segment_end', ')', ':', '# TODO: Needs unittests.', 'segment_vec', '=', 'segment_end', '-', 'segment_start', '# t is distance along line', 't', '=', '-', '(', 'segment_start', '-', 'point', ')', '.', 'dot', '(', 'segment_vec', ')', '/', '(', 'segment_vec', '.', 'length_squared', '(', ')', ')', 'closest_point', '=', 'segment_start', '+', 'scale_v3', '(', 'segment_vec', ',', 't', ')', 'return', 'point', '-', 'closest_point'] | Given a point and a line segment, return the vector from the point to
the closest point on the segment. | ['Given', 'a', 'point', 'and', 'a', 'line', 'segment', 'return', 'the', 'vector', 'from', 'the', 'point', 'to', 'the', 'closest', 'point', 'on', 'the', 'segment', '.'] | train | https://github.com/olsoneric/pedemath/blob/4bffcfe7089e421d603eb0a9708b84789c2d16be/pedemath/vec3.py#L105-L117 |
321 | maaku/python-bitcoin | bitcoin/tools.py | icmp | def icmp(a, b):
"Like cmp(), but for any iterator."
for xa in a:
try:
xb = next(b)
d = cmp(xa, xb)
if d: return d
except StopIteration:
return 1
try:
next(b)
return -1
except StopIteration:
return 0 | python | def icmp(a, b):
"Like cmp(), but for any iterator."
for xa in a:
try:
xb = next(b)
d = cmp(xa, xb)
if d: return d
except StopIteration:
return 1
try:
next(b)
return -1
except StopIteration:
return 0 | ['def', 'icmp', '(', 'a', ',', 'b', ')', ':', 'for', 'xa', 'in', 'a', ':', 'try', ':', 'xb', '=', 'next', '(', 'b', ')', 'd', '=', 'cmp', '(', 'xa', ',', 'xb', ')', 'if', 'd', ':', 'return', 'd', 'except', 'StopIteration', ':', 'return', '1', 'try', ':', 'next', '(', 'b', ')', 'return', '-', '1', 'except', 'StopIteration', ':', 'return', '0'] | Like cmp(), but for any iterator. | ['Like', 'cmp', '()', 'but', 'for', 'any', 'iterator', '.'] | train | https://github.com/maaku/python-bitcoin/blob/1b80c284170fd3f547cc45f4700ce169f3f99641/bitcoin/tools.py#L145-L158 |
322 | DataDog/integrations-core | tokumx/datadog_checks/tokumx/vendor/pymongo/command_cursor.py | CommandCursor.batch_size | def batch_size(self, batch_size):
"""Limits the number of documents returned in one batch. Each batch
requires a round trip to the server. It can be adjusted to optimize
performance and limit data transfer.
.. note:: batch_size can not override MongoDB's internal limits on the
amount of data it will return to the client in a single batch (i.e
if you set batch size to 1,000,000,000, MongoDB will currently only
return 4-16MB of results per batch).
Raises :exc:`TypeError` if `batch_size` is not an integer.
Raises :exc:`ValueError` if `batch_size` is less than ``0``.
:Parameters:
- `batch_size`: The size of each batch of results requested.
"""
if not isinstance(batch_size, integer_types):
raise TypeError("batch_size must be an integer")
if batch_size < 0:
raise ValueError("batch_size must be >= 0")
self.__batch_size = batch_size == 1 and 2 or batch_size
return self | python | def batch_size(self, batch_size):
"""Limits the number of documents returned in one batch. Each batch
requires a round trip to the server. It can be adjusted to optimize
performance and limit data transfer.
.. note:: batch_size can not override MongoDB's internal limits on the
amount of data it will return to the client in a single batch (i.e
if you set batch size to 1,000,000,000, MongoDB will currently only
return 4-16MB of results per batch).
Raises :exc:`TypeError` if `batch_size` is not an integer.
Raises :exc:`ValueError` if `batch_size` is less than ``0``.
:Parameters:
- `batch_size`: The size of each batch of results requested.
"""
if not isinstance(batch_size, integer_types):
raise TypeError("batch_size must be an integer")
if batch_size < 0:
raise ValueError("batch_size must be >= 0")
self.__batch_size = batch_size == 1 and 2 or batch_size
return self | ['def', 'batch_size', '(', 'self', ',', 'batch_size', ')', ':', 'if', 'not', 'isinstance', '(', 'batch_size', ',', 'integer_types', ')', ':', 'raise', 'TypeError', '(', '"batch_size must be an integer"', ')', 'if', 'batch_size', '<', '0', ':', 'raise', 'ValueError', '(', '"batch_size must be >= 0"', ')', 'self', '.', '__batch_size', '=', 'batch_size', '==', '1', 'and', '2', 'or', 'batch_size', 'return', 'self'] | Limits the number of documents returned in one batch. Each batch
requires a round trip to the server. It can be adjusted to optimize
performance and limit data transfer.
.. note:: batch_size can not override MongoDB's internal limits on the
amount of data it will return to the client in a single batch (i.e
if you set batch size to 1,000,000,000, MongoDB will currently only
return 4-16MB of results per batch).
Raises :exc:`TypeError` if `batch_size` is not an integer.
Raises :exc:`ValueError` if `batch_size` is less than ``0``.
:Parameters:
- `batch_size`: The size of each batch of results requested. | ['Limits', 'the', 'number', 'of', 'documents', 'returned', 'in', 'one', 'batch', '.', 'Each', 'batch', 'requires', 'a', 'round', 'trip', 'to', 'the', 'server', '.', 'It', 'can', 'be', 'adjusted', 'to', 'optimize', 'performance', 'and', 'limit', 'data', 'transfer', '.'] | train | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/command_cursor.py#L70-L92 |
323 | GNS3/gns3-server | gns3server/controller/project.py | Project.remove_allocated_node_name | def remove_allocated_node_name(self, name):
"""
Removes an allocated node name
:param name: allocated node name
"""
if name in self._allocated_node_names:
self._allocated_node_names.remove(name) | python | def remove_allocated_node_name(self, name):
"""
Removes an allocated node name
:param name: allocated node name
"""
if name in self._allocated_node_names:
self._allocated_node_names.remove(name) | ['def', 'remove_allocated_node_name', '(', 'self', ',', 'name', ')', ':', 'if', 'name', 'in', 'self', '.', '_allocated_node_names', ':', 'self', '.', '_allocated_node_names', '.', 'remove', '(', 'name', ')'] | Removes an allocated node name
:param name: allocated node name | ['Removes', 'an', 'allocated', 'node', 'name'] | train | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/project.py#L349-L357 |
324 | mongolab/mongoctl | mongoctl/commands/server/start.py | prepare_mongod_server | def prepare_mongod_server(server):
"""
Contains post start server operations
"""
log_info("Preparing server '%s' for use as configured..." %
server.id)
cluster = server.get_cluster()
# setup the local users if server supports that
if server.supports_local_users():
users.setup_server_local_users(server)
if not server.is_cluster_member() or server.is_standalone_config_server():
users.setup_server_users(server)
if cluster and server.is_primary():
users.setup_cluster_users(cluster, server) | python | def prepare_mongod_server(server):
"""
Contains post start server operations
"""
log_info("Preparing server '%s' for use as configured..." %
server.id)
cluster = server.get_cluster()
# setup the local users if server supports that
if server.supports_local_users():
users.setup_server_local_users(server)
if not server.is_cluster_member() or server.is_standalone_config_server():
users.setup_server_users(server)
if cluster and server.is_primary():
users.setup_cluster_users(cluster, server) | ['def', 'prepare_mongod_server', '(', 'server', ')', ':', 'log_info', '(', '"Preparing server \'%s\' for use as configured..."', '%', 'server', '.', 'id', ')', 'cluster', '=', 'server', '.', 'get_cluster', '(', ')', '# setup the local users if server supports that', 'if', 'server', '.', 'supports_local_users', '(', ')', ':', 'users', '.', 'setup_server_local_users', '(', 'server', ')', 'if', 'not', 'server', '.', 'is_cluster_member', '(', ')', 'or', 'server', '.', 'is_standalone_config_server', '(', ')', ':', 'users', '.', 'setup_server_users', '(', 'server', ')', 'if', 'cluster', 'and', 'server', '.', 'is_primary', '(', ')', ':', 'users', '.', 'setup_cluster_users', '(', 'cluster', ',', 'server', ')'] | Contains post start server operations | ['Contains', 'post', 'start', 'server', 'operations'] | train | https://github.com/mongolab/mongoctl/blob/fab15216127ad4bf8ea9aa8a95d75504c0ef01a2/mongoctl/commands/server/start.py#L254-L270 |
325 | pandas-dev/pandas | pandas/core/dtypes/common.py | classes_and_not_datetimelike | def classes_and_not_datetimelike(*klasses):
"""
evaluate if the tipo is a subclass of the klasses
and not a datetimelike
"""
return lambda tipo: (issubclass(tipo, klasses) and
not issubclass(tipo, (np.datetime64, np.timedelta64))) | python | def classes_and_not_datetimelike(*klasses):
"""
evaluate if the tipo is a subclass of the klasses
and not a datetimelike
"""
return lambda tipo: (issubclass(tipo, klasses) and
not issubclass(tipo, (np.datetime64, np.timedelta64))) | ['def', 'classes_and_not_datetimelike', '(', '*', 'klasses', ')', ':', 'return', 'lambda', 'tipo', ':', '(', 'issubclass', '(', 'tipo', ',', 'klasses', ')', 'and', 'not', 'issubclass', '(', 'tipo', ',', '(', 'np', '.', 'datetime64', ',', 'np', '.', 'timedelta64', ')', ')', ')'] | evaluate if the tipo is a subclass of the klasses
and not a datetimelike | ['evaluate', 'if', 'the', 'tipo', 'is', 'a', 'subclass', 'of', 'the', 'klasses', 'and', 'not', 'a', 'datetimelike'] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L122-L128 |
326 | senaite/senaite.core | bika/lims/browser/referencesample.py | ReferenceResultsView.get_reference_results | def get_reference_results(self):
"""Return a mapping of Analysis Service -> Reference Results
"""
referenceresults = self.context.getReferenceResults()
return dict(map(lambda rr: (rr.get("uid"), rr), referenceresults)) | python | def get_reference_results(self):
"""Return a mapping of Analysis Service -> Reference Results
"""
referenceresults = self.context.getReferenceResults()
return dict(map(lambda rr: (rr.get("uid"), rr), referenceresults)) | ['def', 'get_reference_results', '(', 'self', ')', ':', 'referenceresults', '=', 'self', '.', 'context', '.', 'getReferenceResults', '(', ')', 'return', 'dict', '(', 'map', '(', 'lambda', 'rr', ':', '(', 'rr', '.', 'get', '(', '"uid"', ')', ',', 'rr', ')', ',', 'referenceresults', ')', ')'] | Return a mapping of Analysis Service -> Reference Results | ['Return', 'a', 'mapping', 'of', 'Analysis', 'Service', '-', '>', 'Reference', 'Results'] | train | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/referencesample.py#L315-L319 |
327 | mongodb/mongo-python-driver | pymongo/cursor.py | Cursor.count | def count(self, with_limit_and_skip=False):
"""**DEPRECATED** - Get the size of the results set for this query.
The :meth:`count` method is deprecated and **not** supported in a
transaction. Please use
:meth:`~pymongo.collection.Collection.count_documents` instead.
Returns the number of documents in the results set for this query. Does
not take :meth:`limit` and :meth:`skip` into account by default - set
`with_limit_and_skip` to ``True`` if that is the desired behavior.
Raises :class:`~pymongo.errors.OperationFailure` on a database error.
When used with MongoDB >= 2.6, :meth:`~count` uses any :meth:`~hint`
applied to the query. In the following example the hint is passed to
the count command:
collection.find({'field': 'value'}).hint('field_1').count()
The :meth:`count` method obeys the
:attr:`~pymongo.collection.Collection.read_preference` of the
:class:`~pymongo.collection.Collection` instance on which
:meth:`~pymongo.collection.Collection.find` was called.
:Parameters:
- `with_limit_and_skip` (optional): take any :meth:`limit` or
:meth:`skip` that has been applied to this cursor into account when
getting the count
.. note:: The `with_limit_and_skip` parameter requires server
version **>= 1.1.4-**
.. versionchanged:: 3.7
Deprecated.
.. versionchanged:: 2.8
The :meth:`~count` method now supports :meth:`~hint`.
"""
warnings.warn("count is deprecated. Use Collection.count_documents "
"instead.", DeprecationWarning, stacklevel=2)
validate_boolean("with_limit_and_skip", with_limit_and_skip)
cmd = SON([("count", self.__collection.name),
("query", self.__spec)])
if self.__max_time_ms is not None:
cmd["maxTimeMS"] = self.__max_time_ms
if self.__comment:
cmd["comment"] = self.__comment
if self.__hint is not None:
cmd["hint"] = self.__hint
if with_limit_and_skip:
if self.__limit:
cmd["limit"] = self.__limit
if self.__skip:
cmd["skip"] = self.__skip
return self.__collection._count(
cmd, self.__collation, session=self.__session) | python | def count(self, with_limit_and_skip=False):
"""**DEPRECATED** - Get the size of the results set for this query.
The :meth:`count` method is deprecated and **not** supported in a
transaction. Please use
:meth:`~pymongo.collection.Collection.count_documents` instead.
Returns the number of documents in the results set for this query. Does
not take :meth:`limit` and :meth:`skip` into account by default - set
`with_limit_and_skip` to ``True`` if that is the desired behavior.
Raises :class:`~pymongo.errors.OperationFailure` on a database error.
When used with MongoDB >= 2.6, :meth:`~count` uses any :meth:`~hint`
applied to the query. In the following example the hint is passed to
the count command:
collection.find({'field': 'value'}).hint('field_1').count()
The :meth:`count` method obeys the
:attr:`~pymongo.collection.Collection.read_preference` of the
:class:`~pymongo.collection.Collection` instance on which
:meth:`~pymongo.collection.Collection.find` was called.
:Parameters:
- `with_limit_and_skip` (optional): take any :meth:`limit` or
:meth:`skip` that has been applied to this cursor into account when
getting the count
.. note:: The `with_limit_and_skip` parameter requires server
version **>= 1.1.4-**
.. versionchanged:: 3.7
Deprecated.
.. versionchanged:: 2.8
The :meth:`~count` method now supports :meth:`~hint`.
"""
warnings.warn("count is deprecated. Use Collection.count_documents "
"instead.", DeprecationWarning, stacklevel=2)
validate_boolean("with_limit_and_skip", with_limit_and_skip)
cmd = SON([("count", self.__collection.name),
("query", self.__spec)])
if self.__max_time_ms is not None:
cmd["maxTimeMS"] = self.__max_time_ms
if self.__comment:
cmd["comment"] = self.__comment
if self.__hint is not None:
cmd["hint"] = self.__hint
if with_limit_and_skip:
if self.__limit:
cmd["limit"] = self.__limit
if self.__skip:
cmd["skip"] = self.__skip
return self.__collection._count(
cmd, self.__collation, session=self.__session) | ['def', 'count', '(', 'self', ',', 'with_limit_and_skip', '=', 'False', ')', ':', 'warnings', '.', 'warn', '(', '"count is deprecated. Use Collection.count_documents "', '"instead."', ',', 'DeprecationWarning', ',', 'stacklevel', '=', '2', ')', 'validate_boolean', '(', '"with_limit_and_skip"', ',', 'with_limit_and_skip', ')', 'cmd', '=', 'SON', '(', '[', '(', '"count"', ',', 'self', '.', '__collection', '.', 'name', ')', ',', '(', '"query"', ',', 'self', '.', '__spec', ')', ']', ')', 'if', 'self', '.', '__max_time_ms', 'is', 'not', 'None', ':', 'cmd', '[', '"maxTimeMS"', ']', '=', 'self', '.', '__max_time_ms', 'if', 'self', '.', '__comment', ':', 'cmd', '[', '"comment"', ']', '=', 'self', '.', '__comment', 'if', 'self', '.', '__hint', 'is', 'not', 'None', ':', 'cmd', '[', '"hint"', ']', '=', 'self', '.', '__hint', 'if', 'with_limit_and_skip', ':', 'if', 'self', '.', '__limit', ':', 'cmd', '[', '"limit"', ']', '=', 'self', '.', '__limit', 'if', 'self', '.', '__skip', ':', 'cmd', '[', '"skip"', ']', '=', 'self', '.', '__skip', 'return', 'self', '.', '__collection', '.', '_count', '(', 'cmd', ',', 'self', '.', '__collation', ',', 'session', '=', 'self', '.', '__session', ')'] | **DEPRECATED** - Get the size of the results set for this query.
The :meth:`count` method is deprecated and **not** supported in a
transaction. Please use
:meth:`~pymongo.collection.Collection.count_documents` instead.
Returns the number of documents in the results set for this query. Does
not take :meth:`limit` and :meth:`skip` into account by default - set
`with_limit_and_skip` to ``True`` if that is the desired behavior.
Raises :class:`~pymongo.errors.OperationFailure` on a database error.
When used with MongoDB >= 2.6, :meth:`~count` uses any :meth:`~hint`
applied to the query. In the following example the hint is passed to
the count command:
collection.find({'field': 'value'}).hint('field_1').count()
The :meth:`count` method obeys the
:attr:`~pymongo.collection.Collection.read_preference` of the
:class:`~pymongo.collection.Collection` instance on which
:meth:`~pymongo.collection.Collection.find` was called.
:Parameters:
- `with_limit_and_skip` (optional): take any :meth:`limit` or
:meth:`skip` that has been applied to this cursor into account when
getting the count
.. note:: The `with_limit_and_skip` parameter requires server
version **>= 1.1.4-**
.. versionchanged:: 3.7
Deprecated.
.. versionchanged:: 2.8
The :meth:`~count` method now supports :meth:`~hint`. | ['**', 'DEPRECATED', '**', '-', 'Get', 'the', 'size', 'of', 'the', 'results', 'set', 'for', 'this', 'query', '.'] | train | https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/cursor.py#L723-L780 |
328 | jmcgeheeiv/pyfakefs | pyfakefs/helpers.py | FakeStatResult.st_atime | def st_atime(self):
"""Return the access time in seconds."""
atime = self._st_atime_ns / 1e9
return atime if self.use_float else int(atime) | python | def st_atime(self):
"""Return the access time in seconds."""
atime = self._st_atime_ns / 1e9
return atime if self.use_float else int(atime) | ['def', 'st_atime', '(', 'self', ')', ':', 'atime', '=', 'self', '.', '_st_atime_ns', '/', '1e9', 'return', 'atime', 'if', 'self', '.', 'use_float', 'else', 'int', '(', 'atime', ')'] | Return the access time in seconds. | ['Return', 'the', 'access', 'time', 'in', 'seconds', '.'] | train | https://github.com/jmcgeheeiv/pyfakefs/blob/6c36fb8987108107fc861fc3013620d46c7d2f9c/pyfakefs/helpers.py#L169-L172 |
329 | delfick/gitmit | gitmit/prefix_tree.py | PrefixTree.fill | def fill(self, paths):
"""
Initialise the tree.
paths is a list of strings where each string is the relative path to some
file.
"""
for path in paths:
tree = self.tree
parts = tuple(path.split('/'))
dir_parts = parts[:-1]
built = ()
for part in dir_parts:
self.cache[built] = tree
built += (part, )
parent = tree
tree = parent.folders.get(part, empty)
if tree is empty:
tree = parent.folders[part] = TreeItem(name=built, folders={}, files=set(), parent=parent)
self.cache[dir_parts] = tree
tree.files.add(parts[-1]) | python | def fill(self, paths):
"""
Initialise the tree.
paths is a list of strings where each string is the relative path to some
file.
"""
for path in paths:
tree = self.tree
parts = tuple(path.split('/'))
dir_parts = parts[:-1]
built = ()
for part in dir_parts:
self.cache[built] = tree
built += (part, )
parent = tree
tree = parent.folders.get(part, empty)
if tree is empty:
tree = parent.folders[part] = TreeItem(name=built, folders={}, files=set(), parent=parent)
self.cache[dir_parts] = tree
tree.files.add(parts[-1]) | ['def', 'fill', '(', 'self', ',', 'paths', ')', ':', 'for', 'path', 'in', 'paths', ':', 'tree', '=', 'self', '.', 'tree', 'parts', '=', 'tuple', '(', 'path', '.', 'split', '(', "'/'", ')', ')', 'dir_parts', '=', 'parts', '[', ':', '-', '1', ']', 'built', '=', '(', ')', 'for', 'part', 'in', 'dir_parts', ':', 'self', '.', 'cache', '[', 'built', ']', '=', 'tree', 'built', '+=', '(', 'part', ',', ')', 'parent', '=', 'tree', 'tree', '=', 'parent', '.', 'folders', '.', 'get', '(', 'part', ',', 'empty', ')', 'if', 'tree', 'is', 'empty', ':', 'tree', '=', 'parent', '.', 'folders', '[', 'part', ']', '=', 'TreeItem', '(', 'name', '=', 'built', ',', 'folders', '=', '{', '}', ',', 'files', '=', 'set', '(', ')', ',', 'parent', '=', 'parent', ')', 'self', '.', 'cache', '[', 'dir_parts', ']', '=', 'tree', 'tree', '.', 'files', '.', 'add', '(', 'parts', '[', '-', '1', ']', ')'] | Initialise the tree.
paths is a list of strings where each string is the relative path to some
file. | ['Initialise', 'the', 'tree', '.'] | train | https://github.com/delfick/gitmit/blob/ae0aef14a06b25ad2811f8f47cc97e68a0910eae/gitmit/prefix_tree.py#L59-L80 |
330 | pymc-devs/pymc | pymc/distributions.py | exponweib_like | def exponweib_like(x, alpha, k, loc=0, scale=1):
R"""
Exponentiated Weibull log-likelihood.
The exponentiated Weibull distribution is a generalization of the Weibull
family. Its value lies in being able to model monotone and non-monotone
failure rates.
.. math::
f(x \mid \alpha,k,loc,scale) & = \frac{\alpha k}{scale} (1-e^{-z^k})^{\alpha-1} e^{-z^k} z^{k-1} \\
z & = \frac{x-loc}{scale}
:Parameters:
- `x` : x > 0
- `alpha` : Shape parameter
- `k` : k > 0
- `loc` : Location parameter
- `scale` : Scale parameter (scale > 0).
"""
return flib.exponweib(x, alpha, k, loc, scale) | python | def exponweib_like(x, alpha, k, loc=0, scale=1):
R"""
Exponentiated Weibull log-likelihood.
The exponentiated Weibull distribution is a generalization of the Weibull
family. Its value lies in being able to model monotone and non-monotone
failure rates.
.. math::
f(x \mid \alpha,k,loc,scale) & = \frac{\alpha k}{scale} (1-e^{-z^k})^{\alpha-1} e^{-z^k} z^{k-1} \\
z & = \frac{x-loc}{scale}
:Parameters:
- `x` : x > 0
- `alpha` : Shape parameter
- `k` : k > 0
- `loc` : Location parameter
- `scale` : Scale parameter (scale > 0).
"""
return flib.exponweib(x, alpha, k, loc, scale) | ['def', 'exponweib_like', '(', 'x', ',', 'alpha', ',', 'k', ',', 'loc', '=', '0', ',', 'scale', '=', '1', ')', ':', 'return', 'flib', '.', 'exponweib', '(', 'x', ',', 'alpha', ',', 'k', ',', 'loc', ',', 'scale', ')'] | R"""
Exponentiated Weibull log-likelihood.
The exponentiated Weibull distribution is a generalization of the Weibull
family. Its value lies in being able to model monotone and non-monotone
failure rates.
.. math::
f(x \mid \alpha,k,loc,scale) & = \frac{\alpha k}{scale} (1-e^{-z^k})^{\alpha-1} e^{-z^k} z^{k-1} \\
z & = \frac{x-loc}{scale}
:Parameters:
- `x` : x > 0
- `alpha` : Shape parameter
- `k` : k > 0
- `loc` : Location parameter
- `scale` : Scale parameter (scale > 0). | ['R', 'Exponentiated', 'Weibull', 'log', '-', 'likelihood', '.'] | train | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1241-L1261 |
331 | myint/unify | unify.py | format_file | def format_file(filename, args, standard_out):
"""Run format_code() on a file.
Returns `True` if any changes are needed and they are not being done
in-place.
"""
encoding = detect_encoding(filename)
with open_with_encoding(filename, encoding=encoding) as input_file:
source = input_file.read()
formatted_source = format_code(
source,
preferred_quote=args.quote)
if source != formatted_source:
if args.in_place:
with open_with_encoding(filename, mode='w',
encoding=encoding) as output_file:
output_file.write(formatted_source)
else:
import difflib
diff = difflib.unified_diff(
source.splitlines(),
formatted_source.splitlines(),
'before/' + filename,
'after/' + filename,
lineterm='')
standard_out.write('\n'.join(list(diff) + ['']))
return True | python | def format_file(filename, args, standard_out):
"""Run format_code() on a file.
Returns `True` if any changes are needed and they are not being done
in-place.
"""
encoding = detect_encoding(filename)
with open_with_encoding(filename, encoding=encoding) as input_file:
source = input_file.read()
formatted_source = format_code(
source,
preferred_quote=args.quote)
if source != formatted_source:
if args.in_place:
with open_with_encoding(filename, mode='w',
encoding=encoding) as output_file:
output_file.write(formatted_source)
else:
import difflib
diff = difflib.unified_diff(
source.splitlines(),
formatted_source.splitlines(),
'before/' + filename,
'after/' + filename,
lineterm='')
standard_out.write('\n'.join(list(diff) + ['']))
return True | ['def', 'format_file', '(', 'filename', ',', 'args', ',', 'standard_out', ')', ':', 'encoding', '=', 'detect_encoding', '(', 'filename', ')', 'with', 'open_with_encoding', '(', 'filename', ',', 'encoding', '=', 'encoding', ')', 'as', 'input_file', ':', 'source', '=', 'input_file', '.', 'read', '(', ')', 'formatted_source', '=', 'format_code', '(', 'source', ',', 'preferred_quote', '=', 'args', '.', 'quote', ')', 'if', 'source', '!=', 'formatted_source', ':', 'if', 'args', '.', 'in_place', ':', 'with', 'open_with_encoding', '(', 'filename', ',', 'mode', '=', "'w'", ',', 'encoding', '=', 'encoding', ')', 'as', 'output_file', ':', 'output_file', '.', 'write', '(', 'formatted_source', ')', 'else', ':', 'import', 'difflib', 'diff', '=', 'difflib', '.', 'unified_diff', '(', 'source', '.', 'splitlines', '(', ')', ',', 'formatted_source', '.', 'splitlines', '(', ')', ',', "'before/'", '+', 'filename', ',', "'after/'", '+', 'filename', ',', 'lineterm', '=', "''", ')', 'standard_out', '.', 'write', '(', "'\\n'", '.', 'join', '(', 'list', '(', 'diff', ')', '+', '[', "''", ']', ')', ')', 'return', 'True'] | Run format_code() on a file.
Returns `True` if any changes are needed and they are not being done
in-place. | ['Run', 'format_code', '()', 'on', 'a', 'file', '.'] | train | https://github.com/myint/unify/blob/ae699f5980a715cadc4a2f07bf16d11083c59401/unify.py#L135-L164 |
332 | bitesofcode/projexui | projexui/widgets/xorbcolumnnavigator.py | XOrbColumnItem.load | def load(self):
"""
Loads the children for this item.
"""
if self._loaded:
return
self.setChildIndicatorPolicy(self.DontShowIndicatorWhenChildless)
self._loaded = True
column = self.schemaColumn()
if not column.isReference():
return
ref = column.referenceModel()
if not ref:
return
columns = sorted(ref.schema().columns(),
key=lambda x: x.name().strip('_'))
for column in columns:
XOrbColumnItem(self, column) | python | def load(self):
"""
Loads the children for this item.
"""
if self._loaded:
return
self.setChildIndicatorPolicy(self.DontShowIndicatorWhenChildless)
self._loaded = True
column = self.schemaColumn()
if not column.isReference():
return
ref = column.referenceModel()
if not ref:
return
columns = sorted(ref.schema().columns(),
key=lambda x: x.name().strip('_'))
for column in columns:
XOrbColumnItem(self, column) | ['def', 'load', '(', 'self', ')', ':', 'if', 'self', '.', '_loaded', ':', 'return', 'self', '.', 'setChildIndicatorPolicy', '(', 'self', '.', 'DontShowIndicatorWhenChildless', ')', 'self', '.', '_loaded', '=', 'True', 'column', '=', 'self', '.', 'schemaColumn', '(', ')', 'if', 'not', 'column', '.', 'isReference', '(', ')', ':', 'return', 'ref', '=', 'column', '.', 'referenceModel', '(', ')', 'if', 'not', 'ref', ':', 'return', 'columns', '=', 'sorted', '(', 'ref', '.', 'schema', '(', ')', '.', 'columns', '(', ')', ',', 'key', '=', 'lambda', 'x', ':', 'x', '.', 'name', '(', ')', '.', 'strip', '(', "'_'", ')', ')', 'for', 'column', 'in', 'columns', ':', 'XOrbColumnItem', '(', 'self', ',', 'column', ')'] | Loads the children for this item. | ['Loads', 'the', 'children', 'for', 'this', 'item', '.'] | train | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbcolumnnavigator.py#L49-L70 |
333 | EconForge/dolo | dolo/misc/decorators.py | deprecated | def deprecated(func):
'''This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.'''
import warnings
@functools.wraps(func)
def new_func(*args, **kwargs):
if is_python_3:
code = func.__code__
else:
code = func.func_code
warnings.warn_explicit(
"Call to deprecated function {}.".format(func.__name__),
category=Warning,
filename=code.co_filename,
lineno=code.co_firstlineno + 1
)
return func(*args, **kwargs)
return new_func | python | def deprecated(func):
'''This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.'''
import warnings
@functools.wraps(func)
def new_func(*args, **kwargs):
if is_python_3:
code = func.__code__
else:
code = func.func_code
warnings.warn_explicit(
"Call to deprecated function {}.".format(func.__name__),
category=Warning,
filename=code.co_filename,
lineno=code.co_firstlineno + 1
)
return func(*args, **kwargs)
return new_func | ['def', 'deprecated', '(', 'func', ')', ':', 'import', 'warnings', '@', 'functools', '.', 'wraps', '(', 'func', ')', 'def', 'new_func', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'if', 'is_python_3', ':', 'code', '=', 'func', '.', '__code__', 'else', ':', 'code', '=', 'func', '.', 'func_code', 'warnings', '.', 'warn_explicit', '(', '"Call to deprecated function {}."', '.', 'format', '(', 'func', '.', '__name__', ')', ',', 'category', '=', 'Warning', ',', 'filename', '=', 'code', '.', 'co_filename', ',', 'lineno', '=', 'code', '.', 'co_firstlineno', '+', '1', ')', 'return', 'func', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'new_func'] | This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used. | ['This', 'is', 'a', 'decorator', 'which', 'can', 'be', 'used', 'to', 'mark', 'functions', 'as', 'deprecated', '.', 'It', 'will', 'result', 'in', 'a', 'warning', 'being', 'emitted', 'when', 'the', 'function', 'is', 'used', '.'] | train | https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/misc/decorators.py#L8-L28 |
334 | python-rope/rope | rope/base/prefs.py | Prefs.set | def set(self, key, value):
"""Set the value of `key` preference to `value`."""
if key in self.callbacks:
self.callbacks[key](value)
else:
self.prefs[key] = value | python | def set(self, key, value):
"""Set the value of `key` preference to `value`."""
if key in self.callbacks:
self.callbacks[key](value)
else:
self.prefs[key] = value | ['def', 'set', '(', 'self', ',', 'key', ',', 'value', ')', ':', 'if', 'key', 'in', 'self', '.', 'callbacks', ':', 'self', '.', 'callbacks', '[', 'key', ']', '(', 'value', ')', 'else', ':', 'self', '.', 'prefs', '[', 'key', ']', '=', 'value'] | Set the value of `key` preference to `value`. | ['Set', 'the', 'value', 'of', 'key', 'preference', 'to', 'value', '.'] | train | https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/base/prefs.py#L7-L12 |
335 | lextoumbourou/txstripe | txstripe/resource.py | Dispute.close | def close(self, idempotency_key=None):
"""Return a deferred."""
url = self.instance_url() + '/close'
headers = populate_headers(idempotency_key)
d = self.request('post', url, {}, headers)
return d.addCallback(self.refresh_from).addCallback(lambda _: self) | python | def close(self, idempotency_key=None):
"""Return a deferred."""
url = self.instance_url() + '/close'
headers = populate_headers(idempotency_key)
d = self.request('post', url, {}, headers)
return d.addCallback(self.refresh_from).addCallback(lambda _: self) | ['def', 'close', '(', 'self', ',', 'idempotency_key', '=', 'None', ')', ':', 'url', '=', 'self', '.', 'instance_url', '(', ')', '+', "'/close'", 'headers', '=', 'populate_headers', '(', 'idempotency_key', ')', 'd', '=', 'self', '.', 'request', '(', "'post'", ',', 'url', ',', '{', '}', ',', 'headers', ')', 'return', 'd', '.', 'addCallback', '(', 'self', '.', 'refresh_from', ')', '.', 'addCallback', '(', 'lambda', '_', ':', 'self', ')'] | Return a deferred. | ['Return', 'a', 'deferred', '.'] | train | https://github.com/lextoumbourou/txstripe/blob/a69e67f524258026fd1840655a0578311bba3b89/txstripe/resource.py#L350-L355 |
336 | pyviz/holoviews | holoviews/core/boundingregion.py | BoundingBox.contains_exclusive | def contains_exclusive(self, x, y):
"""
Return True if the given point is contained within the
bounding box, where the bottom and right boundaries are
considered exclusive.
"""
left, bottom, right, top = self._aarect.lbrt()
return (left <= x < right) and (bottom < y <= top) | python | def contains_exclusive(self, x, y):
"""
Return True if the given point is contained within the
bounding box, where the bottom and right boundaries are
considered exclusive.
"""
left, bottom, right, top = self._aarect.lbrt()
return (left <= x < right) and (bottom < y <= top) | ['def', 'contains_exclusive', '(', 'self', ',', 'x', ',', 'y', ')', ':', 'left', ',', 'bottom', ',', 'right', ',', 'top', '=', 'self', '.', '_aarect', '.', 'lbrt', '(', ')', 'return', '(', 'left', '<=', 'x', '<', 'right', ')', 'and', '(', 'bottom', '<', 'y', '<=', 'top', ')'] | Return True if the given point is contained within the
bounding box, where the bottom and right boundaries are
considered exclusive. | ['Return', 'True', 'if', 'the', 'given', 'point', 'is', 'contained', 'within', 'the', 'bounding', 'box', 'where', 'the', 'bottom', 'and', 'right', 'boundaries', 'are', 'considered', 'exclusive', '.'] | train | https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/boundingregion.py#L157-L164 |
337 | NoneGG/aredis | aredis/commands/extra.py | ExtraCommandMixin.cache | def cache(self, name, cache_class=Cache,
identity_generator_class=IdentityGenerator,
compressor_class=Compressor,
serializer_class=Serializer, *args, **kwargs):
"""
Return a cache object using default identity generator,
serializer and compressor.
``name`` is used to identify the series of your cache
``cache_class`` Cache is for normal use and HerdCache
is used in case of Thundering Herd Problem
``identity_generator_class`` is the class used to generate
the real unique key in cache, can be overwritten to
meet your special needs. It should provide `generate` API
``compressor_class`` is the class used to compress cache in redis,
can be overwritten with API `compress` and `decompress` retained.
``serializer_class`` is the class used to serialize
content before compress, can be overwritten with API
`serialize` and `deserialize` retained.
"""
return cache_class(self, app=name,
identity_generator_class=identity_generator_class,
compressor_class=compressor_class,
serializer_class=serializer_class,
*args, **kwargs) | python | def cache(self, name, cache_class=Cache,
identity_generator_class=IdentityGenerator,
compressor_class=Compressor,
serializer_class=Serializer, *args, **kwargs):
"""
Return a cache object using default identity generator,
serializer and compressor.
``name`` is used to identify the series of your cache
``cache_class`` Cache is for normal use and HerdCache
is used in case of Thundering Herd Problem
``identity_generator_class`` is the class used to generate
the real unique key in cache, can be overwritten to
meet your special needs. It should provide `generate` API
``compressor_class`` is the class used to compress cache in redis,
can be overwritten with API `compress` and `decompress` retained.
``serializer_class`` is the class used to serialize
content before compress, can be overwritten with API
`serialize` and `deserialize` retained.
"""
return cache_class(self, app=name,
identity_generator_class=identity_generator_class,
compressor_class=compressor_class,
serializer_class=serializer_class,
*args, **kwargs) | ['def', 'cache', '(', 'self', ',', 'name', ',', 'cache_class', '=', 'Cache', ',', 'identity_generator_class', '=', 'IdentityGenerator', ',', 'compressor_class', '=', 'Compressor', ',', 'serializer_class', '=', 'Serializer', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'return', 'cache_class', '(', 'self', ',', 'app', '=', 'name', ',', 'identity_generator_class', '=', 'identity_generator_class', ',', 'compressor_class', '=', 'compressor_class', ',', 'serializer_class', '=', 'serializer_class', ',', '*', 'args', ',', '*', '*', 'kwargs', ')'] | Return a cache object using default identity generator,
serializer and compressor.
``name`` is used to identify the series of your cache
``cache_class`` Cache is for normal use and HerdCache
is used in case of Thundering Herd Problem
``identity_generator_class`` is the class used to generate
the real unique key in cache, can be overwritten to
meet your special needs. It should provide `generate` API
``compressor_class`` is the class used to compress cache in redis,
can be overwritten with API `compress` and `decompress` retained.
``serializer_class`` is the class used to serialize
content before compress, can be overwritten with API
`serialize` and `deserialize` retained. | ['Return', 'a', 'cache', 'object', 'using', 'default', 'identity', 'generator', 'serializer', 'and', 'compressor', '.'] | train | https://github.com/NoneGG/aredis/blob/204caad740ac13e5760d46444a2ba7632982a046/aredis/commands/extra.py#L11-L35 |
338 | brendonh/pyth | pyth/document.py | _PythBase.append | def append(self, item):
"""
Try to add an item to this element.
If the item is of the wrong type, and if this element has a sub-type,
then try to create such a sub-type and insert the item into that, instead.
This happens recursively, so (in python-markup):
L [ u'Foo' ]
actually creates:
L [ LE [ P [ T [ u'Foo' ] ] ] ]
If that doesn't work, raise a TypeError.
"""
okay = True
if not isinstance(item, self.contentType):
if hasattr(self.contentType, 'contentType'):
try:
item = self.contentType(content=[item])
except TypeError:
okay = False
else:
okay = False
if not okay:
raise TypeError("Wrong content type for %s: %s (%s)" % (
self.__class__.__name__, repr(type(item)), repr(item)))
self.content.append(item) | python | def append(self, item):
"""
Try to add an item to this element.
If the item is of the wrong type, and if this element has a sub-type,
then try to create such a sub-type and insert the item into that, instead.
This happens recursively, so (in python-markup):
L [ u'Foo' ]
actually creates:
L [ LE [ P [ T [ u'Foo' ] ] ] ]
If that doesn't work, raise a TypeError.
"""
okay = True
if not isinstance(item, self.contentType):
if hasattr(self.contentType, 'contentType'):
try:
item = self.contentType(content=[item])
except TypeError:
okay = False
else:
okay = False
if not okay:
raise TypeError("Wrong content type for %s: %s (%s)" % (
self.__class__.__name__, repr(type(item)), repr(item)))
self.content.append(item) | ['def', 'append', '(', 'self', ',', 'item', ')', ':', 'okay', '=', 'True', 'if', 'not', 'isinstance', '(', 'item', ',', 'self', '.', 'contentType', ')', ':', 'if', 'hasattr', '(', 'self', '.', 'contentType', ',', "'contentType'", ')', ':', 'try', ':', 'item', '=', 'self', '.', 'contentType', '(', 'content', '=', '[', 'item', ']', ')', 'except', 'TypeError', ':', 'okay', '=', 'False', 'else', ':', 'okay', '=', 'False', 'if', 'not', 'okay', ':', 'raise', 'TypeError', '(', '"Wrong content type for %s: %s (%s)"', '%', '(', 'self', '.', '__class__', '.', '__name__', ',', 'repr', '(', 'type', '(', 'item', ')', ')', ',', 'repr', '(', 'item', ')', ')', ')', 'self', '.', 'content', '.', 'append', '(', 'item', ')'] | Try to add an item to this element.
If the item is of the wrong type, and if this element has a sub-type,
then try to create such a sub-type and insert the item into that, instead.
This happens recursively, so (in python-markup):
L [ u'Foo' ]
actually creates:
L [ LE [ P [ T [ u'Foo' ] ] ] ]
If that doesn't work, raise a TypeError. | ['Try', 'to', 'add', 'an', 'item', 'to', 'this', 'element', '.'] | train | https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/document.py#L30-L59 |
339 | PX4/pyulog | pyulog/core.py | ULog.get_version_info | def get_version_info(self, key_name='ver_sw_release'):
"""
get the (major, minor, patch, type) version information as tuple.
Returns None if not found
definition of type is:
>= 0: development
>= 64: alpha version
>= 128: beta version
>= 192: RC version
== 255: release version
"""
if key_name in self._msg_info_dict:
val = self._msg_info_dict[key_name]
return ((val >> 24) & 0xff, (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff)
return None | python | def get_version_info(self, key_name='ver_sw_release'):
"""
get the (major, minor, patch, type) version information as tuple.
Returns None if not found
definition of type is:
>= 0: development
>= 64: alpha version
>= 128: beta version
>= 192: RC version
== 255: release version
"""
if key_name in self._msg_info_dict:
val = self._msg_info_dict[key_name]
return ((val >> 24) & 0xff, (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff)
return None | ['def', 'get_version_info', '(', 'self', ',', 'key_name', '=', "'ver_sw_release'", ')', ':', 'if', 'key_name', 'in', 'self', '.', '_msg_info_dict', ':', 'val', '=', 'self', '.', '_msg_info_dict', '[', 'key_name', ']', 'return', '(', '(', 'val', '>>', '24', ')', '&', '0xff', ',', '(', 'val', '>>', '16', ')', '&', '0xff', ',', '(', 'val', '>>', '8', ')', '&', '0xff', ',', 'val', '&', '0xff', ')', 'return', 'None'] | get the (major, minor, patch, type) version information as tuple.
Returns None if not found
definition of type is:
>= 0: development
>= 64: alpha version
>= 128: beta version
>= 192: RC version
== 255: release version | ['get', 'the', '(', 'major', 'minor', 'patch', 'type', ')', 'version', 'information', 'as', 'tuple', '.', 'Returns', 'None', 'if', 'not', 'found', 'definition', 'of', 'type', 'is', ':', '>', '=', '0', ':', 'development', '>', '=', '64', ':', 'alpha', 'version', '>', '=', '128', ':', 'beta', 'version', '>', '=', '192', ':', 'RC', 'version', '==', '255', ':', 'release', 'version'] | train | https://github.com/PX4/pyulog/blob/3bc4f9338d30e2e0a0dfbed58f54d200967e5056/pyulog/core.py#L614-L628 |
340 | hasgeek/coaster | coaster/nlp.py | extract_named_entities | def extract_named_entities(text_blocks):
"""
Return a list of named entities extracted from provided text blocks (list of text strings).
"""
sentences = []
for text in text_blocks:
sentences.extend(nltk.sent_tokenize(text))
tokenized_sentences = [nltk.word_tokenize(sentence) for sentence in sentences]
tagged_sentences = [nltk.pos_tag(sentence) for sentence in tokenized_sentences]
chunked_sentences = nltk.ne_chunk_sents(tagged_sentences, binary=True)
def extract_entity_names(t):
entity_names = []
if hasattr(t, 'label'):
if t.label() == 'NE':
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child))
return entity_names
entity_names = []
for tree in chunked_sentences:
entity_names.extend(extract_entity_names(tree))
return set(entity_names) | python | def extract_named_entities(text_blocks):
"""
Return a list of named entities extracted from provided text blocks (list of text strings).
"""
sentences = []
for text in text_blocks:
sentences.extend(nltk.sent_tokenize(text))
tokenized_sentences = [nltk.word_tokenize(sentence) for sentence in sentences]
tagged_sentences = [nltk.pos_tag(sentence) for sentence in tokenized_sentences]
chunked_sentences = nltk.ne_chunk_sents(tagged_sentences, binary=True)
def extract_entity_names(t):
entity_names = []
if hasattr(t, 'label'):
if t.label() == 'NE':
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child))
return entity_names
entity_names = []
for tree in chunked_sentences:
entity_names.extend(extract_entity_names(tree))
return set(entity_names) | ['def', 'extract_named_entities', '(', 'text_blocks', ')', ':', 'sentences', '=', '[', ']', 'for', 'text', 'in', 'text_blocks', ':', 'sentences', '.', 'extend', '(', 'nltk', '.', 'sent_tokenize', '(', 'text', ')', ')', 'tokenized_sentences', '=', '[', 'nltk', '.', 'word_tokenize', '(', 'sentence', ')', 'for', 'sentence', 'in', 'sentences', ']', 'tagged_sentences', '=', '[', 'nltk', '.', 'pos_tag', '(', 'sentence', ')', 'for', 'sentence', 'in', 'tokenized_sentences', ']', 'chunked_sentences', '=', 'nltk', '.', 'ne_chunk_sents', '(', 'tagged_sentences', ',', 'binary', '=', 'True', ')', 'def', 'extract_entity_names', '(', 't', ')', ':', 'entity_names', '=', '[', ']', 'if', 'hasattr', '(', 't', ',', "'label'", ')', ':', 'if', 't', '.', 'label', '(', ')', '==', "'NE'", ':', 'entity_names', '.', 'append', '(', "' '", '.', 'join', '(', '[', 'child', '[', '0', ']', 'for', 'child', 'in', 't', ']', ')', ')', 'else', ':', 'for', 'child', 'in', 't', ':', 'entity_names', '.', 'extend', '(', 'extract_entity_names', '(', 'child', ')', ')', 'return', 'entity_names', 'entity_names', '=', '[', ']', 'for', 'tree', 'in', 'chunked_sentences', ':', 'entity_names', '.', 'extend', '(', 'extract_entity_names', '(', 'tree', ')', ')', 'return', 'set', '(', 'entity_names', ')'] | Return a list of named entities extracted from provided text blocks (list of text strings). | ['Return', 'a', 'list', 'of', 'named', 'entities', 'extracted', 'from', 'provided', 'text', 'blocks', '(', 'list', 'of', 'text', 'strings', ')', '.'] | train | https://github.com/hasgeek/coaster/blob/07f7eb5d5f516e22fa14fdf4dc70e0ae13ee398d/coaster/nlp.py#L20-L48 |
341 | watson-developer-cloud/python-sdk | ibm_watson/text_to_speech_v1.py | VoiceModel._from_dict | def _from_dict(cls, _dict):
"""Initialize a VoiceModel object from a json dictionary."""
args = {}
if 'customization_id' in _dict:
args['customization_id'] = _dict.get('customization_id')
else:
raise ValueError(
'Required property \'customization_id\' not present in VoiceModel JSON'
)
if 'name' in _dict:
args['name'] = _dict.get('name')
if 'language' in _dict:
args['language'] = _dict.get('language')
if 'owner' in _dict:
args['owner'] = _dict.get('owner')
if 'created' in _dict:
args['created'] = _dict.get('created')
if 'last_modified' in _dict:
args['last_modified'] = _dict.get('last_modified')
if 'description' in _dict:
args['description'] = _dict.get('description')
if 'words' in _dict:
args['words'] = [Word._from_dict(x) for x in (_dict.get('words'))]
return cls(**args) | python | def _from_dict(cls, _dict):
"""Initialize a VoiceModel object from a json dictionary."""
args = {}
if 'customization_id' in _dict:
args['customization_id'] = _dict.get('customization_id')
else:
raise ValueError(
'Required property \'customization_id\' not present in VoiceModel JSON'
)
if 'name' in _dict:
args['name'] = _dict.get('name')
if 'language' in _dict:
args['language'] = _dict.get('language')
if 'owner' in _dict:
args['owner'] = _dict.get('owner')
if 'created' in _dict:
args['created'] = _dict.get('created')
if 'last_modified' in _dict:
args['last_modified'] = _dict.get('last_modified')
if 'description' in _dict:
args['description'] = _dict.get('description')
if 'words' in _dict:
args['words'] = [Word._from_dict(x) for x in (_dict.get('words'))]
return cls(**args) | ['def', '_from_dict', '(', 'cls', ',', '_dict', ')', ':', 'args', '=', '{', '}', 'if', "'customization_id'", 'in', '_dict', ':', 'args', '[', "'customization_id'", ']', '=', '_dict', '.', 'get', '(', "'customization_id'", ')', 'else', ':', 'raise', 'ValueError', '(', "'Required property \\'customization_id\\' not present in VoiceModel JSON'", ')', 'if', "'name'", 'in', '_dict', ':', 'args', '[', "'name'", ']', '=', '_dict', '.', 'get', '(', "'name'", ')', 'if', "'language'", 'in', '_dict', ':', 'args', '[', "'language'", ']', '=', '_dict', '.', 'get', '(', "'language'", ')', 'if', "'owner'", 'in', '_dict', ':', 'args', '[', "'owner'", ']', '=', '_dict', '.', 'get', '(', "'owner'", ')', 'if', "'created'", 'in', '_dict', ':', 'args', '[', "'created'", ']', '=', '_dict', '.', 'get', '(', "'created'", ')', 'if', "'last_modified'", 'in', '_dict', ':', 'args', '[', "'last_modified'", ']', '=', '_dict', '.', 'get', '(', "'last_modified'", ')', 'if', "'description'", 'in', '_dict', ':', 'args', '[', "'description'", ']', '=', '_dict', '.', 'get', '(', "'description'", ')', 'if', "'words'", 'in', '_dict', ':', 'args', '[', "'words'", ']', '=', '[', 'Word', '.', '_from_dict', '(', 'x', ')', 'for', 'x', 'in', '(', '_dict', '.', 'get', '(', "'words'", ')', ')', ']', 'return', 'cls', '(', '*', '*', 'args', ')'] | Initialize a VoiceModel object from a json dictionary. | ['Initialize', 'a', 'VoiceModel', 'object', 'from', 'a', 'json', 'dictionary', '.'] | train | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/text_to_speech_v1.py#L1306-L1329 |
342 | openstack/proliantutils | proliantutils/redfish/resources/system/storage/storage.py | Storage.has_rotational | def has_rotational(self):
"""Return true if any of the drive is HDD"""
for member in self._drives_list():
if member.media_type == constants.MEDIA_TYPE_HDD:
return True
return False | python | def has_rotational(self):
"""Return true if any of the drive is HDD"""
for member in self._drives_list():
if member.media_type == constants.MEDIA_TYPE_HDD:
return True
return False | ['def', 'has_rotational', '(', 'self', ')', ':', 'for', 'member', 'in', 'self', '.', '_drives_list', '(', ')', ':', 'if', 'member', '.', 'media_type', '==', 'constants', '.', 'MEDIA_TYPE_HDD', ':', 'return', 'True', 'return', 'False'] | Return true if any of the drive is HDD | ['Return', 'true', 'if', 'any', 'of', 'the', 'drive', 'is', 'HDD'] | train | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/storage/storage.py#L87-L92 |
343 | klmitch/turnstile | turnstile/config.py | Config.get | def get(self, key, default=None):
"""
Retrieve the given configuration option. Configuration
options that can be queried this way are those that are
specified without prefix in the paste.ini file, or which are
specified in the '[turnstile]' section of the configuration
file. Returns the default value (None if not specified) if
the given option does not exist.
"""
return self._config.get(None, {}).get(key, default) | python | def get(self, key, default=None):
"""
Retrieve the given configuration option. Configuration
options that can be queried this way are those that are
specified without prefix in the paste.ini file, or which are
specified in the '[turnstile]' section of the configuration
file. Returns the default value (None if not specified) if
the given option does not exist.
"""
return self._config.get(None, {}).get(key, default) | ['def', 'get', '(', 'self', ',', 'key', ',', 'default', '=', 'None', ')', ':', 'return', 'self', '.', '_config', '.', 'get', '(', 'None', ',', '{', '}', ')', '.', 'get', '(', 'key', ',', 'default', ')'] | Retrieve the given configuration option. Configuration
options that can be queried this way are those that are
specified without prefix in the paste.ini file, or which are
specified in the '[turnstile]' section of the configuration
file. Returns the default value (None if not specified) if
the given option does not exist. | ['Retrieve', 'the', 'given', 'configuration', 'option', '.', 'Configuration', 'options', 'that', 'can', 'be', 'queried', 'this', 'way', 'are', 'those', 'that', 'are', 'specified', 'without', 'prefix', 'in', 'the', 'paste', '.', 'ini', 'file', 'or', 'which', 'are', 'specified', 'in', 'the', '[', 'turnstile', ']', 'section', 'of', 'the', 'configuration', 'file', '.', 'Returns', 'the', 'default', 'value', '(', 'None', 'if', 'not', 'specified', ')', 'if', 'the', 'given', 'option', 'does', 'not', 'exist', '.'] | train | https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/config.py#L174-L184 |
344 | gwastro/pycbc-glue | pycbc_glue/pipeline.py | CondorJob.write_sub_file | def write_sub_file(self):
"""
Write a submit file for this Condor job.
"""
if not self.__log_file:
raise CondorSubmitError, "Log file not specified."
if not self.__err_file:
raise CondorSubmitError, "Error file not specified."
if not self.__out_file:
raise CondorSubmitError, "Output file not specified."
if not self.__sub_file_path:
raise CondorSubmitError, 'No path for submit file.'
try:
subfile = open(self.__sub_file_path, 'w')
except:
raise CondorSubmitError, "Cannot open file " + self.__sub_file_path
if self.__universe == 'grid':
if self.__grid_type == None:
raise CondorSubmitError, 'No grid type specified.'
elif self.__grid_type == 'gt2':
if self.__grid_server == None:
raise CondorSubmitError, 'No server specified for grid resource.'
elif self.__grid_type == 'gt4':
if self.__grid_server == None:
raise CondorSubmitError, 'No server specified for grid resource.'
if self.__grid_scheduler == None:
raise CondorSubmitError, 'No scheduler specified for grid resource.'
else:
raise CondorSubmitError, 'Unsupported grid resource.'
subfile.write( 'universe = ' + self.__universe + '\n' )
subfile.write( 'executable = ' + self.__executable + '\n' )
if self.__universe == 'grid':
if self.__grid_type == 'gt2':
subfile.write('grid_resource = %s %s\n' % (self.__grid_type,
self.__grid_server))
if self.__grid_type == 'gt4':
subfile.write('grid_resource = %s %s %s\n' % (self.__grid_type,
self.__grid_server, self.__grid_scheduler))
if self.__universe == 'grid':
subfile.write('when_to_transfer_output = ON_EXIT\n')
subfile.write('transfer_output_files = $(macrooutput)\n')
subfile.write('transfer_input_files = $(macroinput)\n')
if self.__options.keys() or self.__short_options.keys() or self.__arguments:
subfile.write( 'arguments = "' )
for c in self.__options.keys():
if self.__options[c]:
subfile.write( ' --' + c + ' ' + self.__options[c] )
else:
subfile.write( ' --' + c )
for c in self.__short_options.keys():
if self.__short_options[c]:
subfile.write( ' -' + c + ' ' + self.__short_options[c] )
else:
subfile.write( ' -' + c )
for c in self.__arguments:
subfile.write( ' ' + c )
subfile.write( ' "\n' )
for cmd in self.__condor_cmds.keys():
subfile.write( str(cmd) + " = " + str(self.__condor_cmds[cmd]) + '\n' )
subfile.write( 'log = ' + self.__log_file + '\n' )
if self.__in_file is not None:
subfile.write( 'input = ' + self.__in_file + '\n' )
subfile.write( 'error = ' + self.__err_file + '\n' )
subfile.write( 'output = ' + self.__out_file + '\n' )
if self.__notification:
subfile.write( 'notification = ' + self.__notification + '\n' )
subfile.write( 'queue ' + str(self.__queue) + '\n' )
subfile.close() | python | def write_sub_file(self):
"""
Write a submit file for this Condor job.
"""
if not self.__log_file:
raise CondorSubmitError, "Log file not specified."
if not self.__err_file:
raise CondorSubmitError, "Error file not specified."
if not self.__out_file:
raise CondorSubmitError, "Output file not specified."
if not self.__sub_file_path:
raise CondorSubmitError, 'No path for submit file.'
try:
subfile = open(self.__sub_file_path, 'w')
except:
raise CondorSubmitError, "Cannot open file " + self.__sub_file_path
if self.__universe == 'grid':
if self.__grid_type == None:
raise CondorSubmitError, 'No grid type specified.'
elif self.__grid_type == 'gt2':
if self.__grid_server == None:
raise CondorSubmitError, 'No server specified for grid resource.'
elif self.__grid_type == 'gt4':
if self.__grid_server == None:
raise CondorSubmitError, 'No server specified for grid resource.'
if self.__grid_scheduler == None:
raise CondorSubmitError, 'No scheduler specified for grid resource.'
else:
raise CondorSubmitError, 'Unsupported grid resource.'
subfile.write( 'universe = ' + self.__universe + '\n' )
subfile.write( 'executable = ' + self.__executable + '\n' )
if self.__universe == 'grid':
if self.__grid_type == 'gt2':
subfile.write('grid_resource = %s %s\n' % (self.__grid_type,
self.__grid_server))
if self.__grid_type == 'gt4':
subfile.write('grid_resource = %s %s %s\n' % (self.__grid_type,
self.__grid_server, self.__grid_scheduler))
if self.__universe == 'grid':
subfile.write('when_to_transfer_output = ON_EXIT\n')
subfile.write('transfer_output_files = $(macrooutput)\n')
subfile.write('transfer_input_files = $(macroinput)\n')
if self.__options.keys() or self.__short_options.keys() or self.__arguments:
subfile.write( 'arguments = "' )
for c in self.__options.keys():
if self.__options[c]:
subfile.write( ' --' + c + ' ' + self.__options[c] )
else:
subfile.write( ' --' + c )
for c in self.__short_options.keys():
if self.__short_options[c]:
subfile.write( ' -' + c + ' ' + self.__short_options[c] )
else:
subfile.write( ' -' + c )
for c in self.__arguments:
subfile.write( ' ' + c )
subfile.write( ' "\n' )
for cmd in self.__condor_cmds.keys():
subfile.write( str(cmd) + " = " + str(self.__condor_cmds[cmd]) + '\n' )
subfile.write( 'log = ' + self.__log_file + '\n' )
if self.__in_file is not None:
subfile.write( 'input = ' + self.__in_file + '\n' )
subfile.write( 'error = ' + self.__err_file + '\n' )
subfile.write( 'output = ' + self.__out_file + '\n' )
if self.__notification:
subfile.write( 'notification = ' + self.__notification + '\n' )
subfile.write( 'queue ' + str(self.__queue) + '\n' )
subfile.close() | ['def', 'write_sub_file', '(', 'self', ')', ':', 'if', 'not', 'self', '.', '__log_file', ':', 'raise', 'CondorSubmitError', ',', '"Log file not specified."', 'if', 'not', 'self', '.', '__err_file', ':', 'raise', 'CondorSubmitError', ',', '"Error file not specified."', 'if', 'not', 'self', '.', '__out_file', ':', 'raise', 'CondorSubmitError', ',', '"Output file not specified."', 'if', 'not', 'self', '.', '__sub_file_path', ':', 'raise', 'CondorSubmitError', ',', "'No path for submit file.'", 'try', ':', 'subfile', '=', 'open', '(', 'self', '.', '__sub_file_path', ',', "'w'", ')', 'except', ':', 'raise', 'CondorSubmitError', ',', '"Cannot open file "', '+', 'self', '.', '__sub_file_path', 'if', 'self', '.', '__universe', '==', "'grid'", ':', 'if', 'self', '.', '__grid_type', '==', 'None', ':', 'raise', 'CondorSubmitError', ',', "'No grid type specified.'", 'elif', 'self', '.', '__grid_type', '==', "'gt2'", ':', 'if', 'self', '.', '__grid_server', '==', 'None', ':', 'raise', 'CondorSubmitError', ',', "'No server specified for grid resource.'", 'elif', 'self', '.', '__grid_type', '==', "'gt4'", ':', 'if', 'self', '.', '__grid_server', '==', 'None', ':', 'raise', 'CondorSubmitError', ',', "'No server specified for grid resource.'", 'if', 'self', '.', '__grid_scheduler', '==', 'None', ':', 'raise', 'CondorSubmitError', ',', "'No scheduler specified for grid resource.'", 'else', ':', 'raise', 'CondorSubmitError', ',', "'Unsupported grid resource.'", 'subfile', '.', 'write', '(', "'universe = '", '+', 'self', '.', '__universe', '+', "'\\n'", ')', 'subfile', '.', 'write', '(', "'executable = '", '+', 'self', '.', '__executable', '+', "'\\n'", ')', 'if', 'self', '.', '__universe', '==', "'grid'", ':', 'if', 'self', '.', '__grid_type', '==', "'gt2'", ':', 'subfile', '.', 'write', '(', "'grid_resource = %s %s\\n'", '%', '(', 'self', '.', '__grid_type', ',', 'self', '.', '__grid_server', ')', ')', 'if', 'self', '.', '__grid_type', '==', "'gt4'", ':', 'subfile', '.', 'write', '(', "'grid_resource = %s %s %s\\n'", '%', '(', 'self', '.', '__grid_type', ',', 'self', '.', '__grid_server', ',', 'self', '.', '__grid_scheduler', ')', ')', 'if', 'self', '.', '__universe', '==', "'grid'", ':', 'subfile', '.', 'write', '(', "'when_to_transfer_output = ON_EXIT\\n'", ')', 'subfile', '.', 'write', '(', "'transfer_output_files = $(macrooutput)\\n'", ')', 'subfile', '.', 'write', '(', "'transfer_input_files = $(macroinput)\\n'", ')', 'if', 'self', '.', '__options', '.', 'keys', '(', ')', 'or', 'self', '.', '__short_options', '.', 'keys', '(', ')', 'or', 'self', '.', '__arguments', ':', 'subfile', '.', 'write', '(', '\'arguments = "\'', ')', 'for', 'c', 'in', 'self', '.', '__options', '.', 'keys', '(', ')', ':', 'if', 'self', '.', '__options', '[', 'c', ']', ':', 'subfile', '.', 'write', '(', "' --'", '+', 'c', '+', "' '", '+', 'self', '.', '__options', '[', 'c', ']', ')', 'else', ':', 'subfile', '.', 'write', '(', "' --'", '+', 'c', ')', 'for', 'c', 'in', 'self', '.', '__short_options', '.', 'keys', '(', ')', ':', 'if', 'self', '.', '__short_options', '[', 'c', ']', ':', 'subfile', '.', 'write', '(', "' -'", '+', 'c', '+', "' '", '+', 'self', '.', '__short_options', '[', 'c', ']', ')', 'else', ':', 'subfile', '.', 'write', '(', "' -'", '+', 'c', ')', 'for', 'c', 'in', 'self', '.', '__arguments', ':', 'subfile', '.', 'write', '(', "' '", '+', 'c', ')', 'subfile', '.', 'write', '(', '\' "\\n\'', ')', 'for', 'cmd', 'in', 'self', '.', '__condor_cmds', '.', 'keys', '(', ')', ':', 'subfile', '.', 'write', '(', 'str', '(', 'cmd', ')', '+', '" = "', '+', 'str', '(', 'self', '.', '__condor_cmds', '[', 'cmd', ']', ')', '+', "'\\n'", ')', 'subfile', '.', 'write', '(', "'log = '", '+', 'self', '.', '__log_file', '+', "'\\n'", ')', 'if', 'self', '.', '__in_file', 'is', 'not', 'None', ':', 'subfile', '.', 'write', '(', "'input = '", '+', 'self', '.', '__in_file', '+', "'\\n'", ')', 'subfile', '.', 'write', '(', "'error = '", '+', 'self', '.', '__err_file', '+', "'\\n'", ')', 'subfile', '.', 'write', '(', "'output = '", '+', 'self', '.', '__out_file', '+', "'\\n'", ')', 'if', 'self', '.', '__notification', ':', 'subfile', '.', 'write', '(', "'notification = '", '+', 'self', '.', '__notification', '+', "'\\n'", ')', 'subfile', '.', 'write', '(', "'queue '", '+', 'str', '(', 'self', '.', '__queue', ')', '+', "'\\n'", ')', 'subfile', '.', 'close', '(', ')'] | Write a submit file for this Condor job. | ['Write', 'a', 'submit', 'file', 'for', 'this', 'Condor', 'job', '.'] | train | https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/pipeline.py#L710-L786 |
345 | SBRG/ssbio | ssbio/io/__init__.py | save_json | def save_json(obj, outfile, allow_nan=True, compression=False):
"""Save an ssbio object as a JSON file using json_tricks"""
if compression:
with open(outfile, 'wb') as f:
dump(obj, f, allow_nan=allow_nan, compression=compression)
else:
with open(outfile, 'w') as f:
dump(obj, f, allow_nan=allow_nan, compression=compression)
log.info('Saved {} (id: {}) to {}'.format(type(obj), obj.id, outfile)) | python | def save_json(obj, outfile, allow_nan=True, compression=False):
"""Save an ssbio object as a JSON file using json_tricks"""
if compression:
with open(outfile, 'wb') as f:
dump(obj, f, allow_nan=allow_nan, compression=compression)
else:
with open(outfile, 'w') as f:
dump(obj, f, allow_nan=allow_nan, compression=compression)
log.info('Saved {} (id: {}) to {}'.format(type(obj), obj.id, outfile)) | ['def', 'save_json', '(', 'obj', ',', 'outfile', ',', 'allow_nan', '=', 'True', ',', 'compression', '=', 'False', ')', ':', 'if', 'compression', ':', 'with', 'open', '(', 'outfile', ',', "'wb'", ')', 'as', 'f', ':', 'dump', '(', 'obj', ',', 'f', ',', 'allow_nan', '=', 'allow_nan', ',', 'compression', '=', 'compression', ')', 'else', ':', 'with', 'open', '(', 'outfile', ',', "'w'", ')', 'as', 'f', ':', 'dump', '(', 'obj', ',', 'f', ',', 'allow_nan', '=', 'allow_nan', ',', 'compression', '=', 'compression', ')', 'log', '.', 'info', '(', "'Saved {} (id: {}) to {}'", '.', 'format', '(', 'type', '(', 'obj', ')', ',', 'obj', '.', 'id', ',', 'outfile', ')', ')'] | Save an ssbio object as a JSON file using json_tricks | ['Save', 'an', 'ssbio', 'object', 'as', 'a', 'JSON', 'file', 'using', 'json_tricks'] | train | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/io/__init__.py#L9-L17 |
346 | saltstack/salt | salt/modules/netscaler.py | server_enabled | def server_enabled(s_name, **connection_args):
'''
Check if a server is enabled globally
CLI Example:
.. code-block:: bash
salt '*' netscaler.server_enabled 'serverName'
'''
server = _server_get(s_name, **connection_args)
return server is not None and server.get_state() == 'ENABLED' | python | def server_enabled(s_name, **connection_args):
'''
Check if a server is enabled globally
CLI Example:
.. code-block:: bash
salt '*' netscaler.server_enabled 'serverName'
'''
server = _server_get(s_name, **connection_args)
return server is not None and server.get_state() == 'ENABLED' | ['def', 'server_enabled', '(', 's_name', ',', '*', '*', 'connection_args', ')', ':', 'server', '=', '_server_get', '(', 's_name', ',', '*', '*', 'connection_args', ')', 'return', 'server', 'is', 'not', 'None', 'and', 'server', '.', 'get_state', '(', ')', '==', "'ENABLED'"] | Check if a server is enabled globally
CLI Example:
.. code-block:: bash
salt '*' netscaler.server_enabled 'serverName' | ['Check', 'if', 'a', 'server', 'is', 'enabled', 'globally'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L642-L653 |
347 | drj11/pypng | code/png.py | is_natural | def is_natural(x):
"""A non-negative integer."""
try:
is_integer = int(x) == x
except (TypeError, ValueError):
return False
return is_integer and x >= 0 | python | def is_natural(x):
"""A non-negative integer."""
try:
is_integer = int(x) == x
except (TypeError, ValueError):
return False
return is_integer and x >= 0 | ['def', 'is_natural', '(', 'x', ')', ':', 'try', ':', 'is_integer', '=', 'int', '(', 'x', ')', '==', 'x', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'return', 'False', 'return', 'is_integer', 'and', 'x', '>=', '0'] | A non-negative integer. | ['A', 'non', '-', 'negative', 'integer', '.'] | train | https://github.com/drj11/pypng/blob/b8220ca9f58e4c5bc1d507e713744fcb8c049225/code/png.py#L2203-L2209 |
348 | icometrix/dicom2nifti | dicom2nifti/common.py | set_fd_value | def set_fd_value(tag, value):
"""
Setters for data that also work with implicit transfersyntax
:param value: the value to set on the tag
:param tag: the tag to read
"""
if tag.VR == 'OB' or tag.VR == 'UN':
value = struct.pack('d', value)
tag.value = value | python | def set_fd_value(tag, value):
"""
Setters for data that also work with implicit transfersyntax
:param value: the value to set on the tag
:param tag: the tag to read
"""
if tag.VR == 'OB' or tag.VR == 'UN':
value = struct.pack('d', value)
tag.value = value | ['def', 'set_fd_value', '(', 'tag', ',', 'value', ')', ':', 'if', 'tag', '.', 'VR', '==', "'OB'", 'or', 'tag', '.', 'VR', '==', "'UN'", ':', 'value', '=', 'struct', '.', 'pack', '(', "'d'", ',', 'value', ')', 'tag', '.', 'value', '=', 'value'] | Setters for data that also work with implicit transfersyntax
:param value: the value to set on the tag
:param tag: the tag to read | ['Setters', 'for', 'data', 'that', 'also', 'work', 'with', 'implicit', 'transfersyntax'] | train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L309-L318 |
349 | fastai/fastai | fastai/torch_core.py | set_bn_eval | def set_bn_eval(m:nn.Module)->None:
"Set bn layers in eval mode for all recursive children of `m`."
for l in m.children():
if isinstance(l, bn_types) and not next(l.parameters()).requires_grad:
l.eval()
set_bn_eval(l) | python | def set_bn_eval(m:nn.Module)->None:
"Set bn layers in eval mode for all recursive children of `m`."
for l in m.children():
if isinstance(l, bn_types) and not next(l.parameters()).requires_grad:
l.eval()
set_bn_eval(l) | ['def', 'set_bn_eval', '(', 'm', ':', 'nn', '.', 'Module', ')', '->', 'None', ':', 'for', 'l', 'in', 'm', '.', 'children', '(', ')', ':', 'if', 'isinstance', '(', 'l', ',', 'bn_types', ')', 'and', 'not', 'next', '(', 'l', '.', 'parameters', '(', ')', ')', '.', 'requires_grad', ':', 'l', '.', 'eval', '(', ')', 'set_bn_eval', '(', 'l', ')'] | Set bn layers in eval mode for all recursive children of `m`. | ['Set', 'bn', 'layers', 'in', 'eval', 'mode', 'for', 'all', 'recursive', 'children', 'of', 'm', '.'] | train | https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/torch_core.py#L216-L221 |
350 | mitsei/dlkit | dlkit/records/osid/base_records.py | FileRecord.has_file_url | def has_file_url(self):
"""stub"""
return bool(self._get_asset_content(
Id(self.my_osid_object._my_map['fileId']['assetId']),
self.my_osid_object._my_map['fileId']['assetContentTypeId']).has_url()) | python | def has_file_url(self):
"""stub"""
return bool(self._get_asset_content(
Id(self.my_osid_object._my_map['fileId']['assetId']),
self.my_osid_object._my_map['fileId']['assetContentTypeId']).has_url()) | ['def', 'has_file_url', '(', 'self', ')', ':', 'return', 'bool', '(', 'self', '.', '_get_asset_content', '(', 'Id', '(', 'self', '.', 'my_osid_object', '.', '_my_map', '[', "'fileId'", ']', '[', "'assetId'", ']', ')', ',', 'self', '.', 'my_osid_object', '.', '_my_map', '[', "'fileId'", ']', '[', "'assetContentTypeId'", ']', ')', '.', 'has_url', '(', ')', ')'] | stub | ['stub'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/osid/base_records.py#L2151-L2155 |
351 | lotabout/pymustache | pymustache/mustache.py | Variable._render | def _render(self, contexts, partials):
"""render variable"""
value = self._lookup(self.value, contexts)
# lambda
if callable(value):
value = inner_render(str(value()), contexts, partials)
return self._escape(value) | python | def _render(self, contexts, partials):
"""render variable"""
value = self._lookup(self.value, contexts)
# lambda
if callable(value):
value = inner_render(str(value()), contexts, partials)
return self._escape(value) | ['def', '_render', '(', 'self', ',', 'contexts', ',', 'partials', ')', ':', 'value', '=', 'self', '.', '_lookup', '(', 'self', '.', 'value', ',', 'contexts', ')', '# lambda', 'if', 'callable', '(', 'value', ')', ':', 'value', '=', 'inner_render', '(', 'str', '(', 'value', '(', ')', ')', ',', 'contexts', ',', 'partials', ')', 'return', 'self', '.', '_escape', '(', 'value', ')'] | render variable | ['render', 'variable'] | train | https://github.com/lotabout/pymustache/blob/d4089e49cda01fc11bab0c986d95e25150a60bac/pymustache/mustache.py#L385-L393 |
352 | mlperf/training | image_classification/tensorflow/official/resnet/imagenet_main.py | _get_block_sizes | def _get_block_sizes(resnet_size):
"""Retrieve the size of each block_layer in the ResNet model.
The number of block layers used for the Resnet model varies according
to the size of the model. This helper grabs the layer set we want, throwing
an error if a non-standard size has been selected.
Args:
resnet_size: The number of convolutional layers needed in the model.
Returns:
A list of block sizes to use in building the model.
Raises:
KeyError: if invalid resnet_size is received.
"""
choices = {
18: [2, 2, 2, 2],
34: [3, 4, 6, 3],
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3],
200: [3, 24, 36, 3]
}
try:
return choices[resnet_size]
except KeyError:
err = ('Could not find layers for selected Resnet size.\n'
'Size received: {}; sizes allowed: {}.'.format(
resnet_size, choices.keys()))
raise ValueError(err) | python | def _get_block_sizes(resnet_size):
"""Retrieve the size of each block_layer in the ResNet model.
The number of block layers used for the Resnet model varies according
to the size of the model. This helper grabs the layer set we want, throwing
an error if a non-standard size has been selected.
Args:
resnet_size: The number of convolutional layers needed in the model.
Returns:
A list of block sizes to use in building the model.
Raises:
KeyError: if invalid resnet_size is received.
"""
choices = {
18: [2, 2, 2, 2],
34: [3, 4, 6, 3],
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3],
200: [3, 24, 36, 3]
}
try:
return choices[resnet_size]
except KeyError:
err = ('Could not find layers for selected Resnet size.\n'
'Size received: {}; sizes allowed: {}.'.format(
resnet_size, choices.keys()))
raise ValueError(err) | ['def', '_get_block_sizes', '(', 'resnet_size', ')', ':', 'choices', '=', '{', '18', ':', '[', '2', ',', '2', ',', '2', ',', '2', ']', ',', '34', ':', '[', '3', ',', '4', ',', '6', ',', '3', ']', ',', '50', ':', '[', '3', ',', '4', ',', '6', ',', '3', ']', ',', '101', ':', '[', '3', ',', '4', ',', '23', ',', '3', ']', ',', '152', ':', '[', '3', ',', '8', ',', '36', ',', '3', ']', ',', '200', ':', '[', '3', ',', '24', ',', '36', ',', '3', ']', '}', 'try', ':', 'return', 'choices', '[', 'resnet_size', ']', 'except', 'KeyError', ':', 'err', '=', '(', "'Could not find layers for selected Resnet size.\\n'", "'Size received: {}; sizes allowed: {}.'", '.', 'format', '(', 'resnet_size', ',', 'choices', '.', 'keys', '(', ')', ')', ')', 'raise', 'ValueError', '(', 'err', ')'] | Retrieve the size of each block_layer in the ResNet model.
The number of block layers used for the Resnet model varies according
to the size of the model. This helper grabs the layer set we want, throwing
an error if a non-standard size has been selected.
Args:
resnet_size: The number of convolutional layers needed in the model.
Returns:
A list of block sizes to use in building the model.
Raises:
KeyError: if invalid resnet_size is received. | ['Retrieve', 'the', 'size', 'of', 'each', 'block_layer', 'in', 'the', 'ResNet', 'model', '.'] | train | https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/image_classification/tensorflow/official/resnet/imagenet_main.py#L242-L273 |
353 | baruwa-enterprise/BaruwaAPI | BaruwaAPI/resource.py | BaruwaAPIClient.update_fallbackserver | def update_fallbackserver(self, serverid, data):
"""Update Fallback server"""
return self.api_call(
ENDPOINTS['fallbackservers']['update'],
dict(serverid=serverid),
body=data) | python | def update_fallbackserver(self, serverid, data):
"""Update Fallback server"""
return self.api_call(
ENDPOINTS['fallbackservers']['update'],
dict(serverid=serverid),
body=data) | ['def', 'update_fallbackserver', '(', 'self', ',', 'serverid', ',', 'data', ')', ':', 'return', 'self', '.', 'api_call', '(', 'ENDPOINTS', '[', "'fallbackservers'", ']', '[', "'update'", ']', ',', 'dict', '(', 'serverid', '=', 'serverid', ')', ',', 'body', '=', 'data', ')'] | Update Fallback server | ['Update', 'Fallback', 'server'] | train | https://github.com/baruwa-enterprise/BaruwaAPI/blob/53335b377ccfd388e42f4f240f181eed72f51180/BaruwaAPI/resource.py#L457-L462 |
354 | revelc/pyaccumulo | pyaccumulo/proxy/AccumuloProxy.py | Client.changeLocalUserPassword | def changeLocalUserPassword(self, login, user, password):
"""
Parameters:
- login
- user
- password
"""
self.send_changeLocalUserPassword(login, user, password)
self.recv_changeLocalUserPassword() | python | def changeLocalUserPassword(self, login, user, password):
"""
Parameters:
- login
- user
- password
"""
self.send_changeLocalUserPassword(login, user, password)
self.recv_changeLocalUserPassword() | ['def', 'changeLocalUserPassword', '(', 'self', ',', 'login', ',', 'user', ',', 'password', ')', ':', 'self', '.', 'send_changeLocalUserPassword', '(', 'login', ',', 'user', ',', 'password', ')', 'self', '.', 'recv_changeLocalUserPassword', '(', ')'] | Parameters:
- login
- user
- password | ['Parameters', ':', '-', 'login', '-', 'user', '-', 'password'] | train | https://github.com/revelc/pyaccumulo/blob/8adcf535bb82ba69c749efce785c9efc487e85de/pyaccumulo/proxy/AccumuloProxy.py#L2407-L2415 |
355 | PolyJIT/benchbuild | benchbuild/utils/schema.py | exceptions | def exceptions(error_is_fatal=True, error_messages=None):
"""
Handle SQLAlchemy exceptions in a sane way.
Args:
func: An arbitrary function to wrap.
error_is_fatal: Should we exit the program on exception?
reraise: Should we reraise the exception, after logging? Only makes sense
if error_is_fatal is False.
error_messages: A dictionary that assigns an exception class to a
customized error message.
"""
def exception_decorator(func):
nonlocal error_messages
@functools.wraps(func)
def exc_wrapper(*args, **kwargs):
nonlocal error_messages
try:
result = func(*args, **kwargs)
except sa.exc.SQLAlchemyError as err:
result = None
details = None
err_type = err.__class__
if error_messages and err_type in error_messages:
details = error_messages[err_type]
if details:
LOG.error(details)
LOG.error("For developers: (%s) %s", err.__class__, str(err))
if error_is_fatal:
sys.exit("Abort, SQL operation failed.")
if not ui.ask(
"I can continue at your own risk, do you want that?"):
raise err
return result
return exc_wrapper
return exception_decorator | python | def exceptions(error_is_fatal=True, error_messages=None):
"""
Handle SQLAlchemy exceptions in a sane way.
Args:
func: An arbitrary function to wrap.
error_is_fatal: Should we exit the program on exception?
reraise: Should we reraise the exception, after logging? Only makes sense
if error_is_fatal is False.
error_messages: A dictionary that assigns an exception class to a
customized error message.
"""
def exception_decorator(func):
nonlocal error_messages
@functools.wraps(func)
def exc_wrapper(*args, **kwargs):
nonlocal error_messages
try:
result = func(*args, **kwargs)
except sa.exc.SQLAlchemyError as err:
result = None
details = None
err_type = err.__class__
if error_messages and err_type in error_messages:
details = error_messages[err_type]
if details:
LOG.error(details)
LOG.error("For developers: (%s) %s", err.__class__, str(err))
if error_is_fatal:
sys.exit("Abort, SQL operation failed.")
if not ui.ask(
"I can continue at your own risk, do you want that?"):
raise err
return result
return exc_wrapper
return exception_decorator | ['def', 'exceptions', '(', 'error_is_fatal', '=', 'True', ',', 'error_messages', '=', 'None', ')', ':', 'def', 'exception_decorator', '(', 'func', ')', ':', 'nonlocal', 'error_messages', '@', 'functools', '.', 'wraps', '(', 'func', ')', 'def', 'exc_wrapper', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'nonlocal', 'error_messages', 'try', ':', 'result', '=', 'func', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'except', 'sa', '.', 'exc', '.', 'SQLAlchemyError', 'as', 'err', ':', 'result', '=', 'None', 'details', '=', 'None', 'err_type', '=', 'err', '.', '__class__', 'if', 'error_messages', 'and', 'err_type', 'in', 'error_messages', ':', 'details', '=', 'error_messages', '[', 'err_type', ']', 'if', 'details', ':', 'LOG', '.', 'error', '(', 'details', ')', 'LOG', '.', 'error', '(', '"For developers: (%s) %s"', ',', 'err', '.', '__class__', ',', 'str', '(', 'err', ')', ')', 'if', 'error_is_fatal', ':', 'sys', '.', 'exit', '(', '"Abort, SQL operation failed."', ')', 'if', 'not', 'ui', '.', 'ask', '(', '"I can continue at your own risk, do you want that?"', ')', ':', 'raise', 'err', 'return', 'result', 'return', 'exc_wrapper', 'return', 'exception_decorator'] | Handle SQLAlchemy exceptions in a sane way.
Args:
func: An arbitrary function to wrap.
error_is_fatal: Should we exit the program on exception?
reraise: Should we reraise the exception, after logging? Only makes sense
if error_is_fatal is False.
error_messages: A dictionary that assigns an exception class to a
customized error message. | ['Handle', 'SQLAlchemy', 'exceptions', 'in', 'a', 'sane', 'way', '.'] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/schema.py#L50-L89 |
356 | arne-cl/discoursegraphs | src/discoursegraphs/discoursegraph.py | select_nodes_by_attribute | def select_nodes_by_attribute(docgraph, attribute=None, value=None, data=False):
"""
Get all nodes with the given attribute (and attribute value).
Parameters
----------
docgraph : DiscourseDocumentGraph
document graph from which the nodes will be extracted
attribute : str or None
Name of the node attribute that all nodes must posess.
If None, returns all nodes.
value : str or collection of str or None
Value of the node attribute that all nodes must posess.
If None, returns all nodes with the given node attribute key .
data : bool
If True, results will include node attributes.
Yields
------
nodes : generator of str or generator of (str, dict) tuple
If data is False (default), a generator of node (IDs) that posess
the given attribute. If data is True, a generator of (node ID,
node attrib dict) tuples.
"""
for node_id, node_attribs in docgraph.nodes_iter(data=True):
if attribute is None:
has_attrib = True # don't filter nodes
else:
has_attrib = attribute in node_attribs
if has_attrib:
if value is None:
has_value = True
elif isinstance(value, basestring):
has_value = node_attribs.get(attribute) == value
else: # ``value`` is a list/set/dict of values
has_value = any(node_attribs.get(attribute) == v for v in value)
if has_value:
if data:
yield (node_id, node_attribs)
else:
yield node_id | python | def select_nodes_by_attribute(docgraph, attribute=None, value=None, data=False):
"""
Get all nodes with the given attribute (and attribute value).
Parameters
----------
docgraph : DiscourseDocumentGraph
document graph from which the nodes will be extracted
attribute : str or None
Name of the node attribute that all nodes must posess.
If None, returns all nodes.
value : str or collection of str or None
Value of the node attribute that all nodes must posess.
If None, returns all nodes with the given node attribute key .
data : bool
If True, results will include node attributes.
Yields
------
nodes : generator of str or generator of (str, dict) tuple
If data is False (default), a generator of node (IDs) that posess
the given attribute. If data is True, a generator of (node ID,
node attrib dict) tuples.
"""
for node_id, node_attribs in docgraph.nodes_iter(data=True):
if attribute is None:
has_attrib = True # don't filter nodes
else:
has_attrib = attribute in node_attribs
if has_attrib:
if value is None:
has_value = True
elif isinstance(value, basestring):
has_value = node_attribs.get(attribute) == value
else: # ``value`` is a list/set/dict of values
has_value = any(node_attribs.get(attribute) == v for v in value)
if has_value:
if data:
yield (node_id, node_attribs)
else:
yield node_id | ['def', 'select_nodes_by_attribute', '(', 'docgraph', ',', 'attribute', '=', 'None', ',', 'value', '=', 'None', ',', 'data', '=', 'False', ')', ':', 'for', 'node_id', ',', 'node_attribs', 'in', 'docgraph', '.', 'nodes_iter', '(', 'data', '=', 'True', ')', ':', 'if', 'attribute', 'is', 'None', ':', 'has_attrib', '=', 'True', "# don't filter nodes", 'else', ':', 'has_attrib', '=', 'attribute', 'in', 'node_attribs', 'if', 'has_attrib', ':', 'if', 'value', 'is', 'None', ':', 'has_value', '=', 'True', 'elif', 'isinstance', '(', 'value', ',', 'basestring', ')', ':', 'has_value', '=', 'node_attribs', '.', 'get', '(', 'attribute', ')', '==', 'value', 'else', ':', '# ``value`` is a list/set/dict of values', 'has_value', '=', 'any', '(', 'node_attribs', '.', 'get', '(', 'attribute', ')', '==', 'v', 'for', 'v', 'in', 'value', ')', 'if', 'has_value', ':', 'if', 'data', ':', 'yield', '(', 'node_id', ',', 'node_attribs', ')', 'else', ':', 'yield', 'node_id'] | Get all nodes with the given attribute (and attribute value).
Parameters
----------
docgraph : DiscourseDocumentGraph
document graph from which the nodes will be extracted
attribute : str or None
Name of the node attribute that all nodes must posess.
If None, returns all nodes.
value : str or collection of str or None
Value of the node attribute that all nodes must posess.
If None, returns all nodes with the given node attribute key .
data : bool
If True, results will include node attributes.
Yields
------
nodes : generator of str or generator of (str, dict) tuple
If data is False (default), a generator of node (IDs) that posess
the given attribute. If data is True, a generator of (node ID,
node attrib dict) tuples. | ['Get', 'all', 'nodes', 'with', 'the', 'given', 'attribute', '(', 'and', 'attribute', 'value', ')', '.'] | train | https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/discoursegraph.py#L1146-L1188 |
357 | sammchardy/python-kucoin | kucoin/client.py | Client.get_deposit_address | def get_deposit_address(self, currency):
"""Get deposit address for a currency
https://docs.kucoin.com/#get-deposit-address
:param currency: Name of currency
:type currency: string
.. code:: python
address = client.get_deposit_address('NEO')
:returns: ApiResponse
.. code:: python
{
"address": "0x78d3ad1c0aa1bf068e19c94a2d7b16c9c0fcd8b1",
"memo": "5c247c8a03aa677cea2a251d"
}
:raises: KucoinResponseException, KucoinAPIException
"""
data = {
'currency': currency
}
return self._get('deposit-addresses', True, data=data) | python | def get_deposit_address(self, currency):
"""Get deposit address for a currency
https://docs.kucoin.com/#get-deposit-address
:param currency: Name of currency
:type currency: string
.. code:: python
address = client.get_deposit_address('NEO')
:returns: ApiResponse
.. code:: python
{
"address": "0x78d3ad1c0aa1bf068e19c94a2d7b16c9c0fcd8b1",
"memo": "5c247c8a03aa677cea2a251d"
}
:raises: KucoinResponseException, KucoinAPIException
"""
data = {
'currency': currency
}
return self._get('deposit-addresses', True, data=data) | ['def', 'get_deposit_address', '(', 'self', ',', 'currency', ')', ':', 'data', '=', '{', "'currency'", ':', 'currency', '}', 'return', 'self', '.', '_get', '(', "'deposit-addresses'", ',', 'True', ',', 'data', '=', 'data', ')'] | Get deposit address for a currency
https://docs.kucoin.com/#get-deposit-address
:param currency: Name of currency
:type currency: string
.. code:: python
address = client.get_deposit_address('NEO')
:returns: ApiResponse
.. code:: python
{
"address": "0x78d3ad1c0aa1bf068e19c94a2d7b16c9c0fcd8b1",
"memo": "5c247c8a03aa677cea2a251d"
}
:raises: KucoinResponseException, KucoinAPIException | ['Get', 'deposit', 'address', 'for', 'a', 'currency'] | train | https://github.com/sammchardy/python-kucoin/blob/a4cacde413804784bd313f27a0ad37234888be29/kucoin/client.py#L592-L621 |
358 | opennode/waldur-core | waldur_core/core/models.py | ReversionMixin.get_version_fields | def get_version_fields(self):
""" Get field that are tracked in object history versions. """
options = reversion._get_options(self)
return options.fields or [f.name for f in self._meta.fields if f not in options.exclude] | python | def get_version_fields(self):
""" Get field that are tracked in object history versions. """
options = reversion._get_options(self)
return options.fields or [f.name for f in self._meta.fields if f not in options.exclude] | ['def', 'get_version_fields', '(', 'self', ')', ':', 'options', '=', 'reversion', '.', '_get_options', '(', 'self', ')', 'return', 'options', '.', 'fields', 'or', '[', 'f', '.', 'name', 'for', 'f', 'in', 'self', '.', '_meta', '.', 'fields', 'if', 'f', 'not', 'in', 'options', '.', 'exclude', ']'] | Get field that are tracked in object history versions. | ['Get', 'field', 'that', 'are', 'tracked', 'in', 'object', 'history', 'versions', '.'] | train | https://github.com/opennode/waldur-core/blob/d6c17a9592bb6c49c33567542eef8d099605a46a/waldur_core/core/models.py#L380-L383 |
359 | rraadd88/rohan | rohan/dandage/stat/corr.py | corrdfs | def corrdfs(df1,df2,method):
"""
df1 in columns
df2 in rows
"""
dcorr=pd.DataFrame(columns=df1.columns,index=df2.columns)
dpval=pd.DataFrame(columns=df1.columns,index=df2.columns)
for c1 in df1:
for c2 in df2:
if method=='spearman':
dcorr.loc[c2,c1],dpval.loc[c2,c1]=spearmanr(df1[c1],df2[c2],
nan_policy='omit'
)
elif method=='pearson':
dcorr.loc[c2,c1],dpval.loc[c2,c1]=pearsonr(df1[c1],df2[c2],
# nan_policy='omit'
)
if not df1.columns.name is None:
dcorr.columns.name=df1.columns.name
dpval.columns.name=df1.columns.name
if not df2.columns.name is None:
dcorr.index.name=df2.columns.name
dpval.index.name=df2.columns.name
return dcorr,dpval | python | def corrdfs(df1,df2,method):
"""
df1 in columns
df2 in rows
"""
dcorr=pd.DataFrame(columns=df1.columns,index=df2.columns)
dpval=pd.DataFrame(columns=df1.columns,index=df2.columns)
for c1 in df1:
for c2 in df2:
if method=='spearman':
dcorr.loc[c2,c1],dpval.loc[c2,c1]=spearmanr(df1[c1],df2[c2],
nan_policy='omit'
)
elif method=='pearson':
dcorr.loc[c2,c1],dpval.loc[c2,c1]=pearsonr(df1[c1],df2[c2],
# nan_policy='omit'
)
if not df1.columns.name is None:
dcorr.columns.name=df1.columns.name
dpval.columns.name=df1.columns.name
if not df2.columns.name is None:
dcorr.index.name=df2.columns.name
dpval.index.name=df2.columns.name
return dcorr,dpval | ['def', 'corrdfs', '(', 'df1', ',', 'df2', ',', 'method', ')', ':', 'dcorr', '=', 'pd', '.', 'DataFrame', '(', 'columns', '=', 'df1', '.', 'columns', ',', 'index', '=', 'df2', '.', 'columns', ')', 'dpval', '=', 'pd', '.', 'DataFrame', '(', 'columns', '=', 'df1', '.', 'columns', ',', 'index', '=', 'df2', '.', 'columns', ')', 'for', 'c1', 'in', 'df1', ':', 'for', 'c2', 'in', 'df2', ':', 'if', 'method', '==', "'spearman'", ':', 'dcorr', '.', 'loc', '[', 'c2', ',', 'c1', ']', ',', 'dpval', '.', 'loc', '[', 'c2', ',', 'c1', ']', '=', 'spearmanr', '(', 'df1', '[', 'c1', ']', ',', 'df2', '[', 'c2', ']', ',', 'nan_policy', '=', "'omit'", ')', 'elif', 'method', '==', "'pearson'", ':', 'dcorr', '.', 'loc', '[', 'c2', ',', 'c1', ']', ',', 'dpval', '.', 'loc', '[', 'c2', ',', 'c1', ']', '=', 'pearsonr', '(', 'df1', '[', 'c1', ']', ',', 'df2', '[', 'c2', ']', ',', "# nan_policy='omit'", ')', 'if', 'not', 'df1', '.', 'columns', '.', 'name', 'is', 'None', ':', 'dcorr', '.', 'columns', '.', 'name', '=', 'df1', '.', 'columns', '.', 'name', 'dpval', '.', 'columns', '.', 'name', '=', 'df1', '.', 'columns', '.', 'name', 'if', 'not', 'df2', '.', 'columns', '.', 'name', 'is', 'None', ':', 'dcorr', '.', 'index', '.', 'name', '=', 'df2', '.', 'columns', '.', 'name', 'dpval', '.', 'index', '.', 'name', '=', 'df2', '.', 'columns', '.', 'name', 'return', 'dcorr', ',', 'dpval'] | df1 in columns
df2 in rows | ['df1', 'in', 'columns', 'df2', 'in', 'rows'] | train | https://github.com/rraadd88/rohan/blob/b0643a3582a2fffc0165ace69fb80880d92bfb10/rohan/dandage/stat/corr.py#L8-L32 |
360 | GearPlug/paymentsos-python | paymentsos/tokens.py | Token.create_token | def create_token(self, *, holder_name, card_number, credit_card_cvv, expiration_date, token_type='credit_card',
identity_document=None, billing_address=None, additional_details=None):
"""
When creating a Token, remember to use the public-key header instead of the private-key header,
and do not include the app-id header.
Args:
holder_name: Name of the credit card holder.
card_number: Credit card number.
credit_card_cvv: The CVV number on the card (3 or 4 digits) to be encrypted.
expiration_date: Credit card expiration date. Possible formats: mm-yyyy, mm-yy, mm.yyyy,
mm.yy, mm/yy, mm/yyyy, mm yyyy, or mm yy.
token_type: The type of token
billing_address: Address.
identity_document: National identity document of the card holder.
additional_details: Optional additional data stored with your token in key/value pairs.
Returns:
"""
headers = self.client._get_public_headers()
payload = {
"token_type": token_type,
"credit_card_cvv": credit_card_cvv,
"card_number": card_number,
"expiration_date": expiration_date,
"holder_name": holder_name,
"identity_document": identity_document,
"billing_address": billing_address,
"additional_details": additional_details,
}
endpoint = '/tokens'
return self.client._post(self.client.URL_BASE + endpoint, json=payload, headers=headers) | python | def create_token(self, *, holder_name, card_number, credit_card_cvv, expiration_date, token_type='credit_card',
identity_document=None, billing_address=None, additional_details=None):
"""
When creating a Token, remember to use the public-key header instead of the private-key header,
and do not include the app-id header.
Args:
holder_name: Name of the credit card holder.
card_number: Credit card number.
credit_card_cvv: The CVV number on the card (3 or 4 digits) to be encrypted.
expiration_date: Credit card expiration date. Possible formats: mm-yyyy, mm-yy, mm.yyyy,
mm.yy, mm/yy, mm/yyyy, mm yyyy, or mm yy.
token_type: The type of token
billing_address: Address.
identity_document: National identity document of the card holder.
additional_details: Optional additional data stored with your token in key/value pairs.
Returns:
"""
headers = self.client._get_public_headers()
payload = {
"token_type": token_type,
"credit_card_cvv": credit_card_cvv,
"card_number": card_number,
"expiration_date": expiration_date,
"holder_name": holder_name,
"identity_document": identity_document,
"billing_address": billing_address,
"additional_details": additional_details,
}
endpoint = '/tokens'
return self.client._post(self.client.URL_BASE + endpoint, json=payload, headers=headers) | ['def', 'create_token', '(', 'self', ',', '*', ',', 'holder_name', ',', 'card_number', ',', 'credit_card_cvv', ',', 'expiration_date', ',', 'token_type', '=', "'credit_card'", ',', 'identity_document', '=', 'None', ',', 'billing_address', '=', 'None', ',', 'additional_details', '=', 'None', ')', ':', 'headers', '=', 'self', '.', 'client', '.', '_get_public_headers', '(', ')', 'payload', '=', '{', '"token_type"', ':', 'token_type', ',', '"credit_card_cvv"', ':', 'credit_card_cvv', ',', '"card_number"', ':', 'card_number', ',', '"expiration_date"', ':', 'expiration_date', ',', '"holder_name"', ':', 'holder_name', ',', '"identity_document"', ':', 'identity_document', ',', '"billing_address"', ':', 'billing_address', ',', '"additional_details"', ':', 'additional_details', ',', '}', 'endpoint', '=', "'/tokens'", 'return', 'self', '.', 'client', '.', '_post', '(', 'self', '.', 'client', '.', 'URL_BASE', '+', 'endpoint', ',', 'json', '=', 'payload', ',', 'headers', '=', 'headers', ')'] | When creating a Token, remember to use the public-key header instead of the private-key header,
and do not include the app-id header.
Args:
holder_name: Name of the credit card holder.
card_number: Credit card number.
credit_card_cvv: The CVV number on the card (3 or 4 digits) to be encrypted.
expiration_date: Credit card expiration date. Possible formats: mm-yyyy, mm-yy, mm.yyyy,
mm.yy, mm/yy, mm/yyyy, mm yyyy, or mm yy.
token_type: The type of token
billing_address: Address.
identity_document: National identity document of the card holder.
additional_details: Optional additional data stored with your token in key/value pairs.
Returns: | ['When', 'creating', 'a', 'Token', 'remember', 'to', 'use', 'the', 'public', '-', 'key', 'header', 'instead', 'of', 'the', 'private', '-', 'key', 'header', 'and', 'do', 'not', 'include', 'the', 'app', '-', 'id', 'header', '.'] | train | https://github.com/GearPlug/paymentsos-python/blob/2f32ba83ae890c96799b71d49fc6740bc1081f89/paymentsos/tokens.py#L6-L38 |
361 | ergo/ziggurat_foundations | ziggurat_foundations/models/services/__init__.py | BaseService.base_query | def base_query(cls, db_session=None):
"""
returns base query for specific service
:param db_session:
:return: query
"""
db_session = get_db_session(db_session)
return db_session.query(cls.model) | python | def base_query(cls, db_session=None):
"""
returns base query for specific service
:param db_session:
:return: query
"""
db_session = get_db_session(db_session)
return db_session.query(cls.model) | ['def', 'base_query', '(', 'cls', ',', 'db_session', '=', 'None', ')', ':', 'db_session', '=', 'get_db_session', '(', 'db_session', ')', 'return', 'db_session', '.', 'query', '(', 'cls', '.', 'model', ')'] | returns base query for specific service
:param db_session:
:return: query | ['returns', 'base', 'query', 'for', 'specific', 'service'] | train | https://github.com/ergo/ziggurat_foundations/blob/9eeec894d08e8d7defa60ddc04b63f69cd4cbeba/ziggurat_foundations/models/services/__init__.py#L26-L34 |
362 | choderalab/pymbar | pymbar/mbar_solvers.py | mbar_gradient | def mbar_gradient(u_kn, N_k, f_k):
"""Gradient of MBAR objective function.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
grad : np.ndarray, dtype=float, shape=(n_states)
Gradient of mbar_objective
Notes
-----
This is equation C6 in the JCP MBAR paper.
"""
u_kn, N_k, f_k = validate_inputs(u_kn, N_k, f_k)
log_denominator_n = logsumexp(f_k - u_kn.T, b=N_k, axis=1)
log_numerator_k = logsumexp(-log_denominator_n - u_kn, axis=1)
return -1 * N_k * (1.0 - np.exp(f_k + log_numerator_k)) | python | def mbar_gradient(u_kn, N_k, f_k):
"""Gradient of MBAR objective function.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
grad : np.ndarray, dtype=float, shape=(n_states)
Gradient of mbar_objective
Notes
-----
This is equation C6 in the JCP MBAR paper.
"""
u_kn, N_k, f_k = validate_inputs(u_kn, N_k, f_k)
log_denominator_n = logsumexp(f_k - u_kn.T, b=N_k, axis=1)
log_numerator_k = logsumexp(-log_denominator_n - u_kn, axis=1)
return -1 * N_k * (1.0 - np.exp(f_k + log_numerator_k)) | ['def', 'mbar_gradient', '(', 'u_kn', ',', 'N_k', ',', 'f_k', ')', ':', 'u_kn', ',', 'N_k', ',', 'f_k', '=', 'validate_inputs', '(', 'u_kn', ',', 'N_k', ',', 'f_k', ')', 'log_denominator_n', '=', 'logsumexp', '(', 'f_k', '-', 'u_kn', '.', 'T', ',', 'b', '=', 'N_k', ',', 'axis', '=', '1', ')', 'log_numerator_k', '=', 'logsumexp', '(', '-', 'log_denominator_n', '-', 'u_kn', ',', 'axis', '=', '1', ')', 'return', '-', '1', '*', 'N_k', '*', '(', '1.0', '-', 'np', '.', 'exp', '(', 'f_k', '+', 'log_numerator_k', ')', ')'] | Gradient of MBAR objective function.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
grad : np.ndarray, dtype=float, shape=(n_states)
Gradient of mbar_objective
Notes
-----
This is equation C6 in the JCP MBAR paper. | ['Gradient', 'of', 'MBAR', 'objective', 'function', '.'] | train | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/mbar_solvers.py#L80-L105 |
363 | bitlabstudio/django-user-media | user_media/models.py | UserMediaImage.large_size | def large_size(self, as_string=True):
"""Returns a thumbnail's large size."""
size = getattr(settings, 'USER_MEDIA_THUMB_SIZE_LARGE', (150, 150))
if as_string:
return u'{}x{}'.format(size[0], size[1])
return size | python | def large_size(self, as_string=True):
"""Returns a thumbnail's large size."""
size = getattr(settings, 'USER_MEDIA_THUMB_SIZE_LARGE', (150, 150))
if as_string:
return u'{}x{}'.format(size[0], size[1])
return size | ['def', 'large_size', '(', 'self', ',', 'as_string', '=', 'True', ')', ':', 'size', '=', 'getattr', '(', 'settings', ',', "'USER_MEDIA_THUMB_SIZE_LARGE'", ',', '(', '150', ',', '150', ')', ')', 'if', 'as_string', ':', 'return', "u'{}x{}'", '.', 'format', '(', 'size', '[', '0', ']', ',', 'size', '[', '1', ']', ')', 'return', 'size'] | Returns a thumbnail's large size. | ['Returns', 'a', 'thumbnail', 's', 'large', 'size', '.'] | train | https://github.com/bitlabstudio/django-user-media/blob/63905aeb57640f116320ab8d7116e0ec35fde377/user_media/models.py#L122-L127 |
364 | wright-group/WrightTools | WrightTools/artists/_colors.py | get_color_cycle | def get_color_cycle(n, cmap="rainbow", rotations=3):
"""Get a list of RGBA colors following a colormap.
Useful for plotting lots of elements, keeping the color of each unique.
Parameters
----------
n : integer
The number of colors to return.
cmap : string (optional)
The colormap to use in the cycle. Default is rainbow.
rotations : integer (optional)
The number of times to repeat the colormap over the cycle. Default is 3.
Returns
-------
list
List of RGBA lists.
"""
cmap = colormaps[cmap]
if np.mod(n, rotations) == 0:
per = np.floor_divide(n, rotations)
else:
per = np.floor_divide(n, rotations) + 1
vals = list(np.linspace(0, 1, per))
vals = vals * rotations
vals = vals[:n]
out = cmap(vals)
return out | python | def get_color_cycle(n, cmap="rainbow", rotations=3):
"""Get a list of RGBA colors following a colormap.
Useful for plotting lots of elements, keeping the color of each unique.
Parameters
----------
n : integer
The number of colors to return.
cmap : string (optional)
The colormap to use in the cycle. Default is rainbow.
rotations : integer (optional)
The number of times to repeat the colormap over the cycle. Default is 3.
Returns
-------
list
List of RGBA lists.
"""
cmap = colormaps[cmap]
if np.mod(n, rotations) == 0:
per = np.floor_divide(n, rotations)
else:
per = np.floor_divide(n, rotations) + 1
vals = list(np.linspace(0, 1, per))
vals = vals * rotations
vals = vals[:n]
out = cmap(vals)
return out | ['def', 'get_color_cycle', '(', 'n', ',', 'cmap', '=', '"rainbow"', ',', 'rotations', '=', '3', ')', ':', 'cmap', '=', 'colormaps', '[', 'cmap', ']', 'if', 'np', '.', 'mod', '(', 'n', ',', 'rotations', ')', '==', '0', ':', 'per', '=', 'np', '.', 'floor_divide', '(', 'n', ',', 'rotations', ')', 'else', ':', 'per', '=', 'np', '.', 'floor_divide', '(', 'n', ',', 'rotations', ')', '+', '1', 'vals', '=', 'list', '(', 'np', '.', 'linspace', '(', '0', ',', '1', ',', 'per', ')', ')', 'vals', '=', 'vals', '*', 'rotations', 'vals', '=', 'vals', '[', ':', 'n', ']', 'out', '=', 'cmap', '(', 'vals', ')', 'return', 'out'] | Get a list of RGBA colors following a colormap.
Useful for plotting lots of elements, keeping the color of each unique.
Parameters
----------
n : integer
The number of colors to return.
cmap : string (optional)
The colormap to use in the cycle. Default is rainbow.
rotations : integer (optional)
The number of times to repeat the colormap over the cycle. Default is 3.
Returns
-------
list
List of RGBA lists. | ['Get', 'a', 'list', 'of', 'RGBA', 'colors', 'following', 'a', 'colormap', '.'] | train | https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/artists/_colors.py#L253-L281 |
365 | liampauling/betfair | betfairlightweight/utils.py | check_status_code | def check_status_code(response, codes=None):
"""
Checks response.status_code is in codes.
:param requests.request response: Requests response
:param list codes: List of accepted codes or callable
:raises: StatusCodeError if code invalid
"""
codes = codes or [200]
if response.status_code not in codes:
raise StatusCodeError(response.status_code) | python | def check_status_code(response, codes=None):
"""
Checks response.status_code is in codes.
:param requests.request response: Requests response
:param list codes: List of accepted codes or callable
:raises: StatusCodeError if code invalid
"""
codes = codes or [200]
if response.status_code not in codes:
raise StatusCodeError(response.status_code) | ['def', 'check_status_code', '(', 'response', ',', 'codes', '=', 'None', ')', ':', 'codes', '=', 'codes', 'or', '[', '200', ']', 'if', 'response', '.', 'status_code', 'not', 'in', 'codes', ':', 'raise', 'StatusCodeError', '(', 'response', '.', 'status_code', ')'] | Checks response.status_code is in codes.
:param requests.request response: Requests response
:param list codes: List of accepted codes or callable
:raises: StatusCodeError if code invalid | ['Checks', 'response', '.', 'status_code', 'is', 'in', 'codes', '.'] | train | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/utils.py#L7-L17 |
366 | kwikteam/phy | phy/gui/widgets.py | HTMLWidget.eval_js | def eval_js(self, expr):
"""Evaluate a Javascript expression."""
if not self.is_built():
self._pending_js_eval.append(expr)
return
logger.log(5, "Evaluate Javascript: `%s`.", expr)
out = self.page().mainFrame().evaluateJavaScript(expr)
return _to_py(out) | python | def eval_js(self, expr):
"""Evaluate a Javascript expression."""
if not self.is_built():
self._pending_js_eval.append(expr)
return
logger.log(5, "Evaluate Javascript: `%s`.", expr)
out = self.page().mainFrame().evaluateJavaScript(expr)
return _to_py(out) | ['def', 'eval_js', '(', 'self', ',', 'expr', ')', ':', 'if', 'not', 'self', '.', 'is_built', '(', ')', ':', 'self', '.', '_pending_js_eval', '.', 'append', '(', 'expr', ')', 'return', 'logger', '.', 'log', '(', '5', ',', '"Evaluate Javascript: `%s`."', ',', 'expr', ')', 'out', '=', 'self', '.', 'page', '(', ')', '.', 'mainFrame', '(', ')', '.', 'evaluateJavaScript', '(', 'expr', ')', 'return', '_to_py', '(', 'out', ')'] | Evaluate a Javascript expression. | ['Evaluate', 'a', 'Javascript', 'expression', '.'] | train | https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/gui/widgets.py#L190-L197 |
367 | pywbem/pywbem | pywbem_mock/_resolvermixin.py | ResolverMixin._resolve_objects | def _resolve_objects(self, new_objects, superclass_objects, new_class,
superclass, classrepo, qualifier_repo, type_str,
verbose=None):
"""
Resolve a dictionary of objects where the objects can be CIMProperty,
CIMMethod, or CIMParameter. This method resolves each of the objects
in the dictionary, using the superclass if it is defined.
"""
if new_objects: # TODO Future REMOVE. This is test code
assert isinstance(new_objects, (dict, NocaseDict))
keys = new_objects.keys()
assert isinstance(new_objects[keys[0]], (CIMMethod, CIMProperty,
CIMParameter))
if not superclass:
for new_obj in six.itervalues(new_objects):
self._set_new_object(new_obj, None, new_class, None,
qualifier_repo,
False, type_str)
return
# process objects if superclass exists
for oname, new_obj in six.iteritems(new_objects):
if oname not in superclass_objects:
self._set_new_object(new_obj, None, new_class,
superclass, qualifier_repo,
False, type_str)
continue
# oname in superclass_objects
# TODO: We may have object naming because of override.
if 'Override' not in new_objects[oname].qualifiers:
if not isinstance(new_objects[oname], CIMParameter):
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("{0} {1!A} in {2!A} duplicates {0} in "
"{3!A} without override.",
type_str, oname, new_class.classname,
superclass.classname))
# TODO need to finish this. For now just let
# parameter slide. Keep the new one.
continue
# process object override
# get override name
override_name = new_objects[oname].qualifiers["override"].value
if isinstance(new_obj, (CIMParameter, CIMProperty)):
if new_obj.type == 'reference':
if override_name != oname:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("Invalid new_class reference "
"{0} {1!A}. in class {2!A}"
"Override must not change {0} "
"name but override name is {3!A}",
type_str, oname, superclass.classname,
override_name))
try:
super_obj = superclass_objects[override_name]
except KeyError:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("Invalid new_class override {0} {1!A}. in class "
"{2!A}. Override name {3!A}} not found in {3!A}.",
type_str, oname, new_class.classname, override_name,
superclass.classname))
# Test if new object characteristics consistent with
# requirements for that object type
if isinstance(super_obj, CIMProperty):
if super_obj.type != new_obj.type \
or super_obj.is_array != new_obj.is_array \
or super_obj.embedded_object != \
new_obj.embedded_object:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("Invalid new_class property {0!A}. "
"Does not match overridden property "
"{1!A} in class {2!A}",
oname, super_obj.name,
superclass.classname))
elif isinstance(super_obj, CIMMethod):
if super_obj.return_type != new_obj.return_type:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("Invalid new_class method {0!A}. "
"Mismatch method return typein "
"class {1!A}.", oname,
superclass.classname))
elif isinstance(super_obj, CIMParameter):
if super_obj.type != new_obj.type or \
super_obj.is_array != new_obj.is_array or \
super_obj.array_size != new_obj.array_size or \
super_obj.embedded_object != new_obj.embedded_object:
mname = None
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("Invalid new_class parameter "
"{0!A} param {1|A}. "
"Does not match signature of "
"overridden method parameters "
"in class {2!A}.", mname, oname,
superclass.classname))
else:
assert True, "Invalid Type {0}" .format(type(super_obj))
self._set_new_object(new_obj, super_obj,
new_class,
superclass, qualifier_repo,
True, type_str)
# if type is method, resolve the parameters.
if isinstance(new_obj, CIMMethod):
self._resolve_objects(
new_obj.parameters,
superclass_objects[new_obj.name].parameters,
new_class,
superclass, classrepo, qualifier_repo,
"Parameter", verbose=verbose)
# Copy objects from from superclass that are not in new_class
# Placed after loop with items in new_object so they are not part
# of that loop.
for oname, ovalue in six.iteritems(superclass_objects):
if oname not in new_objects:
new_value = ovalue.copy()
new_value.propagated = True
assert ovalue.class_origin
new_value.class_origin = ovalue.class_origin
for qualifier in new_value.qualifiers.values():
qualifier.propagated = True
new_objects[oname] = new_value | python | def _resolve_objects(self, new_objects, superclass_objects, new_class,
superclass, classrepo, qualifier_repo, type_str,
verbose=None):
"""
Resolve a dictionary of objects where the objects can be CIMProperty,
CIMMethod, or CIMParameter. This method resolves each of the objects
in the dictionary, using the superclass if it is defined.
"""
if new_objects: # TODO Future REMOVE. This is test code
assert isinstance(new_objects, (dict, NocaseDict))
keys = new_objects.keys()
assert isinstance(new_objects[keys[0]], (CIMMethod, CIMProperty,
CIMParameter))
if not superclass:
for new_obj in six.itervalues(new_objects):
self._set_new_object(new_obj, None, new_class, None,
qualifier_repo,
False, type_str)
return
# process objects if superclass exists
for oname, new_obj in six.iteritems(new_objects):
if oname not in superclass_objects:
self._set_new_object(new_obj, None, new_class,
superclass, qualifier_repo,
False, type_str)
continue
# oname in superclass_objects
# TODO: We may have object naming because of override.
if 'Override' not in new_objects[oname].qualifiers:
if not isinstance(new_objects[oname], CIMParameter):
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("{0} {1!A} in {2!A} duplicates {0} in "
"{3!A} without override.",
type_str, oname, new_class.classname,
superclass.classname))
# TODO need to finish this. For now just let
# parameter slide. Keep the new one.
continue
# process object override
# get override name
override_name = new_objects[oname].qualifiers["override"].value
if isinstance(new_obj, (CIMParameter, CIMProperty)):
if new_obj.type == 'reference':
if override_name != oname:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("Invalid new_class reference "
"{0} {1!A}. in class {2!A}"
"Override must not change {0} "
"name but override name is {3!A}",
type_str, oname, superclass.classname,
override_name))
try:
super_obj = superclass_objects[override_name]
except KeyError:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("Invalid new_class override {0} {1!A}. in class "
"{2!A}. Override name {3!A}} not found in {3!A}.",
type_str, oname, new_class.classname, override_name,
superclass.classname))
# Test if new object characteristics consistent with
# requirements for that object type
if isinstance(super_obj, CIMProperty):
if super_obj.type != new_obj.type \
or super_obj.is_array != new_obj.is_array \
or super_obj.embedded_object != \
new_obj.embedded_object:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("Invalid new_class property {0!A}. "
"Does not match overridden property "
"{1!A} in class {2!A}",
oname, super_obj.name,
superclass.classname))
elif isinstance(super_obj, CIMMethod):
if super_obj.return_type != new_obj.return_type:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("Invalid new_class method {0!A}. "
"Mismatch method return typein "
"class {1!A}.", oname,
superclass.classname))
elif isinstance(super_obj, CIMParameter):
if super_obj.type != new_obj.type or \
super_obj.is_array != new_obj.is_array or \
super_obj.array_size != new_obj.array_size or \
super_obj.embedded_object != new_obj.embedded_object:
mname = None
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("Invalid new_class parameter "
"{0!A} param {1|A}. "
"Does not match signature of "
"overridden method parameters "
"in class {2!A}.", mname, oname,
superclass.classname))
else:
assert True, "Invalid Type {0}" .format(type(super_obj))
self._set_new_object(new_obj, super_obj,
new_class,
superclass, qualifier_repo,
True, type_str)
# if type is method, resolve the parameters.
if isinstance(new_obj, CIMMethod):
self._resolve_objects(
new_obj.parameters,
superclass_objects[new_obj.name].parameters,
new_class,
superclass, classrepo, qualifier_repo,
"Parameter", verbose=verbose)
# Copy objects from from superclass that are not in new_class
# Placed after loop with items in new_object so they are not part
# of that loop.
for oname, ovalue in six.iteritems(superclass_objects):
if oname not in new_objects:
new_value = ovalue.copy()
new_value.propagated = True
assert ovalue.class_origin
new_value.class_origin = ovalue.class_origin
for qualifier in new_value.qualifiers.values():
qualifier.propagated = True
new_objects[oname] = new_value | ['def', '_resolve_objects', '(', 'self', ',', 'new_objects', ',', 'superclass_objects', ',', 'new_class', ',', 'superclass', ',', 'classrepo', ',', 'qualifier_repo', ',', 'type_str', ',', 'verbose', '=', 'None', ')', ':', 'if', 'new_objects', ':', '# TODO Future REMOVE. This is test code', 'assert', 'isinstance', '(', 'new_objects', ',', '(', 'dict', ',', 'NocaseDict', ')', ')', 'keys', '=', 'new_objects', '.', 'keys', '(', ')', 'assert', 'isinstance', '(', 'new_objects', '[', 'keys', '[', '0', ']', ']', ',', '(', 'CIMMethod', ',', 'CIMProperty', ',', 'CIMParameter', ')', ')', 'if', 'not', 'superclass', ':', 'for', 'new_obj', 'in', 'six', '.', 'itervalues', '(', 'new_objects', ')', ':', 'self', '.', '_set_new_object', '(', 'new_obj', ',', 'None', ',', 'new_class', ',', 'None', ',', 'qualifier_repo', ',', 'False', ',', 'type_str', ')', 'return', '# process objects if superclass exists', 'for', 'oname', ',', 'new_obj', 'in', 'six', '.', 'iteritems', '(', 'new_objects', ')', ':', 'if', 'oname', 'not', 'in', 'superclass_objects', ':', 'self', '.', '_set_new_object', '(', 'new_obj', ',', 'None', ',', 'new_class', ',', 'superclass', ',', 'qualifier_repo', ',', 'False', ',', 'type_str', ')', 'continue', '# oname in superclass_objects', '# TODO: We may have object naming because of override.', 'if', "'Override'", 'not', 'in', 'new_objects', '[', 'oname', ']', '.', 'qualifiers', ':', 'if', 'not', 'isinstance', '(', 'new_objects', '[', 'oname', ']', ',', 'CIMParameter', ')', ':', 'raise', 'CIMError', '(', 'CIM_ERR_INVALID_PARAMETER', ',', '_format', '(', '"{0} {1!A} in {2!A} duplicates {0} in "', '"{3!A} without override."', ',', 'type_str', ',', 'oname', ',', 'new_class', '.', 'classname', ',', 'superclass', '.', 'classname', ')', ')', '# TODO need to finish this. For now just let', '# parameter slide. Keep the new one.', 'continue', '# process object override', '# get override name', 'override_name', '=', 'new_objects', '[', 'oname', ']', '.', 'qualifiers', '[', '"override"', ']', '.', 'value', 'if', 'isinstance', '(', 'new_obj', ',', '(', 'CIMParameter', ',', 'CIMProperty', ')', ')', ':', 'if', 'new_obj', '.', 'type', '==', "'reference'", ':', 'if', 'override_name', '!=', 'oname', ':', 'raise', 'CIMError', '(', 'CIM_ERR_INVALID_PARAMETER', ',', '_format', '(', '"Invalid new_class reference "', '"{0} {1!A}. in class {2!A}"', '"Override must not change {0} "', '"name but override name is {3!A}"', ',', 'type_str', ',', 'oname', ',', 'superclass', '.', 'classname', ',', 'override_name', ')', ')', 'try', ':', 'super_obj', '=', 'superclass_objects', '[', 'override_name', ']', 'except', 'KeyError', ':', 'raise', 'CIMError', '(', 'CIM_ERR_INVALID_PARAMETER', ',', '_format', '(', '"Invalid new_class override {0} {1!A}. in class "', '"{2!A}. Override name {3!A}} not found in {3!A}."', ',', 'type_str', ',', 'oname', ',', 'new_class', '.', 'classname', ',', 'override_name', ',', 'superclass', '.', 'classname', ')', ')', '# Test if new object characteristics consistent with', '# requirements for that object type', 'if', 'isinstance', '(', 'super_obj', ',', 'CIMProperty', ')', ':', 'if', 'super_obj', '.', 'type', '!=', 'new_obj', '.', 'type', 'or', 'super_obj', '.', 'is_array', '!=', 'new_obj', '.', 'is_array', 'or', 'super_obj', '.', 'embedded_object', '!=', 'new_obj', '.', 'embedded_object', ':', 'raise', 'CIMError', '(', 'CIM_ERR_INVALID_PARAMETER', ',', '_format', '(', '"Invalid new_class property {0!A}. "', '"Does not match overridden property "', '"{1!A} in class {2!A}"', ',', 'oname', ',', 'super_obj', '.', 'name', ',', 'superclass', '.', 'classname', ')', ')', 'elif', 'isinstance', '(', 'super_obj', ',', 'CIMMethod', ')', ':', 'if', 'super_obj', '.', 'return_type', '!=', 'new_obj', '.', 'return_type', ':', 'raise', 'CIMError', '(', 'CIM_ERR_INVALID_PARAMETER', ',', '_format', '(', '"Invalid new_class method {0!A}. "', '"Mismatch method return typein "', '"class {1!A}."', ',', 'oname', ',', 'superclass', '.', 'classname', ')', ')', 'elif', 'isinstance', '(', 'super_obj', ',', 'CIMParameter', ')', ':', 'if', 'super_obj', '.', 'type', '!=', 'new_obj', '.', 'type', 'or', 'super_obj', '.', 'is_array', '!=', 'new_obj', '.', 'is_array', 'or', 'super_obj', '.', 'array_size', '!=', 'new_obj', '.', 'array_size', 'or', 'super_obj', '.', 'embedded_object', '!=', 'new_obj', '.', 'embedded_object', ':', 'mname', '=', 'None', 'raise', 'CIMError', '(', 'CIM_ERR_INVALID_PARAMETER', ',', '_format', '(', '"Invalid new_class parameter "', '"{0!A} param {1|A}. "', '"Does not match signature of "', '"overridden method parameters "', '"in class {2!A}."', ',', 'mname', ',', 'oname', ',', 'superclass', '.', 'classname', ')', ')', 'else', ':', 'assert', 'True', ',', '"Invalid Type {0}"', '.', 'format', '(', 'type', '(', 'super_obj', ')', ')', 'self', '.', '_set_new_object', '(', 'new_obj', ',', 'super_obj', ',', 'new_class', ',', 'superclass', ',', 'qualifier_repo', ',', 'True', ',', 'type_str', ')', '# if type is method, resolve the parameters.', 'if', 'isinstance', '(', 'new_obj', ',', 'CIMMethod', ')', ':', 'self', '.', '_resolve_objects', '(', 'new_obj', '.', 'parameters', ',', 'superclass_objects', '[', 'new_obj', '.', 'name', ']', '.', 'parameters', ',', 'new_class', ',', 'superclass', ',', 'classrepo', ',', 'qualifier_repo', ',', '"Parameter"', ',', 'verbose', '=', 'verbose', ')', '# Copy objects from from superclass that are not in new_class', '# Placed after loop with items in new_object so they are not part', '# of that loop.', 'for', 'oname', ',', 'ovalue', 'in', 'six', '.', 'iteritems', '(', 'superclass_objects', ')', ':', 'if', 'oname', 'not', 'in', 'new_objects', ':', 'new_value', '=', 'ovalue', '.', 'copy', '(', ')', 'new_value', '.', 'propagated', '=', 'True', 'assert', 'ovalue', '.', 'class_origin', 'new_value', '.', 'class_origin', '=', 'ovalue', '.', 'class_origin', 'for', 'qualifier', 'in', 'new_value', '.', 'qualifiers', '.', 'values', '(', ')', ':', 'qualifier', '.', 'propagated', '=', 'True', 'new_objects', '[', 'oname', ']', '=', 'new_value'] | Resolve a dictionary of objects where the objects can be CIMProperty,
CIMMethod, or CIMParameter. This method resolves each of the objects
in the dictionary, using the superclass if it is defined. | ['Resolve', 'a', 'dictionary', 'of', 'objects', 'where', 'the', 'objects', 'can', 'be', 'CIMProperty', 'CIMMethod', 'or', 'CIMParameter', '.', 'This', 'method', 'resolves', 'each', 'of', 'the', 'objects', 'in', 'the', 'dictionary', 'using', 'the', 'superclass', 'if', 'it', 'is', 'defined', '.'] | train | https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem_mock/_resolvermixin.py#L163-L295 |
368 | karjaljo/hiisi | hiisi/hiisi.py | HiisiHDF.groups | def groups(self):
"""Method returns a list of all goup paths
Examples
--------
>>> for group in h5f.groups():
print(group)
'/'
'/dataset1'
'/dataset1/data1'
'/dataset1/data2'
"""
HiisiHDF._clear_cache()
self.CACHE['group_paths'].append('/')
self.visititems(HiisiHDF._is_group)
return HiisiHDF.CACHE['group_paths'] | python | def groups(self):
"""Method returns a list of all goup paths
Examples
--------
>>> for group in h5f.groups():
print(group)
'/'
'/dataset1'
'/dataset1/data1'
'/dataset1/data2'
"""
HiisiHDF._clear_cache()
self.CACHE['group_paths'].append('/')
self.visititems(HiisiHDF._is_group)
return HiisiHDF.CACHE['group_paths'] | ['def', 'groups', '(', 'self', ')', ':', 'HiisiHDF', '.', '_clear_cache', '(', ')', 'self', '.', 'CACHE', '[', "'group_paths'", ']', '.', 'append', '(', "'/'", ')', 'self', '.', 'visititems', '(', 'HiisiHDF', '.', '_is_group', ')', 'return', 'HiisiHDF', '.', 'CACHE', '[', "'group_paths'", ']'] | Method returns a list of all goup paths
Examples
--------
>>> for group in h5f.groups():
print(group)
'/'
'/dataset1'
'/dataset1/data1'
'/dataset1/data2' | ['Method', 'returns', 'a', 'list', 'of', 'all', 'goup', 'paths', 'Examples', '--------', '>>>', 'for', 'group', 'in', 'h5f', '.', 'groups', '()', ':', 'print', '(', 'group', ')', '/', '/', 'dataset1', '/', 'dataset1', '/', 'data1', '/', 'dataset1', '/', 'data2'] | train | https://github.com/karjaljo/hiisi/blob/de6a64df5dcbcb37d5d3d5468663e65a7794f9a8/hiisi/hiisi.py#L85-L100 |
369 | Opentrons/opentrons | api/src/opentrons/hardware_control/__init__.py | API.home_plunger | async def home_plunger(self, mount: top_types.Mount):
"""
Home the plunger motor for a mount, and then return it to the 'bottom'
position.
:param mount: the mount associated with the target plunger
:type mount: :py:class:`.top_types.Mount`
"""
instr = self._attached_instruments[mount]
if instr:
await self.home([Axis.of_plunger(mount)])
await self._move_plunger(mount,
instr.config.bottom) | python | async def home_plunger(self, mount: top_types.Mount):
"""
Home the plunger motor for a mount, and then return it to the 'bottom'
position.
:param mount: the mount associated with the target plunger
:type mount: :py:class:`.top_types.Mount`
"""
instr = self._attached_instruments[mount]
if instr:
await self.home([Axis.of_plunger(mount)])
await self._move_plunger(mount,
instr.config.bottom) | ['async', 'def', 'home_plunger', '(', 'self', ',', 'mount', ':', 'top_types', '.', 'Mount', ')', ':', 'instr', '=', 'self', '.', '_attached_instruments', '[', 'mount', ']', 'if', 'instr', ':', 'await', 'self', '.', 'home', '(', '[', 'Axis', '.', 'of_plunger', '(', 'mount', ')', ']', ')', 'await', 'self', '.', '_move_plunger', '(', 'mount', ',', 'instr', '.', 'config', '.', 'bottom', ')'] | Home the plunger motor for a mount, and then return it to the 'bottom'
position.
:param mount: the mount associated with the target plunger
:type mount: :py:class:`.top_types.Mount` | ['Home', 'the', 'plunger', 'motor', 'for', 'a', 'mount', 'and', 'then', 'return', 'it', 'to', 'the', 'bottom', 'position', '.'] | train | https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/hardware_control/__init__.py#L383-L395 |
370 | VonStruddle/PyHunter | pyhunter/pyhunter.py | PyHunter.get_leads_lists | def get_leads_lists(self, offset=None, limit=None):
"""
Gives back all the leads lists saved on your account.
:param offset: Number of lists to skip.
:param limit: Maximum number of lists to return.
:return: Leads lists found as a dict.
"""
params = self.base_params
if offset:
params['offset'] = offset
if limit:
params['limit'] = limit
endpoint = self.base_endpoint.format('leads_lists')
return self._query_hunter(endpoint, params) | python | def get_leads_lists(self, offset=None, limit=None):
"""
Gives back all the leads lists saved on your account.
:param offset: Number of lists to skip.
:param limit: Maximum number of lists to return.
:return: Leads lists found as a dict.
"""
params = self.base_params
if offset:
params['offset'] = offset
if limit:
params['limit'] = limit
endpoint = self.base_endpoint.format('leads_lists')
return self._query_hunter(endpoint, params) | ['def', 'get_leads_lists', '(', 'self', ',', 'offset', '=', 'None', ',', 'limit', '=', 'None', ')', ':', 'params', '=', 'self', '.', 'base_params', 'if', 'offset', ':', 'params', '[', "'offset'", ']', '=', 'offset', 'if', 'limit', ':', 'params', '[', "'limit'", ']', '=', 'limit', 'endpoint', '=', 'self', '.', 'base_endpoint', '.', 'format', '(', "'leads_lists'", ')', 'return', 'self', '.', '_query_hunter', '(', 'endpoint', ',', 'params', ')'] | Gives back all the leads lists saved on your account.
:param offset: Number of lists to skip.
:param limit: Maximum number of lists to return.
:return: Leads lists found as a dict. | ['Gives', 'back', 'all', 'the', 'leads', 'lists', 'saved', 'on', 'your', 'account', '.'] | train | https://github.com/VonStruddle/PyHunter/blob/e14882d22527102515458cddeb8e0aa1c02da549/pyhunter/pyhunter.py#L385-L404 |
371 | cloud-custodian/cloud-custodian | tools/c7n_logexporter/c7n_logexporter/exporter.py | export | def export(group, bucket, prefix, start, end, role, poll_period=120,
session=None, name="", region=None):
"""export a given log group to s3"""
start = start and isinstance(start, six.string_types) and parse(start) or start
end = (end and isinstance(start, six.string_types) and
parse(end) or end or datetime.now())
start = start.replace(tzinfo=tzlocal()).astimezone(tzutc())
end = end.replace(tzinfo=tzlocal()).astimezone(tzutc())
if session is None:
session = get_session(role, region)
client = session.client('logs')
paginator = client.get_paginator('describe_log_groups')
for p in paginator.paginate():
found = False
for _group in p['logGroups']:
if _group['logGroupName'] == group:
group = _group
found = True
break
if found:
break
if not found:
raise ValueError("Log group %s not found." % group)
if prefix:
prefix = "%s/%s" % (prefix.rstrip('/'), group['logGroupName'].strip('/'))
else:
prefix = group['logGroupName']
named_group = "%s:%s" % (name, group['logGroupName'])
log.info(
"Log exporting group:%s start:%s end:%s bucket:%s prefix:%s size:%s",
named_group,
start.strftime('%Y/%m/%d'),
end.strftime('%Y/%m/%d'),
bucket,
prefix,
group['storedBytes'])
t = time.time()
days = [(
start + timedelta(i)).replace(minute=0, hour=0, second=0, microsecond=0)
for i in range((end - start).days)]
day_count = len(days)
s3 = boto3.Session().client('s3')
days = filter_extant_exports(s3, bucket, prefix, days, start, end)
log.info("Group:%s filtering s3 extant keys from %d to %d start:%s end:%s",
named_group, day_count, len(days),
days[0] if days else '', days[-1] if days else '')
t = time.time()
retry = get_retry(('SlowDown',))
for idx, d in enumerate(days):
date = d.replace(minute=0, microsecond=0, hour=0)
export_prefix = "%s%s" % (prefix, date.strftime("/%Y/%m/%d"))
params = {
'taskName': "%s-%s" % ("c7n-log-exporter",
date.strftime("%Y-%m-%d")),
'logGroupName': group['logGroupName'],
'fromTime': int(time.mktime(
date.replace(
minute=0, microsecond=0, hour=0).timetuple()) * 1000),
'to': int(time.mktime(
date.replace(
minute=59, hour=23, microsecond=0).timetuple()) * 1000),
'destination': bucket,
'destinationPrefix': export_prefix
}
# if stream_prefix:
# params['logStreamPrefix'] = stream_prefix
try:
s3.head_object(Bucket=bucket, Key=prefix)
except ClientError as e:
if e.response['Error']['Code'] != '404': # Not Found
raise
s3.put_object(
Bucket=bucket,
Key=prefix,
Body=json.dumps({}),
ACL="bucket-owner-full-control",
ServerSideEncryption="AES256")
t = time.time()
counter = 0
while True:
counter += 1
try:
result = client.create_export_task(**params)
except ClientError as e:
if e.response['Error']['Code'] == 'LimitExceededException':
time.sleep(poll_period)
# log every 30m of export waiting
if counter % 6 == 0:
log.debug(
"group:%s day:%s waiting for %0.2f minutes",
named_group, d.strftime('%Y-%m-%d'),
(counter * poll_period) / 60.0)
continue
raise
retry(
s3.put_object_tagging,
Bucket=bucket, Key=prefix,
Tagging={
'TagSet': [{
'Key': 'LastExport',
'Value': d.isoformat()}]})
break
log.info(
"Log export time:%0.2f group:%s day:%s bucket:%s prefix:%s task:%s",
time.time() - t,
named_group,
d.strftime("%Y-%m-%d"),
bucket,
params['destinationPrefix'],
result['taskId'])
log.info(
("Exported log group:%s time:%0.2f days:%d start:%s"
" end:%s bucket:%s prefix:%s"),
named_group,
time.time() - t,
len(days),
start.strftime('%Y/%m/%d'),
end.strftime('%Y/%m/%d'),
bucket,
prefix) | python | def export(group, bucket, prefix, start, end, role, poll_period=120,
session=None, name="", region=None):
"""export a given log group to s3"""
start = start and isinstance(start, six.string_types) and parse(start) or start
end = (end and isinstance(start, six.string_types) and
parse(end) or end or datetime.now())
start = start.replace(tzinfo=tzlocal()).astimezone(tzutc())
end = end.replace(tzinfo=tzlocal()).astimezone(tzutc())
if session is None:
session = get_session(role, region)
client = session.client('logs')
paginator = client.get_paginator('describe_log_groups')
for p in paginator.paginate():
found = False
for _group in p['logGroups']:
if _group['logGroupName'] == group:
group = _group
found = True
break
if found:
break
if not found:
raise ValueError("Log group %s not found." % group)
if prefix:
prefix = "%s/%s" % (prefix.rstrip('/'), group['logGroupName'].strip('/'))
else:
prefix = group['logGroupName']
named_group = "%s:%s" % (name, group['logGroupName'])
log.info(
"Log exporting group:%s start:%s end:%s bucket:%s prefix:%s size:%s",
named_group,
start.strftime('%Y/%m/%d'),
end.strftime('%Y/%m/%d'),
bucket,
prefix,
group['storedBytes'])
t = time.time()
days = [(
start + timedelta(i)).replace(minute=0, hour=0, second=0, microsecond=0)
for i in range((end - start).days)]
day_count = len(days)
s3 = boto3.Session().client('s3')
days = filter_extant_exports(s3, bucket, prefix, days, start, end)
log.info("Group:%s filtering s3 extant keys from %d to %d start:%s end:%s",
named_group, day_count, len(days),
days[0] if days else '', days[-1] if days else '')
t = time.time()
retry = get_retry(('SlowDown',))
for idx, d in enumerate(days):
date = d.replace(minute=0, microsecond=0, hour=0)
export_prefix = "%s%s" % (prefix, date.strftime("/%Y/%m/%d"))
params = {
'taskName': "%s-%s" % ("c7n-log-exporter",
date.strftime("%Y-%m-%d")),
'logGroupName': group['logGroupName'],
'fromTime': int(time.mktime(
date.replace(
minute=0, microsecond=0, hour=0).timetuple()) * 1000),
'to': int(time.mktime(
date.replace(
minute=59, hour=23, microsecond=0).timetuple()) * 1000),
'destination': bucket,
'destinationPrefix': export_prefix
}
# if stream_prefix:
# params['logStreamPrefix'] = stream_prefix
try:
s3.head_object(Bucket=bucket, Key=prefix)
except ClientError as e:
if e.response['Error']['Code'] != '404': # Not Found
raise
s3.put_object(
Bucket=bucket,
Key=prefix,
Body=json.dumps({}),
ACL="bucket-owner-full-control",
ServerSideEncryption="AES256")
t = time.time()
counter = 0
while True:
counter += 1
try:
result = client.create_export_task(**params)
except ClientError as e:
if e.response['Error']['Code'] == 'LimitExceededException':
time.sleep(poll_period)
# log every 30m of export waiting
if counter % 6 == 0:
log.debug(
"group:%s day:%s waiting for %0.2f minutes",
named_group, d.strftime('%Y-%m-%d'),
(counter * poll_period) / 60.0)
continue
raise
retry(
s3.put_object_tagging,
Bucket=bucket, Key=prefix,
Tagging={
'TagSet': [{
'Key': 'LastExport',
'Value': d.isoformat()}]})
break
log.info(
"Log export time:%0.2f group:%s day:%s bucket:%s prefix:%s task:%s",
time.time() - t,
named_group,
d.strftime("%Y-%m-%d"),
bucket,
params['destinationPrefix'],
result['taskId'])
log.info(
("Exported log group:%s time:%0.2f days:%d start:%s"
" end:%s bucket:%s prefix:%s"),
named_group,
time.time() - t,
len(days),
start.strftime('%Y/%m/%d'),
end.strftime('%Y/%m/%d'),
bucket,
prefix) | ['def', 'export', '(', 'group', ',', 'bucket', ',', 'prefix', ',', 'start', ',', 'end', ',', 'role', ',', 'poll_period', '=', '120', ',', 'session', '=', 'None', ',', 'name', '=', '""', ',', 'region', '=', 'None', ')', ':', 'start', '=', 'start', 'and', 'isinstance', '(', 'start', ',', 'six', '.', 'string_types', ')', 'and', 'parse', '(', 'start', ')', 'or', 'start', 'end', '=', '(', 'end', 'and', 'isinstance', '(', 'start', ',', 'six', '.', 'string_types', ')', 'and', 'parse', '(', 'end', ')', 'or', 'end', 'or', 'datetime', '.', 'now', '(', ')', ')', 'start', '=', 'start', '.', 'replace', '(', 'tzinfo', '=', 'tzlocal', '(', ')', ')', '.', 'astimezone', '(', 'tzutc', '(', ')', ')', 'end', '=', 'end', '.', 'replace', '(', 'tzinfo', '=', 'tzlocal', '(', ')', ')', '.', 'astimezone', '(', 'tzutc', '(', ')', ')', 'if', 'session', 'is', 'None', ':', 'session', '=', 'get_session', '(', 'role', ',', 'region', ')', 'client', '=', 'session', '.', 'client', '(', "'logs'", ')', 'paginator', '=', 'client', '.', 'get_paginator', '(', "'describe_log_groups'", ')', 'for', 'p', 'in', 'paginator', '.', 'paginate', '(', ')', ':', 'found', '=', 'False', 'for', '_group', 'in', 'p', '[', "'logGroups'", ']', ':', 'if', '_group', '[', "'logGroupName'", ']', '==', 'group', ':', 'group', '=', '_group', 'found', '=', 'True', 'break', 'if', 'found', ':', 'break', 'if', 'not', 'found', ':', 'raise', 'ValueError', '(', '"Log group %s not found."', '%', 'group', ')', 'if', 'prefix', ':', 'prefix', '=', '"%s/%s"', '%', '(', 'prefix', '.', 'rstrip', '(', "'/'", ')', ',', 'group', '[', "'logGroupName'", ']', '.', 'strip', '(', "'/'", ')', ')', 'else', ':', 'prefix', '=', 'group', '[', "'logGroupName'", ']', 'named_group', '=', '"%s:%s"', '%', '(', 'name', ',', 'group', '[', "'logGroupName'", ']', ')', 'log', '.', 'info', '(', '"Log exporting group:%s start:%s end:%s bucket:%s prefix:%s size:%s"', ',', 'named_group', ',', 'start', '.', 'strftime', '(', "'%Y/%m/%d'", ')', ',', 'end', '.', 'strftime', '(', "'%Y/%m/%d'", ')', ',', 'bucket', ',', 'prefix', ',', 'group', '[', "'storedBytes'", ']', ')', 't', '=', 'time', '.', 'time', '(', ')', 'days', '=', '[', '(', 'start', '+', 'timedelta', '(', 'i', ')', ')', '.', 'replace', '(', 'minute', '=', '0', ',', 'hour', '=', '0', ',', 'second', '=', '0', ',', 'microsecond', '=', '0', ')', 'for', 'i', 'in', 'range', '(', '(', 'end', '-', 'start', ')', '.', 'days', ')', ']', 'day_count', '=', 'len', '(', 'days', ')', 's3', '=', 'boto3', '.', 'Session', '(', ')', '.', 'client', '(', "'s3'", ')', 'days', '=', 'filter_extant_exports', '(', 's3', ',', 'bucket', ',', 'prefix', ',', 'days', ',', 'start', ',', 'end', ')', 'log', '.', 'info', '(', '"Group:%s filtering s3 extant keys from %d to %d start:%s end:%s"', ',', 'named_group', ',', 'day_count', ',', 'len', '(', 'days', ')', ',', 'days', '[', '0', ']', 'if', 'days', 'else', "''", ',', 'days', '[', '-', '1', ']', 'if', 'days', 'else', "''", ')', 't', '=', 'time', '.', 'time', '(', ')', 'retry', '=', 'get_retry', '(', '(', "'SlowDown'", ',', ')', ')', 'for', 'idx', ',', 'd', 'in', 'enumerate', '(', 'days', ')', ':', 'date', '=', 'd', '.', 'replace', '(', 'minute', '=', '0', ',', 'microsecond', '=', '0', ',', 'hour', '=', '0', ')', 'export_prefix', '=', '"%s%s"', '%', '(', 'prefix', ',', 'date', '.', 'strftime', '(', '"/%Y/%m/%d"', ')', ')', 'params', '=', '{', "'taskName'", ':', '"%s-%s"', '%', '(', '"c7n-log-exporter"', ',', 'date', '.', 'strftime', '(', '"%Y-%m-%d"', ')', ')', ',', "'logGroupName'", ':', 'group', '[', "'logGroupName'", ']', ',', "'fromTime'", ':', 'int', '(', 'time', '.', 'mktime', '(', 'date', '.', 'replace', '(', 'minute', '=', '0', ',', 'microsecond', '=', '0', ',', 'hour', '=', '0', ')', '.', 'timetuple', '(', ')', ')', '*', '1000', ')', ',', "'to'", ':', 'int', '(', 'time', '.', 'mktime', '(', 'date', '.', 'replace', '(', 'minute', '=', '59', ',', 'hour', '=', '23', ',', 'microsecond', '=', '0', ')', '.', 'timetuple', '(', ')', ')', '*', '1000', ')', ',', "'destination'", ':', 'bucket', ',', "'destinationPrefix'", ':', 'export_prefix', '}', '# if stream_prefix:', "# params['logStreamPrefix'] = stream_prefix", 'try', ':', 's3', '.', 'head_object', '(', 'Bucket', '=', 'bucket', ',', 'Key', '=', 'prefix', ')', 'except', 'ClientError', 'as', 'e', ':', 'if', 'e', '.', 'response', '[', "'Error'", ']', '[', "'Code'", ']', '!=', "'404'", ':', '# Not Found', 'raise', 's3', '.', 'put_object', '(', 'Bucket', '=', 'bucket', ',', 'Key', '=', 'prefix', ',', 'Body', '=', 'json', '.', 'dumps', '(', '{', '}', ')', ',', 'ACL', '=', '"bucket-owner-full-control"', ',', 'ServerSideEncryption', '=', '"AES256"', ')', 't', '=', 'time', '.', 'time', '(', ')', 'counter', '=', '0', 'while', 'True', ':', 'counter', '+=', '1', 'try', ':', 'result', '=', 'client', '.', 'create_export_task', '(', '*', '*', 'params', ')', 'except', 'ClientError', 'as', 'e', ':', 'if', 'e', '.', 'response', '[', "'Error'", ']', '[', "'Code'", ']', '==', "'LimitExceededException'", ':', 'time', '.', 'sleep', '(', 'poll_period', ')', '# log every 30m of export waiting', 'if', 'counter', '%', '6', '==', '0', ':', 'log', '.', 'debug', '(', '"group:%s day:%s waiting for %0.2f minutes"', ',', 'named_group', ',', 'd', '.', 'strftime', '(', "'%Y-%m-%d'", ')', ',', '(', 'counter', '*', 'poll_period', ')', '/', '60.0', ')', 'continue', 'raise', 'retry', '(', 's3', '.', 'put_object_tagging', ',', 'Bucket', '=', 'bucket', ',', 'Key', '=', 'prefix', ',', 'Tagging', '=', '{', "'TagSet'", ':', '[', '{', "'Key'", ':', "'LastExport'", ',', "'Value'", ':', 'd', '.', 'isoformat', '(', ')', '}', ']', '}', ')', 'break', 'log', '.', 'info', '(', '"Log export time:%0.2f group:%s day:%s bucket:%s prefix:%s task:%s"', ',', 'time', '.', 'time', '(', ')', '-', 't', ',', 'named_group', ',', 'd', '.', 'strftime', '(', '"%Y-%m-%d"', ')', ',', 'bucket', ',', 'params', '[', "'destinationPrefix'", ']', ',', 'result', '[', "'taskId'", ']', ')', 'log', '.', 'info', '(', '(', '"Exported log group:%s time:%0.2f days:%d start:%s"', '" end:%s bucket:%s prefix:%s"', ')', ',', 'named_group', ',', 'time', '.', 'time', '(', ')', '-', 't', ',', 'len', '(', 'days', ')', ',', 'start', '.', 'strftime', '(', "'%Y/%m/%d'", ')', ',', 'end', '.', 'strftime', '(', "'%Y/%m/%d'", ')', ',', 'bucket', ',', 'prefix', ')'] | export a given log group to s3 | ['export', 'a', 'given', 'log', 'group', 'to', 's3'] | train | https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/c7n_logexporter/c7n_logexporter/exporter.py#L777-L910 |
372 | ResidentMario/geoplot | geoplot/crs.py | Base.load | def load(self, df, centerings):
"""
A moderately mind-bendy meta-method which abstracts the internals of individual projections' load procedures.
Parameters
----------
proj : geoplot.crs object instance
A disguised reference to ``self``.
df : GeoDataFrame
The GeoDataFrame which has been passed as input to the plotter at the top level. This data is needed to
calculate reasonable centering variables in cases in which the user does not already provide them; which is,
incidentally, the reason behind all of this funny twice-instantiation loading in the first place.
centerings: dct
A dictionary containing names and centering methods. Certain projections have certain centering parameters
whilst others lack them. For example, the geospatial projection contains both ``central_longitude`` and
``central_latitude`` instance parameter, which together control the center of the plot, while the North Pole
Stereo projection has only a ``central_longitude`` instance parameter, implying that latitude is fixed (as
indeed it is, as this projection is centered on the North Pole!).
A top-level centerings method is provided in each of the ``geoplot`` top-level plot functions; each of the
projection wrapper classes defined here in turn selects the functions from this list relevent to this
particular instance and passes them to the ``_generic_load`` method here.
We then in turn execute these functions to get defaults for our ``df`` and pass them off to our output
``cartopy.crs`` instance.
Returns
-------
crs : ``cartopy.crs`` object instance
Returns a ``cartopy.crs`` object instance whose appropriate instance variables have been set to reasonable
defaults wherever not already provided by the user.
"""
centering_variables = dict()
if not df.empty and df.geometry.notna().any():
for key, func in centerings.items():
centering_variables[key] = func(df)
return getattr(ccrs, self.__class__.__name__)(**{**centering_variables, **self.args}) | python | def load(self, df, centerings):
"""
A moderately mind-bendy meta-method which abstracts the internals of individual projections' load procedures.
Parameters
----------
proj : geoplot.crs object instance
A disguised reference to ``self``.
df : GeoDataFrame
The GeoDataFrame which has been passed as input to the plotter at the top level. This data is needed to
calculate reasonable centering variables in cases in which the user does not already provide them; which is,
incidentally, the reason behind all of this funny twice-instantiation loading in the first place.
centerings: dct
A dictionary containing names and centering methods. Certain projections have certain centering parameters
whilst others lack them. For example, the geospatial projection contains both ``central_longitude`` and
``central_latitude`` instance parameter, which together control the center of the plot, while the North Pole
Stereo projection has only a ``central_longitude`` instance parameter, implying that latitude is fixed (as
indeed it is, as this projection is centered on the North Pole!).
A top-level centerings method is provided in each of the ``geoplot`` top-level plot functions; each of the
projection wrapper classes defined here in turn selects the functions from this list relevent to this
particular instance and passes them to the ``_generic_load`` method here.
We then in turn execute these functions to get defaults for our ``df`` and pass them off to our output
``cartopy.crs`` instance.
Returns
-------
crs : ``cartopy.crs`` object instance
Returns a ``cartopy.crs`` object instance whose appropriate instance variables have been set to reasonable
defaults wherever not already provided by the user.
"""
centering_variables = dict()
if not df.empty and df.geometry.notna().any():
for key, func in centerings.items():
centering_variables[key] = func(df)
return getattr(ccrs, self.__class__.__name__)(**{**centering_variables, **self.args}) | ['def', 'load', '(', 'self', ',', 'df', ',', 'centerings', ')', ':', 'centering_variables', '=', 'dict', '(', ')', 'if', 'not', 'df', '.', 'empty', 'and', 'df', '.', 'geometry', '.', 'notna', '(', ')', '.', 'any', '(', ')', ':', 'for', 'key', ',', 'func', 'in', 'centerings', '.', 'items', '(', ')', ':', 'centering_variables', '[', 'key', ']', '=', 'func', '(', 'df', ')', 'return', 'getattr', '(', 'ccrs', ',', 'self', '.', '__class__', '.', '__name__', ')', '(', '*', '*', '{', '*', '*', 'centering_variables', ',', '*', '*', 'self', '.', 'args', '}', ')'] | A moderately mind-bendy meta-method which abstracts the internals of individual projections' load procedures.
Parameters
----------
proj : geoplot.crs object instance
A disguised reference to ``self``.
df : GeoDataFrame
The GeoDataFrame which has been passed as input to the plotter at the top level. This data is needed to
calculate reasonable centering variables in cases in which the user does not already provide them; which is,
incidentally, the reason behind all of this funny twice-instantiation loading in the first place.
centerings: dct
A dictionary containing names and centering methods. Certain projections have certain centering parameters
whilst others lack them. For example, the geospatial projection contains both ``central_longitude`` and
``central_latitude`` instance parameter, which together control the center of the plot, while the North Pole
Stereo projection has only a ``central_longitude`` instance parameter, implying that latitude is fixed (as
indeed it is, as this projection is centered on the North Pole!).
A top-level centerings method is provided in each of the ``geoplot`` top-level plot functions; each of the
projection wrapper classes defined here in turn selects the functions from this list relevent to this
particular instance and passes them to the ``_generic_load`` method here.
We then in turn execute these functions to get defaults for our ``df`` and pass them off to our output
``cartopy.crs`` instance.
Returns
-------
crs : ``cartopy.crs`` object instance
Returns a ``cartopy.crs`` object instance whose appropriate instance variables have been set to reasonable
defaults wherever not already provided by the user. | ['A', 'moderately', 'mind', '-', 'bendy', 'meta', '-', 'method', 'which', 'abstracts', 'the', 'internals', 'of', 'individual', 'projections', 'load', 'procedures', '.'] | train | https://github.com/ResidentMario/geoplot/blob/942b474878187a87a95a27fbe41285dfdc1d20ca/geoplot/crs.py#L26-L62 |
373 | fitnr/convertdate | convertdate/hebrew.py | delay_1 | def delay_1(year):
'''Test for delay of start of new year and to avoid'''
# Sunday, Wednesday, and Friday as start of the new year.
months = trunc(((235 * year) - 234) / 19)
parts = 12084 + (13753 * months)
day = trunc((months * 29) + parts / 25920)
if ((3 * (day + 1)) % 7) < 3:
day += 1
return day | python | def delay_1(year):
'''Test for delay of start of new year and to avoid'''
# Sunday, Wednesday, and Friday as start of the new year.
months = trunc(((235 * year) - 234) / 19)
parts = 12084 + (13753 * months)
day = trunc((months * 29) + parts / 25920)
if ((3 * (day + 1)) % 7) < 3:
day += 1
return day | ['def', 'delay_1', '(', 'year', ')', ':', '# Sunday, Wednesday, and Friday as start of the new year.', 'months', '=', 'trunc', '(', '(', '(', '235', '*', 'year', ')', '-', '234', ')', '/', '19', ')', 'parts', '=', '12084', '+', '(', '13753', '*', 'months', ')', 'day', '=', 'trunc', '(', '(', 'months', '*', '29', ')', '+', 'parts', '/', '25920', ')', 'if', '(', '(', '3', '*', '(', 'day', '+', '1', ')', ')', '%', '7', ')', '<', '3', ':', 'day', '+=', '1', 'return', 'day'] | Test for delay of start of new year and to avoid | ['Test', 'for', 'delay', 'of', 'start', 'of', 'new', 'year', 'and', 'to', 'avoid'] | train | https://github.com/fitnr/convertdate/blob/e920f168a87f99183b0aa7290d6c3af222582d43/convertdate/hebrew.py#L46-L56 |
374 | saltstack/salt | salt/modules/azurearm_network.py | usages_list | def usages_list(location, **kwargs):
'''
.. versionadded:: 2019.2.0
List subscription network usage for a location.
:param location: The Azure location to query for network usage.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.usages_list westus
'''
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
result = __utils__['azurearm.paged_object_to_list'](netconn.usages.list(location))
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
result = {'error': str(exc)}
return result | python | def usages_list(location, **kwargs):
'''
.. versionadded:: 2019.2.0
List subscription network usage for a location.
:param location: The Azure location to query for network usage.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.usages_list westus
'''
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
result = __utils__['azurearm.paged_object_to_list'](netconn.usages.list(location))
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
result = {'error': str(exc)}
return result | ['def', 'usages_list', '(', 'location', ',', '*', '*', 'kwargs', ')', ':', 'netconn', '=', '__utils__', '[', "'azurearm.get_client'", ']', '(', "'network'", ',', '*', '*', 'kwargs', ')', 'try', ':', 'result', '=', '__utils__', '[', "'azurearm.paged_object_to_list'", ']', '(', 'netconn', '.', 'usages', '.', 'list', '(', 'location', ')', ')', 'except', 'CloudError', 'as', 'exc', ':', '__utils__', '[', "'azurearm.log_cloud_error'", ']', '(', "'network'", ',', 'str', '(', 'exc', ')', ',', '*', '*', 'kwargs', ')', 'result', '=', '{', "'error'", ':', 'str', '(', 'exc', ')', '}', 'return', 'result'] | .. versionadded:: 2019.2.0
List subscription network usage for a location.
:param location: The Azure location to query for network usage.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.usages_list westus | ['..', 'versionadded', '::', '2019', '.', '2', '.', '0'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/azurearm_network.py#L1359-L1381 |
375 | RedHatInsights/insights-core | insights/core/__init__.py | LogFileOutput.parse_content | def parse_content(self, content):
"""
Use all the defined scanners to search the log file, setting the
properties defined in the scanner.
"""
self.lines = content
for scanner in self.scanners:
scanner(self) | python | def parse_content(self, content):
"""
Use all the defined scanners to search the log file, setting the
properties defined in the scanner.
"""
self.lines = content
for scanner in self.scanners:
scanner(self) | ['def', 'parse_content', '(', 'self', ',', 'content', ')', ':', 'self', '.', 'lines', '=', 'content', 'for', 'scanner', 'in', 'self', '.', 'scanners', ':', 'scanner', '(', 'self', ')'] | Use all the defined scanners to search the log file, setting the
properties defined in the scanner. | ['Use', 'all', 'the', 'defined', 'scanners', 'to', 'search', 'the', 'log', 'file', 'setting', 'the', 'properties', 'defined', 'in', 'the', 'scanner', '.'] | train | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/__init__.py#L902-L909 |
376 | klen/zeta-library | zetalibrary/scss/__init__.py | Scss._do_for | def _do_for(self, rule, p_selectors, p_parents, p_children, scope, media, c_lineno, c_property, c_codestr, code, name):
"""
Implements @for
"""
var, _, name = name.partition('from')
frm, _, through = name.partition('through')
if not through:
frm, _, through = frm.partition('to')
frm = self.calculate(frm, rule[CONTEXT], rule[OPTIONS], rule)
through = self.calculate(through, rule[CONTEXT], rule[OPTIONS], rule)
try:
frm = int(float(frm))
through = int(float(through))
except ValueError:
pass
else:
if frm > through:
frm, through = through, frm
rev = reversed
else:
rev = lambda x: x
var = var.strip()
var = self.do_glob_math(
var, rule[CONTEXT], rule[OPTIONS], rule, True)
for i in rev(range(frm, through + 1)):
rule[CODESTR] = c_codestr
rule[CONTEXT][var] = str(i)
self.manage_children(
rule, p_selectors, p_parents, p_children, scope, media) | python | def _do_for(self, rule, p_selectors, p_parents, p_children, scope, media, c_lineno, c_property, c_codestr, code, name):
"""
Implements @for
"""
var, _, name = name.partition('from')
frm, _, through = name.partition('through')
if not through:
frm, _, through = frm.partition('to')
frm = self.calculate(frm, rule[CONTEXT], rule[OPTIONS], rule)
through = self.calculate(through, rule[CONTEXT], rule[OPTIONS], rule)
try:
frm = int(float(frm))
through = int(float(through))
except ValueError:
pass
else:
if frm > through:
frm, through = through, frm
rev = reversed
else:
rev = lambda x: x
var = var.strip()
var = self.do_glob_math(
var, rule[CONTEXT], rule[OPTIONS], rule, True)
for i in rev(range(frm, through + 1)):
rule[CODESTR] = c_codestr
rule[CONTEXT][var] = str(i)
self.manage_children(
rule, p_selectors, p_parents, p_children, scope, media) | ['def', '_do_for', '(', 'self', ',', 'rule', ',', 'p_selectors', ',', 'p_parents', ',', 'p_children', ',', 'scope', ',', 'media', ',', 'c_lineno', ',', 'c_property', ',', 'c_codestr', ',', 'code', ',', 'name', ')', ':', 'var', ',', '_', ',', 'name', '=', 'name', '.', 'partition', '(', "'from'", ')', 'frm', ',', '_', ',', 'through', '=', 'name', '.', 'partition', '(', "'through'", ')', 'if', 'not', 'through', ':', 'frm', ',', '_', ',', 'through', '=', 'frm', '.', 'partition', '(', "'to'", ')', 'frm', '=', 'self', '.', 'calculate', '(', 'frm', ',', 'rule', '[', 'CONTEXT', ']', ',', 'rule', '[', 'OPTIONS', ']', ',', 'rule', ')', 'through', '=', 'self', '.', 'calculate', '(', 'through', ',', 'rule', '[', 'CONTEXT', ']', ',', 'rule', '[', 'OPTIONS', ']', ',', 'rule', ')', 'try', ':', 'frm', '=', 'int', '(', 'float', '(', 'frm', ')', ')', 'through', '=', 'int', '(', 'float', '(', 'through', ')', ')', 'except', 'ValueError', ':', 'pass', 'else', ':', 'if', 'frm', '>', 'through', ':', 'frm', ',', 'through', '=', 'through', ',', 'frm', 'rev', '=', 'reversed', 'else', ':', 'rev', '=', 'lambda', 'x', ':', 'x', 'var', '=', 'var', '.', 'strip', '(', ')', 'var', '=', 'self', '.', 'do_glob_math', '(', 'var', ',', 'rule', '[', 'CONTEXT', ']', ',', 'rule', '[', 'OPTIONS', ']', ',', 'rule', ',', 'True', ')', 'for', 'i', 'in', 'rev', '(', 'range', '(', 'frm', ',', 'through', '+', '1', ')', ')', ':', 'rule', '[', 'CODESTR', ']', '=', 'c_codestr', 'rule', '[', 'CONTEXT', ']', '[', 'var', ']', '=', 'str', '(', 'i', ')', 'self', '.', 'manage_children', '(', 'rule', ',', 'p_selectors', ',', 'p_parents', ',', 'p_children', ',', 'scope', ',', 'media', ')'] | Implements @for | ['Implements'] | train | https://github.com/klen/zeta-library/blob/b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd/zetalibrary/scss/__init__.py#L1320-L1349 |
377 | hardbyte/python-can | can/interfaces/ics_neovi/neovi_bus.py | NeoViBus.get_serial_number | def get_serial_number(device):
"""Decode (if needed) and return the ICS device serial string
:param device: ics device
:return: ics device serial string
:rtype: str
"""
a0000 = 604661760
if device.SerialNumber >= a0000:
return ics.base36enc(device.SerialNumber)
return str(device.SerialNumber) | python | def get_serial_number(device):
"""Decode (if needed) and return the ICS device serial string
:param device: ics device
:return: ics device serial string
:rtype: str
"""
a0000 = 604661760
if device.SerialNumber >= a0000:
return ics.base36enc(device.SerialNumber)
return str(device.SerialNumber) | ['def', 'get_serial_number', '(', 'device', ')', ':', 'a0000', '=', '604661760', 'if', 'device', '.', 'SerialNumber', '>=', 'a0000', ':', 'return', 'ics', '.', 'base36enc', '(', 'device', '.', 'SerialNumber', ')', 'return', 'str', '(', 'device', '.', 'SerialNumber', ')'] | Decode (if needed) and return the ICS device serial string
:param device: ics device
:return: ics device serial string
:rtype: str | ['Decode', '(', 'if', 'needed', ')', 'and', 'return', 'the', 'ICS', 'device', 'serial', 'string'] | train | https://github.com/hardbyte/python-can/blob/cdc5254d96072df7739263623f3e920628a7d214/can/interfaces/ics_neovi/neovi_bus.py#L164-L174 |
378 | vecnet/vecnet.openmalaria | vecnet/openmalaria/scenario/interventions.py | HumanInterventions.add | def add(self, intervention, id=None):
"""
Add an intervention to intervention/human section.
intervention is either ElementTree or xml snippet
"""
if self.et is None:
return
assert isinstance(intervention, six.string_types)
et = ElementTree.fromstring(intervention)
component = None
if et.find("ITN") is not None:
component = ITN(et)
elif et.find("GVI") is not None:
component = GVI(et)
elif et.find("MDA") is not None:
component = MDA(et)
elif et.find("TBV") is not None or et.find("PEV") is not None or et.find("BSV") is not None:
component = Vaccine(et)
else:
return
assert isinstance(component.name, six.string_types)
if id is not None:
assert isinstance(id, six.string_types)
et.attrib["id"] = id
index = len(self.et.findall("component"))
self.et.insert(index, et) | python | def add(self, intervention, id=None):
"""
Add an intervention to intervention/human section.
intervention is either ElementTree or xml snippet
"""
if self.et is None:
return
assert isinstance(intervention, six.string_types)
et = ElementTree.fromstring(intervention)
component = None
if et.find("ITN") is not None:
component = ITN(et)
elif et.find("GVI") is not None:
component = GVI(et)
elif et.find("MDA") is not None:
component = MDA(et)
elif et.find("TBV") is not None or et.find("PEV") is not None or et.find("BSV") is not None:
component = Vaccine(et)
else:
return
assert isinstance(component.name, six.string_types)
if id is not None:
assert isinstance(id, six.string_types)
et.attrib["id"] = id
index = len(self.et.findall("component"))
self.et.insert(index, et) | ['def', 'add', '(', 'self', ',', 'intervention', ',', 'id', '=', 'None', ')', ':', 'if', 'self', '.', 'et', 'is', 'None', ':', 'return', 'assert', 'isinstance', '(', 'intervention', ',', 'six', '.', 'string_types', ')', 'et', '=', 'ElementTree', '.', 'fromstring', '(', 'intervention', ')', 'component', '=', 'None', 'if', 'et', '.', 'find', '(', '"ITN"', ')', 'is', 'not', 'None', ':', 'component', '=', 'ITN', '(', 'et', ')', 'elif', 'et', '.', 'find', '(', '"GVI"', ')', 'is', 'not', 'None', ':', 'component', '=', 'GVI', '(', 'et', ')', 'elif', 'et', '.', 'find', '(', '"MDA"', ')', 'is', 'not', 'None', ':', 'component', '=', 'MDA', '(', 'et', ')', 'elif', 'et', '.', 'find', '(', '"TBV"', ')', 'is', 'not', 'None', 'or', 'et', '.', 'find', '(', '"PEV"', ')', 'is', 'not', 'None', 'or', 'et', '.', 'find', '(', '"BSV"', ')', 'is', 'not', 'None', ':', 'component', '=', 'Vaccine', '(', 'et', ')', 'else', ':', 'return', 'assert', 'isinstance', '(', 'component', '.', 'name', ',', 'six', '.', 'string_types', ')', 'if', 'id', 'is', 'not', 'None', ':', 'assert', 'isinstance', '(', 'id', ',', 'six', '.', 'string_types', ')', 'et', '.', 'attrib', '[', '"id"', ']', '=', 'id', 'index', '=', 'len', '(', 'self', '.', 'et', '.', 'findall', '(', '"component"', ')', ')', 'self', '.', 'et', '.', 'insert', '(', 'index', ',', 'et', ')'] | Add an intervention to intervention/human section.
intervention is either ElementTree or xml snippet | ['Add', 'an', 'intervention', 'to', 'intervention', '/', 'human', 'section', '.', 'intervention', 'is', 'either', 'ElementTree', 'or', 'xml', 'snippet'] | train | https://github.com/vecnet/vecnet.openmalaria/blob/795bc9d1b81a6c664f14879edda7a7c41188e95a/vecnet/openmalaria/scenario/interventions.py#L690-L720 |
379 | scanny/python-pptx | pptx/text/text.py | _Paragraph.clear | def clear(self):
"""
Remove all content from this paragraph. Paragraph properties are
preserved. Content includes runs, line breaks, and fields.
"""
for elm in self._element.content_children:
self._element.remove(elm)
return self | python | def clear(self):
"""
Remove all content from this paragraph. Paragraph properties are
preserved. Content includes runs, line breaks, and fields.
"""
for elm in self._element.content_children:
self._element.remove(elm)
return self | ['def', 'clear', '(', 'self', ')', ':', 'for', 'elm', 'in', 'self', '.', '_element', '.', 'content_children', ':', 'self', '.', '_element', '.', 'remove', '(', 'elm', ')', 'return', 'self'] | Remove all content from this paragraph. Paragraph properties are
preserved. Content includes runs, line breaks, and fields. | ['Remove', 'all', 'content', 'from', 'this', 'paragraph', '.', 'Paragraph', 'properties', 'are', 'preserved', '.', 'Content', 'includes', 'runs', 'line', 'breaks', 'and', 'fields', '.'] | train | https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/text/text.py#L498-L505 |
380 | twilio/twilio-python | twilio/rest/__init__.py | Client.chat | def chat(self):
"""
Access the Chat Twilio Domain
:returns: Chat Twilio Domain
:rtype: twilio.rest.chat.Chat
"""
if self._chat is None:
from twilio.rest.chat import Chat
self._chat = Chat(self)
return self._chat | python | def chat(self):
"""
Access the Chat Twilio Domain
:returns: Chat Twilio Domain
:rtype: twilio.rest.chat.Chat
"""
if self._chat is None:
from twilio.rest.chat import Chat
self._chat = Chat(self)
return self._chat | ['def', 'chat', '(', 'self', ')', ':', 'if', 'self', '.', '_chat', 'is', 'None', ':', 'from', 'twilio', '.', 'rest', '.', 'chat', 'import', 'Chat', 'self', '.', '_chat', '=', 'Chat', '(', 'self', ')', 'return', 'self', '.', '_chat'] | Access the Chat Twilio Domain
:returns: Chat Twilio Domain
:rtype: twilio.rest.chat.Chat | ['Access', 'the', 'Chat', 'Twilio', 'Domain'] | train | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/__init__.py#L185-L195 |
381 | GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | one_to_max | def one_to_max(array_in):
"""Alter a vector of cluster labels to a dense mapping.
Given that this function is herein always called after passing
a vector to the function checkcl, one_to_max relies on the assumption
that cluster_run does not contain any NaN entries.
Parameters
----------
array_in : a list or one-dimensional array
The list of cluster IDs to be processed.
Returns
-------
result : one-dimensional array
A massaged version of the input vector of cluster identities.
"""
x = np.asanyarray(array_in)
N_in = x.size
array_in = x.reshape(N_in)
sorted_array = np.sort(array_in)
sorting_indices = np.argsort(array_in)
last = np.nan
current_index = -1
for i in range(N_in):
if last != sorted_array[i] or np.isnan(last):
last = sorted_array[i]
current_index += 1
sorted_array[i] = current_index
result = np.empty(N_in, dtype = int)
result[sorting_indices] = sorted_array
return result | python | def one_to_max(array_in):
"""Alter a vector of cluster labels to a dense mapping.
Given that this function is herein always called after passing
a vector to the function checkcl, one_to_max relies on the assumption
that cluster_run does not contain any NaN entries.
Parameters
----------
array_in : a list or one-dimensional array
The list of cluster IDs to be processed.
Returns
-------
result : one-dimensional array
A massaged version of the input vector of cluster identities.
"""
x = np.asanyarray(array_in)
N_in = x.size
array_in = x.reshape(N_in)
sorted_array = np.sort(array_in)
sorting_indices = np.argsort(array_in)
last = np.nan
current_index = -1
for i in range(N_in):
if last != sorted_array[i] or np.isnan(last):
last = sorted_array[i]
current_index += 1
sorted_array[i] = current_index
result = np.empty(N_in, dtype = int)
result[sorting_indices] = sorted_array
return result | ['def', 'one_to_max', '(', 'array_in', ')', ':', 'x', '=', 'np', '.', 'asanyarray', '(', 'array_in', ')', 'N_in', '=', 'x', '.', 'size', 'array_in', '=', 'x', '.', 'reshape', '(', 'N_in', ')', 'sorted_array', '=', 'np', '.', 'sort', '(', 'array_in', ')', 'sorting_indices', '=', 'np', '.', 'argsort', '(', 'array_in', ')', 'last', '=', 'np', '.', 'nan', 'current_index', '=', '-', '1', 'for', 'i', 'in', 'range', '(', 'N_in', ')', ':', 'if', 'last', '!=', 'sorted_array', '[', 'i', ']', 'or', 'np', '.', 'isnan', '(', 'last', ')', ':', 'last', '=', 'sorted_array', '[', 'i', ']', 'current_index', '+=', '1', 'sorted_array', '[', 'i', ']', '=', 'current_index', 'result', '=', 'np', '.', 'empty', '(', 'N_in', ',', 'dtype', '=', 'int', ')', 'result', '[', 'sorting_indices', ']', '=', 'sorted_array', 'return', 'result'] | Alter a vector of cluster labels to a dense mapping.
Given that this function is herein always called after passing
a vector to the function checkcl, one_to_max relies on the assumption
that cluster_run does not contain any NaN entries.
Parameters
----------
array_in : a list or one-dimensional array
The list of cluster IDs to be processed.
Returns
-------
result : one-dimensional array
A massaged version of the input vector of cluster identities. | ['Alter', 'a', 'vector', 'of', 'cluster', 'labels', 'to', 'a', 'dense', 'mapping', '.', 'Given', 'that', 'this', 'function', 'is', 'herein', 'always', 'called', 'after', 'passing', 'a', 'vector', 'to', 'the', 'function', 'checkcl', 'one_to_max', 'relies', 'on', 'the', 'assumption', 'that', 'cluster_run', 'does', 'not', 'contain', 'any', 'NaN', 'entries', '.'] | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L433-L469 |
382 | autokey/autokey | lib/autokey/scripting.py | Window.activate | def activate(self, title, switchDesktop=False, matchClass=False):
"""
Activate the specified window, giving it input focus
Usage: C{window.activate(title, switchDesktop=False, matchClass=False)}
If switchDesktop is False (default), the window will be moved to the current desktop
and activated. Otherwise, switch to the window's current desktop and activate it there.
@param title: window title to match against (as case-insensitive substring match)
@param switchDesktop: whether or not to switch to the window's current desktop
@param matchClass: if True, match on the window class instead of the title
"""
if switchDesktop:
args = ["-a", title]
else:
args = ["-R", title]
if matchClass:
args += ["-x"]
self._run_wmctrl(args) | python | def activate(self, title, switchDesktop=False, matchClass=False):
"""
Activate the specified window, giving it input focus
Usage: C{window.activate(title, switchDesktop=False, matchClass=False)}
If switchDesktop is False (default), the window will be moved to the current desktop
and activated. Otherwise, switch to the window's current desktop and activate it there.
@param title: window title to match against (as case-insensitive substring match)
@param switchDesktop: whether or not to switch to the window's current desktop
@param matchClass: if True, match on the window class instead of the title
"""
if switchDesktop:
args = ["-a", title]
else:
args = ["-R", title]
if matchClass:
args += ["-x"]
self._run_wmctrl(args) | ['def', 'activate', '(', 'self', ',', 'title', ',', 'switchDesktop', '=', 'False', ',', 'matchClass', '=', 'False', ')', ':', 'if', 'switchDesktop', ':', 'args', '=', '[', '"-a"', ',', 'title', ']', 'else', ':', 'args', '=', '[', '"-R"', ',', 'title', ']', 'if', 'matchClass', ':', 'args', '+=', '[', '"-x"', ']', 'self', '.', '_run_wmctrl', '(', 'args', ')'] | Activate the specified window, giving it input focus
Usage: C{window.activate(title, switchDesktop=False, matchClass=False)}
If switchDesktop is False (default), the window will be moved to the current desktop
and activated. Otherwise, switch to the window's current desktop and activate it there.
@param title: window title to match against (as case-insensitive substring match)
@param switchDesktop: whether or not to switch to the window's current desktop
@param matchClass: if True, match on the window class instead of the title | ['Activate', 'the', 'specified', 'window', 'giving', 'it', 'input', 'focus'] | train | https://github.com/autokey/autokey/blob/35decb72f286ce68cd2a1f09ace8891a520b58d1/lib/autokey/scripting.py#L963-L982 |
383 | tmoerman/arboreto | arboreto/core.py | target_gene_indices | def target_gene_indices(gene_names,
target_genes):
"""
:param gene_names: list of gene names.
:param target_genes: either int (the top n), 'all', or a collection (subset of gene_names).
:return: the (column) indices of the target genes in the expression_matrix.
"""
if isinstance(target_genes, list) and len(target_genes) == 0:
return []
if isinstance(target_genes, str) and target_genes.upper() == 'ALL':
return list(range(len(gene_names)))
elif isinstance(target_genes, int):
top_n = target_genes
assert top_n > 0
return list(range(min(top_n, len(gene_names))))
elif isinstance(target_genes, list):
if not target_genes: # target_genes is empty
return target_genes
elif all(isinstance(target_gene, str) for target_gene in target_genes):
return [index for index, gene in enumerate(gene_names) if gene in target_genes]
elif all(isinstance(target_gene, int) for target_gene in target_genes):
return target_genes
else:
raise ValueError("Mixed types in target genes.")
else:
raise ValueError("Unable to interpret target_genes.") | python | def target_gene_indices(gene_names,
target_genes):
"""
:param gene_names: list of gene names.
:param target_genes: either int (the top n), 'all', or a collection (subset of gene_names).
:return: the (column) indices of the target genes in the expression_matrix.
"""
if isinstance(target_genes, list) and len(target_genes) == 0:
return []
if isinstance(target_genes, str) and target_genes.upper() == 'ALL':
return list(range(len(gene_names)))
elif isinstance(target_genes, int):
top_n = target_genes
assert top_n > 0
return list(range(min(top_n, len(gene_names))))
elif isinstance(target_genes, list):
if not target_genes: # target_genes is empty
return target_genes
elif all(isinstance(target_gene, str) for target_gene in target_genes):
return [index for index, gene in enumerate(gene_names) if gene in target_genes]
elif all(isinstance(target_gene, int) for target_gene in target_genes):
return target_genes
else:
raise ValueError("Mixed types in target genes.")
else:
raise ValueError("Unable to interpret target_genes.") | ['def', 'target_gene_indices', '(', 'gene_names', ',', 'target_genes', ')', ':', 'if', 'isinstance', '(', 'target_genes', ',', 'list', ')', 'and', 'len', '(', 'target_genes', ')', '==', '0', ':', 'return', '[', ']', 'if', 'isinstance', '(', 'target_genes', ',', 'str', ')', 'and', 'target_genes', '.', 'upper', '(', ')', '==', "'ALL'", ':', 'return', 'list', '(', 'range', '(', 'len', '(', 'gene_names', ')', ')', ')', 'elif', 'isinstance', '(', 'target_genes', ',', 'int', ')', ':', 'top_n', '=', 'target_genes', 'assert', 'top_n', '>', '0', 'return', 'list', '(', 'range', '(', 'min', '(', 'top_n', ',', 'len', '(', 'gene_names', ')', ')', ')', ')', 'elif', 'isinstance', '(', 'target_genes', ',', 'list', ')', ':', 'if', 'not', 'target_genes', ':', '# target_genes is empty', 'return', 'target_genes', 'elif', 'all', '(', 'isinstance', '(', 'target_gene', ',', 'str', ')', 'for', 'target_gene', 'in', 'target_genes', ')', ':', 'return', '[', 'index', 'for', 'index', ',', 'gene', 'in', 'enumerate', '(', 'gene_names', ')', 'if', 'gene', 'in', 'target_genes', ']', 'elif', 'all', '(', 'isinstance', '(', 'target_gene', ',', 'int', ')', 'for', 'target_gene', 'in', 'target_genes', ')', ':', 'return', 'target_genes', 'else', ':', 'raise', 'ValueError', '(', '"Mixed types in target genes."', ')', 'else', ':', 'raise', 'ValueError', '(', '"Unable to interpret target_genes."', ')'] | :param gene_names: list of gene names.
:param target_genes: either int (the top n), 'all', or a collection (subset of gene_names).
:return: the (column) indices of the target genes in the expression_matrix. | [':', 'param', 'gene_names', ':', 'list', 'of', 'gene', 'names', '.', ':', 'param', 'target_genes', ':', 'either', 'int', '(', 'the', 'top', 'n', ')', 'all', 'or', 'a', 'collection', '(', 'subset', 'of', 'gene_names', ')', '.', ':', 'return', ':', 'the', '(', 'column', ')', 'indices', 'of', 'the', 'target', 'genes', 'in', 'the', 'expression_matrix', '.'] | train | https://github.com/tmoerman/arboreto/blob/3ff7b6f987b32e5774771751dea646fa6feaaa52/arboreto/core.py#L326-L357 |
384 | brbsix/subnuker | subnuker.py | getch | def getch():
"""Request a single character input from the user."""
if sys.platform in ['darwin', 'linux']:
import termios
import tty
file_descriptor = sys.stdin.fileno()
settings = termios.tcgetattr(file_descriptor)
try:
tty.setraw(file_descriptor)
return sys.stdin.read(1)
finally:
termios.tcsetattr(file_descriptor, termios.TCSADRAIN, settings)
elif sys.platform in ['cygwin', 'win32']:
import msvcrt
return msvcrt.getwch() | python | def getch():
"""Request a single character input from the user."""
if sys.platform in ['darwin', 'linux']:
import termios
import tty
file_descriptor = sys.stdin.fileno()
settings = termios.tcgetattr(file_descriptor)
try:
tty.setraw(file_descriptor)
return sys.stdin.read(1)
finally:
termios.tcsetattr(file_descriptor, termios.TCSADRAIN, settings)
elif sys.platform in ['cygwin', 'win32']:
import msvcrt
return msvcrt.getwch() | ['def', 'getch', '(', ')', ':', 'if', 'sys', '.', 'platform', 'in', '[', "'darwin'", ',', "'linux'", ']', ':', 'import', 'termios', 'import', 'tty', 'file_descriptor', '=', 'sys', '.', 'stdin', '.', 'fileno', '(', ')', 'settings', '=', 'termios', '.', 'tcgetattr', '(', 'file_descriptor', ')', 'try', ':', 'tty', '.', 'setraw', '(', 'file_descriptor', ')', 'return', 'sys', '.', 'stdin', '.', 'read', '(', '1', ')', 'finally', ':', 'termios', '.', 'tcsetattr', '(', 'file_descriptor', ',', 'termios', '.', 'TCSADRAIN', ',', 'settings', ')', 'elif', 'sys', '.', 'platform', 'in', '[', "'cygwin'", ',', "'win32'", ']', ':', 'import', 'msvcrt', 'return', 'msvcrt', '.', 'getwch', '(', ')'] | Request a single character input from the user. | ['Request', 'a', 'single', 'character', 'input', 'from', 'the', 'user', '.'] | train | https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L348-L363 |
385 | amperser/proselint | proselint/checks/misc/currency.py | check | def check(text):
"""Check the text."""
err = "misc.currency"
msg = u"Incorrect use of symbols in {}."
symbols = [
"\$[\d]* ?(?:dollars|usd|us dollars)"
]
return existence_check(text, symbols, err, msg) | python | def check(text):
"""Check the text."""
err = "misc.currency"
msg = u"Incorrect use of symbols in {}."
symbols = [
"\$[\d]* ?(?:dollars|usd|us dollars)"
]
return existence_check(text, symbols, err, msg) | ['def', 'check', '(', 'text', ')', ':', 'err', '=', '"misc.currency"', 'msg', '=', 'u"Incorrect use of symbols in {}."', 'symbols', '=', '[', '"\\$[\\d]* ?(?:dollars|usd|us dollars)"', ']', 'return', 'existence_check', '(', 'text', ',', 'symbols', ',', 'err', ',', 'msg', ')'] | Check the text. | ['Check', 'the', 'text', '.'] | train | https://github.com/amperser/proselint/blob/cb619ee4023cc7856f5fb96aec2a33a2c9f1a2e2/proselint/checks/misc/currency.py#L20-L29 |
386 | maxalbert/tohu | tohu/v4/item_list.py | ItemList.to_csv | def to_csv(self, filename=None, *, fields=None, append=False, header=True, header_prefix='', sep=',', newline='\n'):
"""
Parameters
----------
filename: str or None
The file to which output will be written. By default, any existing content is
overwritten. Use `append=True` to open the file in append mode instead.
If filename is None, the generated CSV output is returned instead of written
to a file.
fields: list or dict
List of field names to export, or dictionary mapping output column names
to attribute names of the generators.
Examples:
fields=['field_name_1', 'field_name_2']
fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'}
append: bool
If `True`, open the file in 'append' mode to avoid overwriting existing content.
Default is `False`, i.e. any existing content will be overwritten.
This argument only has an effect if `filename` is given (i.e. if output happens
to a file instead of returning a CSV string).
header: bool or str or None
If `header=False` or `header=None` then no header line will be written.
If `header` is a string then this string will be used as the header line.
If `header=True` then a header line will be automatically generated from
the field names of the custom generator.
header_prefix: str
If `header=True` then the auto-generated header line will be prefixed
with `header_prefix` (otherwise this argument has no effect). For example,
set `header_prefix='#'` to make the header line start with '#'. Default: ''
sep: str
Field separator to use in the output. Default: ','
newline: str
Line terminator to use in the output. Default: '\n'
Returns
-------
The return value depends on the value of `filename`.
If `filename` is given, writes the output to the file and returns `None`.
If `filename` is `None`, returns a string containing the CSV output.
"""
assert isinstance(append, bool)
if fields is None:
raise NotImplementedError("TODO: derive field names automatically from the generator which produced this item list")
if isinstance(fields, (list, tuple)):
fields = {name: name for name in fields}
header_line = _generate_csv_header_line(header=header, header_prefix=header_prefix, header_names=fields.keys(), sep=sep, newline=newline)
if filename is not None:
# ensure parent directory of output file exits
dirname = os.path.dirname(os.path.abspath(filename))
if not os.path.exists(dirname):
os.makedirs(dirname)
file_or_string = open(filename, 'a' if append else 'w') if (filename is not None) else io.StringIO()
retval = None
attr_getters = [attrgetter(attr_name) for attr_name in fields.values()]
try:
file_or_string.write(header_line)
for x in self.items:
line = sep.join([format(func(x)) for func in attr_getters]) + newline
file_or_string.write(line)
if filename is None:
retval = file_or_string.getvalue()
finally:
file_or_string.close()
return retval | python | def to_csv(self, filename=None, *, fields=None, append=False, header=True, header_prefix='', sep=',', newline='\n'):
"""
Parameters
----------
filename: str or None
The file to which output will be written. By default, any existing content is
overwritten. Use `append=True` to open the file in append mode instead.
If filename is None, the generated CSV output is returned instead of written
to a file.
fields: list or dict
List of field names to export, or dictionary mapping output column names
to attribute names of the generators.
Examples:
fields=['field_name_1', 'field_name_2']
fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'}
append: bool
If `True`, open the file in 'append' mode to avoid overwriting existing content.
Default is `False`, i.e. any existing content will be overwritten.
This argument only has an effect if `filename` is given (i.e. if output happens
to a file instead of returning a CSV string).
header: bool or str or None
If `header=False` or `header=None` then no header line will be written.
If `header` is a string then this string will be used as the header line.
If `header=True` then a header line will be automatically generated from
the field names of the custom generator.
header_prefix: str
If `header=True` then the auto-generated header line will be prefixed
with `header_prefix` (otherwise this argument has no effect). For example,
set `header_prefix='#'` to make the header line start with '#'. Default: ''
sep: str
Field separator to use in the output. Default: ','
newline: str
Line terminator to use in the output. Default: '\n'
Returns
-------
The return value depends on the value of `filename`.
If `filename` is given, writes the output to the file and returns `None`.
If `filename` is `None`, returns a string containing the CSV output.
"""
assert isinstance(append, bool)
if fields is None:
raise NotImplementedError("TODO: derive field names automatically from the generator which produced this item list")
if isinstance(fields, (list, tuple)):
fields = {name: name for name in fields}
header_line = _generate_csv_header_line(header=header, header_prefix=header_prefix, header_names=fields.keys(), sep=sep, newline=newline)
if filename is not None:
# ensure parent directory of output file exits
dirname = os.path.dirname(os.path.abspath(filename))
if not os.path.exists(dirname):
os.makedirs(dirname)
file_or_string = open(filename, 'a' if append else 'w') if (filename is not None) else io.StringIO()
retval = None
attr_getters = [attrgetter(attr_name) for attr_name in fields.values()]
try:
file_or_string.write(header_line)
for x in self.items:
line = sep.join([format(func(x)) for func in attr_getters]) + newline
file_or_string.write(line)
if filename is None:
retval = file_or_string.getvalue()
finally:
file_or_string.close()
return retval | ['def', 'to_csv', '(', 'self', ',', 'filename', '=', 'None', ',', '*', ',', 'fields', '=', 'None', ',', 'append', '=', 'False', ',', 'header', '=', 'True', ',', 'header_prefix', '=', "''", ',', 'sep', '=', "','", ',', 'newline', '=', "'\\n'", ')', ':', 'assert', 'isinstance', '(', 'append', ',', 'bool', ')', 'if', 'fields', 'is', 'None', ':', 'raise', 'NotImplementedError', '(', '"TODO: derive field names automatically from the generator which produced this item list"', ')', 'if', 'isinstance', '(', 'fields', ',', '(', 'list', ',', 'tuple', ')', ')', ':', 'fields', '=', '{', 'name', ':', 'name', 'for', 'name', 'in', 'fields', '}', 'header_line', '=', '_generate_csv_header_line', '(', 'header', '=', 'header', ',', 'header_prefix', '=', 'header_prefix', ',', 'header_names', '=', 'fields', '.', 'keys', '(', ')', ',', 'sep', '=', 'sep', ',', 'newline', '=', 'newline', ')', 'if', 'filename', 'is', 'not', 'None', ':', '# ensure parent directory of output file exits', 'dirname', '=', 'os', '.', 'path', '.', 'dirname', '(', 'os', '.', 'path', '.', 'abspath', '(', 'filename', ')', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'dirname', ')', ':', 'os', '.', 'makedirs', '(', 'dirname', ')', 'file_or_string', '=', 'open', '(', 'filename', ',', "'a'", 'if', 'append', 'else', "'w'", ')', 'if', '(', 'filename', 'is', 'not', 'None', ')', 'else', 'io', '.', 'StringIO', '(', ')', 'retval', '=', 'None', 'attr_getters', '=', '[', 'attrgetter', '(', 'attr_name', ')', 'for', 'attr_name', 'in', 'fields', '.', 'values', '(', ')', ']', 'try', ':', 'file_or_string', '.', 'write', '(', 'header_line', ')', 'for', 'x', 'in', 'self', '.', 'items', ':', 'line', '=', 'sep', '.', 'join', '(', '[', 'format', '(', 'func', '(', 'x', ')', ')', 'for', 'func', 'in', 'attr_getters', ']', ')', '+', 'newline', 'file_or_string', '.', 'write', '(', 'line', ')', 'if', 'filename', 'is', 'None', ':', 'retval', '=', 'file_or_string', '.', 'getvalue', '(', ')', 'finally', ':', 'file_or_string', '.', 'close', '(', ')', 'return', 'retval'] | Parameters
----------
filename: str or None
The file to which output will be written. By default, any existing content is
overwritten. Use `append=True` to open the file in append mode instead.
If filename is None, the generated CSV output is returned instead of written
to a file.
fields: list or dict
List of field names to export, or dictionary mapping output column names
to attribute names of the generators.
Examples:
fields=['field_name_1', 'field_name_2']
fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'}
append: bool
If `True`, open the file in 'append' mode to avoid overwriting existing content.
Default is `False`, i.e. any existing content will be overwritten.
This argument only has an effect if `filename` is given (i.e. if output happens
to a file instead of returning a CSV string).
header: bool or str or None
If `header=False` or `header=None` then no header line will be written.
If `header` is a string then this string will be used as the header line.
If `header=True` then a header line will be automatically generated from
the field names of the custom generator.
header_prefix: str
If `header=True` then the auto-generated header line will be prefixed
with `header_prefix` (otherwise this argument has no effect). For example,
set `header_prefix='#'` to make the header line start with '#'. Default: ''
sep: str
Field separator to use in the output. Default: ','
newline: str
Line terminator to use in the output. Default: '\n'
Returns
-------
The return value depends on the value of `filename`.
If `filename` is given, writes the output to the file and returns `None`.
If `filename` is `None`, returns a string containing the CSV output. | ['Parameters', '----------', 'filename', ':', 'str', 'or', 'None', 'The', 'file', 'to', 'which', 'output', 'will', 'be', 'written', '.', 'By', 'default', 'any', 'existing', 'content', 'is', 'overwritten', '.', 'Use', 'append', '=', 'True', 'to', 'open', 'the', 'file', 'in', 'append', 'mode', 'instead', '.', 'If', 'filename', 'is', 'None', 'the', 'generated', 'CSV', 'output', 'is', 'returned', 'instead', 'of', 'written', 'to', 'a', 'file', '.', 'fields', ':', 'list', 'or', 'dict', 'List', 'of', 'field', 'names', 'to', 'export', 'or', 'dictionary', 'mapping', 'output', 'column', 'names', 'to', 'attribute', 'names', 'of', 'the', 'generators', '.'] | train | https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v4/item_list.py#L132-L206 |
387 | 72squared/redpipe | redpipe/keyspaces.py | String.incrby | def incrby(self, name, amount=1):
"""
increment the value for key by value: int
:param name: str the name of the redis key
:param amount: int
:return: Future()
"""
with self.pipe as pipe:
return pipe.incrby(self.redis_key(name), amount=amount) | python | def incrby(self, name, amount=1):
"""
increment the value for key by value: int
:param name: str the name of the redis key
:param amount: int
:return: Future()
"""
with self.pipe as pipe:
return pipe.incrby(self.redis_key(name), amount=amount) | ['def', 'incrby', '(', 'self', ',', 'name', ',', 'amount', '=', '1', ')', ':', 'with', 'self', '.', 'pipe', 'as', 'pipe', ':', 'return', 'pipe', '.', 'incrby', '(', 'self', '.', 'redis_key', '(', 'name', ')', ',', 'amount', '=', 'amount', ')'] | increment the value for key by value: int
:param name: str the name of the redis key
:param amount: int
:return: Future() | ['increment', 'the', 'value', 'for', 'key', 'by', 'value', ':', 'int'] | train | https://github.com/72squared/redpipe/blob/e6ee518bc9f3e2fee323c8c53d08997799bd9b1b/redpipe/keyspaces.py#L664-L673 |
388 | spulec/moto | moto/dynamodb/models.py | DynamoType.compare | def compare(self, range_comparison, range_objs):
"""
Compares this type against comparison filters
"""
range_values = [obj.value for obj in range_objs]
comparison_func = get_comparison_func(range_comparison)
return comparison_func(self.value, *range_values) | python | def compare(self, range_comparison, range_objs):
"""
Compares this type against comparison filters
"""
range_values = [obj.value for obj in range_objs]
comparison_func = get_comparison_func(range_comparison)
return comparison_func(self.value, *range_values) | ['def', 'compare', '(', 'self', ',', 'range_comparison', ',', 'range_objs', ')', ':', 'range_values', '=', '[', 'obj', '.', 'value', 'for', 'obj', 'in', 'range_objs', ']', 'comparison_func', '=', 'get_comparison_func', '(', 'range_comparison', ')', 'return', 'comparison_func', '(', 'self', '.', 'value', ',', '*', 'range_values', ')'] | Compares this type against comparison filters | ['Compares', 'this', 'type', 'against', 'comparison', 'filters'] | train | https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/dynamodb/models.py#L47-L53 |
389 | androguard/androguard | androguard/core/bytecodes/dvm.py | MapList.show | def show(self):
"""
Print with a pretty display the MapList object
"""
bytecode._Print("MAP_LIST SIZE", self.size)
for i in self.map_item:
if i.item != self:
# FIXME this does not work for CodeItems!
# as we do not have the method analysis here...
i.show() | python | def show(self):
"""
Print with a pretty display the MapList object
"""
bytecode._Print("MAP_LIST SIZE", self.size)
for i in self.map_item:
if i.item != self:
# FIXME this does not work for CodeItems!
# as we do not have the method analysis here...
i.show() | ['def', 'show', '(', 'self', ')', ':', 'bytecode', '.', '_Print', '(', '"MAP_LIST SIZE"', ',', 'self', '.', 'size', ')', 'for', 'i', 'in', 'self', '.', 'map_item', ':', 'if', 'i', '.', 'item', '!=', 'self', ':', '# FIXME this does not work for CodeItems!', '# as we do not have the method analysis here...', 'i', '.', 'show', '(', ')'] | Print with a pretty display the MapList object | ['Print', 'with', 'a', 'pretty', 'display', 'the', 'MapList', 'object'] | train | https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/dvm.py#L7550-L7559 |
390 | openego/ding0 | ding0/grid/mv_grid/mv_connect.py | disconnect_node | def disconnect_node(node, target_obj_result, graph, debug):
""" Disconnects `node` from `target_obj`
Args
----
node: LVLoadAreaCentreDing0, i.e.
Origin node - Ding0 graph object (e.g. LVLoadAreaCentreDing0)
target_obj_result: LVLoadAreaCentreDing0, i.e.
Origin node - Ding0 graph object (e.g. LVLoadAreaCentreDing0)
graph: :networkx:`NetworkX Graph Obj< >`
NetworkX graph object with nodes and newly created branches
debug: bool
If True, information is printed during process
"""
# backup kind and type of branch
branch_kind = graph.adj[node][target_obj_result]['branch'].kind
branch_type = graph.adj[node][target_obj_result]['branch'].type
branch_ring = graph.adj[node][target_obj_result]['branch'].ring
graph.remove_edge(node, target_obj_result)
if isinstance(target_obj_result, MVCableDistributorDing0):
neighbor_nodes = list(graph.neighbors(target_obj_result))
if len(neighbor_nodes) == 2:
node.grid.remove_cable_distributor(target_obj_result)
branch_length = calc_geo_dist_vincenty(neighbor_nodes[0], neighbor_nodes[1])
graph.add_edge(neighbor_nodes[0], neighbor_nodes[1], branch=BranchDing0(length=branch_length,
kind=branch_kind,
type=branch_type,
ring=branch_ring))
if debug:
logger.debug('disconnect edge {0}-{1}'.format(node, target_obj_result)) | python | def disconnect_node(node, target_obj_result, graph, debug):
""" Disconnects `node` from `target_obj`
Args
----
node: LVLoadAreaCentreDing0, i.e.
Origin node - Ding0 graph object (e.g. LVLoadAreaCentreDing0)
target_obj_result: LVLoadAreaCentreDing0, i.e.
Origin node - Ding0 graph object (e.g. LVLoadAreaCentreDing0)
graph: :networkx:`NetworkX Graph Obj< >`
NetworkX graph object with nodes and newly created branches
debug: bool
If True, information is printed during process
"""
# backup kind and type of branch
branch_kind = graph.adj[node][target_obj_result]['branch'].kind
branch_type = graph.adj[node][target_obj_result]['branch'].type
branch_ring = graph.adj[node][target_obj_result]['branch'].ring
graph.remove_edge(node, target_obj_result)
if isinstance(target_obj_result, MVCableDistributorDing0):
neighbor_nodes = list(graph.neighbors(target_obj_result))
if len(neighbor_nodes) == 2:
node.grid.remove_cable_distributor(target_obj_result)
branch_length = calc_geo_dist_vincenty(neighbor_nodes[0], neighbor_nodes[1])
graph.add_edge(neighbor_nodes[0], neighbor_nodes[1], branch=BranchDing0(length=branch_length,
kind=branch_kind,
type=branch_type,
ring=branch_ring))
if debug:
logger.debug('disconnect edge {0}-{1}'.format(node, target_obj_result)) | ['def', 'disconnect_node', '(', 'node', ',', 'target_obj_result', ',', 'graph', ',', 'debug', ')', ':', '# backup kind and type of branch', 'branch_kind', '=', 'graph', '.', 'adj', '[', 'node', ']', '[', 'target_obj_result', ']', '[', "'branch'", ']', '.', 'kind', 'branch_type', '=', 'graph', '.', 'adj', '[', 'node', ']', '[', 'target_obj_result', ']', '[', "'branch'", ']', '.', 'type', 'branch_ring', '=', 'graph', '.', 'adj', '[', 'node', ']', '[', 'target_obj_result', ']', '[', "'branch'", ']', '.', 'ring', 'graph', '.', 'remove_edge', '(', 'node', ',', 'target_obj_result', ')', 'if', 'isinstance', '(', 'target_obj_result', ',', 'MVCableDistributorDing0', ')', ':', 'neighbor_nodes', '=', 'list', '(', 'graph', '.', 'neighbors', '(', 'target_obj_result', ')', ')', 'if', 'len', '(', 'neighbor_nodes', ')', '==', '2', ':', 'node', '.', 'grid', '.', 'remove_cable_distributor', '(', 'target_obj_result', ')', 'branch_length', '=', 'calc_geo_dist_vincenty', '(', 'neighbor_nodes', '[', '0', ']', ',', 'neighbor_nodes', '[', '1', ']', ')', 'graph', '.', 'add_edge', '(', 'neighbor_nodes', '[', '0', ']', ',', 'neighbor_nodes', '[', '1', ']', ',', 'branch', '=', 'BranchDing0', '(', 'length', '=', 'branch_length', ',', 'kind', '=', 'branch_kind', ',', 'type', '=', 'branch_type', ',', 'ring', '=', 'branch_ring', ')', ')', 'if', 'debug', ':', 'logger', '.', 'debug', '(', "'disconnect edge {0}-{1}'", '.', 'format', '(', 'node', ',', 'target_obj_result', ')', ')'] | Disconnects `node` from `target_obj`
Args
----
node: LVLoadAreaCentreDing0, i.e.
Origin node - Ding0 graph object (e.g. LVLoadAreaCentreDing0)
target_obj_result: LVLoadAreaCentreDing0, i.e.
Origin node - Ding0 graph object (e.g. LVLoadAreaCentreDing0)
graph: :networkx:`NetworkX Graph Obj< >`
NetworkX graph object with nodes and newly created branches
debug: bool
If True, information is printed during process | ['Disconnects', 'node', 'from', 'target_obj'] | train | https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/grid/mv_grid/mv_connect.py#L543-L580 |
391 | jameslyons/python_speech_features | python_speech_features/base.py | delta | def delta(feat, N):
"""Compute delta features from a feature vector sequence.
:param feat: A numpy array of size (NUMFRAMES by number of features) containing features. Each row holds 1 feature vector.
:param N: For each frame, calculate delta features based on preceding and following N frames
:returns: A numpy array of size (NUMFRAMES by number of features) containing delta features. Each row holds 1 delta feature vector.
"""
if N < 1:
raise ValueError('N must be an integer >= 1')
NUMFRAMES = len(feat)
denominator = 2 * sum([i**2 for i in range(1, N+1)])
delta_feat = numpy.empty_like(feat)
padded = numpy.pad(feat, ((N, N), (0, 0)), mode='edge') # padded version of feat
for t in range(NUMFRAMES):
delta_feat[t] = numpy.dot(numpy.arange(-N, N+1), padded[t : t+2*N+1]) / denominator # [t : t+2*N+1] == [(N+t)-N : (N+t)+N+1]
return delta_feat | python | def delta(feat, N):
"""Compute delta features from a feature vector sequence.
:param feat: A numpy array of size (NUMFRAMES by number of features) containing features. Each row holds 1 feature vector.
:param N: For each frame, calculate delta features based on preceding and following N frames
:returns: A numpy array of size (NUMFRAMES by number of features) containing delta features. Each row holds 1 delta feature vector.
"""
if N < 1:
raise ValueError('N must be an integer >= 1')
NUMFRAMES = len(feat)
denominator = 2 * sum([i**2 for i in range(1, N+1)])
delta_feat = numpy.empty_like(feat)
padded = numpy.pad(feat, ((N, N), (0, 0)), mode='edge') # padded version of feat
for t in range(NUMFRAMES):
delta_feat[t] = numpy.dot(numpy.arange(-N, N+1), padded[t : t+2*N+1]) / denominator # [t : t+2*N+1] == [(N+t)-N : (N+t)+N+1]
return delta_feat | ['def', 'delta', '(', 'feat', ',', 'N', ')', ':', 'if', 'N', '<', '1', ':', 'raise', 'ValueError', '(', "'N must be an integer >= 1'", ')', 'NUMFRAMES', '=', 'len', '(', 'feat', ')', 'denominator', '=', '2', '*', 'sum', '(', '[', 'i', '**', '2', 'for', 'i', 'in', 'range', '(', '1', ',', 'N', '+', '1', ')', ']', ')', 'delta_feat', '=', 'numpy', '.', 'empty_like', '(', 'feat', ')', 'padded', '=', 'numpy', '.', 'pad', '(', 'feat', ',', '(', '(', 'N', ',', 'N', ')', ',', '(', '0', ',', '0', ')', ')', ',', 'mode', '=', "'edge'", ')', '# padded version of feat', 'for', 't', 'in', 'range', '(', 'NUMFRAMES', ')', ':', 'delta_feat', '[', 't', ']', '=', 'numpy', '.', 'dot', '(', 'numpy', '.', 'arange', '(', '-', 'N', ',', 'N', '+', '1', ')', ',', 'padded', '[', 't', ':', 't', '+', '2', '*', 'N', '+', '1', ']', ')', '/', 'denominator', '# [t : t+2*N+1] == [(N+t)-N : (N+t)+N+1]', 'return', 'delta_feat'] | Compute delta features from a feature vector sequence.
:param feat: A numpy array of size (NUMFRAMES by number of features) containing features. Each row holds 1 feature vector.
:param N: For each frame, calculate delta features based on preceding and following N frames
:returns: A numpy array of size (NUMFRAMES by number of features) containing delta features. Each row holds 1 delta feature vector. | ['Compute', 'delta', 'features', 'from', 'a', 'feature', 'vector', 'sequence', '.'] | train | https://github.com/jameslyons/python_speech_features/blob/40c590269b57c64a8c1f1ddaaff2162008d1850c/python_speech_features/base.py#L195-L210 |
392 | richardkiss/pycoin | pycoin/key/HierarchicalKey.py | HierarchicalKey.subkeys | def subkeys(self, path):
"""
A generalized form that can return multiple subkeys.
"""
for _ in subpaths_for_path_range(path, hardening_chars="'pH"):
yield self.subkey_for_path(_) | python | def subkeys(self, path):
"""
A generalized form that can return multiple subkeys.
"""
for _ in subpaths_for_path_range(path, hardening_chars="'pH"):
yield self.subkey_for_path(_) | ['def', 'subkeys', '(', 'self', ',', 'path', ')', ':', 'for', '_', 'in', 'subpaths_for_path_range', '(', 'path', ',', 'hardening_chars', '=', '"\'pH"', ')', ':', 'yield', 'self', '.', 'subkey_for_path', '(', '_', ')'] | A generalized form that can return multiple subkeys. | ['A', 'generalized', 'form', 'that', 'can', 'return', 'multiple', 'subkeys', '.'] | train | https://github.com/richardkiss/pycoin/blob/1e8d0d9fe20ce0347b97847bb529cd1bd84c7442/pycoin/key/HierarchicalKey.py#L6-L11 |
393 | bitlabstudio/cmsplugin-image-gallery | image_gallery/templatetags/image_gallery_tags.py | render_pictures | def render_pictures(context, selection='recent', amount=3):
"""Template tag to render a list of pictures."""
pictures = Image.objects.filter(
folder__id__in=Gallery.objects.filter(is_published=True).values_list(
'folder__pk', flat=True))
if selection == 'recent':
context.update({
'pictures': pictures.order_by('-uploaded_at')[:amount]
})
elif selection == 'random':
context.update({
'pictures': pictures.order_by('?')[:amount]
})
else:
return None
return context | python | def render_pictures(context, selection='recent', amount=3):
"""Template tag to render a list of pictures."""
pictures = Image.objects.filter(
folder__id__in=Gallery.objects.filter(is_published=True).values_list(
'folder__pk', flat=True))
if selection == 'recent':
context.update({
'pictures': pictures.order_by('-uploaded_at')[:amount]
})
elif selection == 'random':
context.update({
'pictures': pictures.order_by('?')[:amount]
})
else:
return None
return context | ['def', 'render_pictures', '(', 'context', ',', 'selection', '=', "'recent'", ',', 'amount', '=', '3', ')', ':', 'pictures', '=', 'Image', '.', 'objects', '.', 'filter', '(', 'folder__id__in', '=', 'Gallery', '.', 'objects', '.', 'filter', '(', 'is_published', '=', 'True', ')', '.', 'values_list', '(', "'folder__pk'", ',', 'flat', '=', 'True', ')', ')', 'if', 'selection', '==', "'recent'", ':', 'context', '.', 'update', '(', '{', "'pictures'", ':', 'pictures', '.', 'order_by', '(', "'-uploaded_at'", ')', '[', ':', 'amount', ']', '}', ')', 'elif', 'selection', '==', "'random'", ':', 'context', '.', 'update', '(', '{', "'pictures'", ':', 'pictures', '.', 'order_by', '(', "'?'", ')', '[', ':', 'amount', ']', '}', ')', 'else', ':', 'return', 'None', 'return', 'context'] | Template tag to render a list of pictures. | ['Template', 'tag', 'to', 'render', 'a', 'list', 'of', 'pictures', '.'] | train | https://github.com/bitlabstudio/cmsplugin-image-gallery/blob/f16a2d5d0a6fde469bc07436ff0cd84af2c78e5c/image_gallery/templatetags/image_gallery_tags.py#L12-L27 |
394 | roboogle/gtkmvc3 | gtkmvco/gtkmvc3/support/utils.py | __nt_relpath | def __nt_relpath(path, start=os.curdir):
"""Return a relative version of a path"""
if not path: raise ValueError("no path specified")
start_list = os.path.abspath(start).split(os.sep)
path_list = os.path.abspath(path).split(os.sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = os.path.splitunc(path)
unc_start, rest = os.path.splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)" \
% (path, start))
else: raise ValueError("path is on drive %s, start on drive %s" \
% (path_list[0], start_list[0]))
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else: i += 1
pass
rel_list = [os.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list: return os.curdir
return os.path.join(*rel_list) | python | def __nt_relpath(path, start=os.curdir):
"""Return a relative version of a path"""
if not path: raise ValueError("no path specified")
start_list = os.path.abspath(start).split(os.sep)
path_list = os.path.abspath(path).split(os.sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = os.path.splitunc(path)
unc_start, rest = os.path.splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)" \
% (path, start))
else: raise ValueError("path is on drive %s, start on drive %s" \
% (path_list[0], start_list[0]))
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else: i += 1
pass
rel_list = [os.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list: return os.curdir
return os.path.join(*rel_list) | ['def', '__nt_relpath', '(', 'path', ',', 'start', '=', 'os', '.', 'curdir', ')', ':', 'if', 'not', 'path', ':', 'raise', 'ValueError', '(', '"no path specified"', ')', 'start_list', '=', 'os', '.', 'path', '.', 'abspath', '(', 'start', ')', '.', 'split', '(', 'os', '.', 'sep', ')', 'path_list', '=', 'os', '.', 'path', '.', 'abspath', '(', 'path', ')', '.', 'split', '(', 'os', '.', 'sep', ')', 'if', 'start_list', '[', '0', ']', '.', 'lower', '(', ')', '!=', 'path_list', '[', '0', ']', '.', 'lower', '(', ')', ':', 'unc_path', ',', 'rest', '=', 'os', '.', 'path', '.', 'splitunc', '(', 'path', ')', 'unc_start', ',', 'rest', '=', 'os', '.', 'path', '.', 'splitunc', '(', 'start', ')', 'if', 'bool', '(', 'unc_path', ')', '^', 'bool', '(', 'unc_start', ')', ':', 'raise', 'ValueError', '(', '"Cannot mix UNC and non-UNC paths (%s and %s)"', '%', '(', 'path', ',', 'start', ')', ')', 'else', ':', 'raise', 'ValueError', '(', '"path is on drive %s, start on drive %s"', '%', '(', 'path_list', '[', '0', ']', ',', 'start_list', '[', '0', ']', ')', ')', '# Work out how much of the filepath is shared by start and path.', 'for', 'i', 'in', 'range', '(', 'min', '(', 'len', '(', 'start_list', ')', ',', 'len', '(', 'path_list', ')', ')', ')', ':', 'if', 'start_list', '[', 'i', ']', '.', 'lower', '(', ')', '!=', 'path_list', '[', 'i', ']', '.', 'lower', '(', ')', ':', 'break', 'else', ':', 'i', '+=', '1', 'pass', 'rel_list', '=', '[', 'os', '.', 'pardir', ']', '*', '(', 'len', '(', 'start_list', ')', '-', 'i', ')', '+', 'path_list', '[', 'i', ':', ']', 'if', 'not', 'rel_list', ':', 'return', 'os', '.', 'curdir', 'return', 'os', '.', 'path', '.', 'join', '(', '*', 'rel_list', ')'] | Return a relative version of a path | ['Return', 'a', 'relative', 'version', 'of', 'a', 'path'] | train | https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/gtkmvc3/support/utils.py#L112-L136 |
395 | redodo/formats | formats/banks.py | FormatBank.convert | def convert(self, type_from, type_to, data):
"""Parsers data from with one format and composes with another.
:param type_from: The unique name of the format to parse with
:param type_to: The unique name of the format to compose with
:param data: The text to convert
"""
try:
return self.compose(type_to, self.parse(type_from, data))
except Exception as e:
raise ValueError(
"Couldn't convert '{from_}' to '{to}'. Possibly "
"because the parser of '{from_}' generates a "
"data structure incompatible with the composer "
"of '{to}'. This is the original error: \n\n"
"{error}: {message}".format(from_=type_from, to=type_to,
error=e.__class__.__name__,
message=e.message)) | python | def convert(self, type_from, type_to, data):
"""Parsers data from with one format and composes with another.
:param type_from: The unique name of the format to parse with
:param type_to: The unique name of the format to compose with
:param data: The text to convert
"""
try:
return self.compose(type_to, self.parse(type_from, data))
except Exception as e:
raise ValueError(
"Couldn't convert '{from_}' to '{to}'. Possibly "
"because the parser of '{from_}' generates a "
"data structure incompatible with the composer "
"of '{to}'. This is the original error: \n\n"
"{error}: {message}".format(from_=type_from, to=type_to,
error=e.__class__.__name__,
message=e.message)) | ['def', 'convert', '(', 'self', ',', 'type_from', ',', 'type_to', ',', 'data', ')', ':', 'try', ':', 'return', 'self', '.', 'compose', '(', 'type_to', ',', 'self', '.', 'parse', '(', 'type_from', ',', 'data', ')', ')', 'except', 'Exception', 'as', 'e', ':', 'raise', 'ValueError', '(', '"Couldn\'t convert \'{from_}\' to \'{to}\'. Possibly "', '"because the parser of \'{from_}\' generates a "', '"data structure incompatible with the composer "', '"of \'{to}\'. This is the original error: \\n\\n"', '"{error}: {message}"', '.', 'format', '(', 'from_', '=', 'type_from', ',', 'to', '=', 'type_to', ',', 'error', '=', 'e', '.', '__class__', '.', '__name__', ',', 'message', '=', 'e', '.', 'message', ')', ')'] | Parsers data from with one format and composes with another.
:param type_from: The unique name of the format to parse with
:param type_to: The unique name of the format to compose with
:param data: The text to convert | ['Parsers', 'data', 'from', 'with', 'one', 'format', 'and', 'composes', 'with', 'another', '.'] | train | https://github.com/redodo/formats/blob/5bc7a79a2c93ef895534edbbf83f1efe2f62e081/formats/banks.py#L122-L139 |
396 | kensho-technologies/graphql-compiler | graphql_compiler/compiler/expressions.py | BinaryComposition.to_match | def to_match(self):
"""Return a unicode object with the MATCH representation of this BinaryComposition."""
self.validate()
# The MATCH versions of some operators require an inverted order of arguments.
# pylint: disable=unused-variable
regular_operator_format = '(%(left)s %(operator)s %(right)s)'
inverted_operator_format = '(%(right)s %(operator)s %(left)s)' # noqa
intersects_operator_format = '(%(operator)s(%(left)s, %(right)s).asList().size() > 0)'
# pylint: enable=unused-variable
# Null literals use 'is/is not' as (in)equality operators, while other values use '=/<>'.
if any((isinstance(self.left, Literal) and self.left.value is None,
isinstance(self.right, Literal) and self.right.value is None)):
translation_table = {
u'=': (u'IS', regular_operator_format),
u'!=': (u'IS NOT', regular_operator_format),
}
else:
translation_table = {
u'=': (u'=', regular_operator_format),
u'!=': (u'<>', regular_operator_format),
u'>=': (u'>=', regular_operator_format),
u'<=': (u'<=', regular_operator_format),
u'>': (u'>', regular_operator_format),
u'<': (u'<', regular_operator_format),
u'+': (u'+', regular_operator_format),
u'||': (u'OR', regular_operator_format),
u'&&': (u'AND', regular_operator_format),
u'contains': (u'CONTAINS', regular_operator_format),
u'intersects': (u'intersect', intersects_operator_format),
u'has_substring': (None, None), # must be lowered into compatible form using LIKE
# MATCH-specific operators
u'LIKE': (u'LIKE', regular_operator_format),
u'INSTANCEOF': (u'INSTANCEOF', regular_operator_format),
}
match_operator, format_spec = translation_table.get(self.operator, (None, None))
if not match_operator:
raise AssertionError(u'Unrecognized operator used: '
u'{} {}'.format(self.operator, self))
return format_spec % dict(operator=match_operator,
left=self.left.to_match(),
right=self.right.to_match()) | python | def to_match(self):
"""Return a unicode object with the MATCH representation of this BinaryComposition."""
self.validate()
# The MATCH versions of some operators require an inverted order of arguments.
# pylint: disable=unused-variable
regular_operator_format = '(%(left)s %(operator)s %(right)s)'
inverted_operator_format = '(%(right)s %(operator)s %(left)s)' # noqa
intersects_operator_format = '(%(operator)s(%(left)s, %(right)s).asList().size() > 0)'
# pylint: enable=unused-variable
# Null literals use 'is/is not' as (in)equality operators, while other values use '=/<>'.
if any((isinstance(self.left, Literal) and self.left.value is None,
isinstance(self.right, Literal) and self.right.value is None)):
translation_table = {
u'=': (u'IS', regular_operator_format),
u'!=': (u'IS NOT', regular_operator_format),
}
else:
translation_table = {
u'=': (u'=', regular_operator_format),
u'!=': (u'<>', regular_operator_format),
u'>=': (u'>=', regular_operator_format),
u'<=': (u'<=', regular_operator_format),
u'>': (u'>', regular_operator_format),
u'<': (u'<', regular_operator_format),
u'+': (u'+', regular_operator_format),
u'||': (u'OR', regular_operator_format),
u'&&': (u'AND', regular_operator_format),
u'contains': (u'CONTAINS', regular_operator_format),
u'intersects': (u'intersect', intersects_operator_format),
u'has_substring': (None, None), # must be lowered into compatible form using LIKE
# MATCH-specific operators
u'LIKE': (u'LIKE', regular_operator_format),
u'INSTANCEOF': (u'INSTANCEOF', regular_operator_format),
}
match_operator, format_spec = translation_table.get(self.operator, (None, None))
if not match_operator:
raise AssertionError(u'Unrecognized operator used: '
u'{} {}'.format(self.operator, self))
return format_spec % dict(operator=match_operator,
left=self.left.to_match(),
right=self.right.to_match()) | ['def', 'to_match', '(', 'self', ')', ':', 'self', '.', 'validate', '(', ')', '# The MATCH versions of some operators require an inverted order of arguments.', '# pylint: disable=unused-variable', 'regular_operator_format', '=', "'(%(left)s %(operator)s %(right)s)'", 'inverted_operator_format', '=', "'(%(right)s %(operator)s %(left)s)'", '# noqa', 'intersects_operator_format', '=', "'(%(operator)s(%(left)s, %(right)s).asList().size() > 0)'", '# pylint: enable=unused-variable', "# Null literals use 'is/is not' as (in)equality operators, while other values use '=/<>'.", 'if', 'any', '(', '(', 'isinstance', '(', 'self', '.', 'left', ',', 'Literal', ')', 'and', 'self', '.', 'left', '.', 'value', 'is', 'None', ',', 'isinstance', '(', 'self', '.', 'right', ',', 'Literal', ')', 'and', 'self', '.', 'right', '.', 'value', 'is', 'None', ')', ')', ':', 'translation_table', '=', '{', "u'='", ':', '(', "u'IS'", ',', 'regular_operator_format', ')', ',', "u'!='", ':', '(', "u'IS NOT'", ',', 'regular_operator_format', ')', ',', '}', 'else', ':', 'translation_table', '=', '{', "u'='", ':', '(', "u'='", ',', 'regular_operator_format', ')', ',', "u'!='", ':', '(', "u'<>'", ',', 'regular_operator_format', ')', ',', "u'>='", ':', '(', "u'>='", ',', 'regular_operator_format', ')', ',', "u'<='", ':', '(', "u'<='", ',', 'regular_operator_format', ')', ',', "u'>'", ':', '(', "u'>'", ',', 'regular_operator_format', ')', ',', "u'<'", ':', '(', "u'<'", ',', 'regular_operator_format', ')', ',', "u'+'", ':', '(', "u'+'", ',', 'regular_operator_format', ')', ',', "u'||'", ':', '(', "u'OR'", ',', 'regular_operator_format', ')', ',', "u'&&'", ':', '(', "u'AND'", ',', 'regular_operator_format', ')', ',', "u'contains'", ':', '(', "u'CONTAINS'", ',', 'regular_operator_format', ')', ',', "u'intersects'", ':', '(', "u'intersect'", ',', 'intersects_operator_format', ')', ',', "u'has_substring'", ':', '(', 'None', ',', 'None', ')', ',', '# must be lowered into compatible form using LIKE', '# MATCH-specific operators', "u'LIKE'", ':', '(', "u'LIKE'", ',', 'regular_operator_format', ')', ',', "u'INSTANCEOF'", ':', '(', "u'INSTANCEOF'", ',', 'regular_operator_format', ')', ',', '}', 'match_operator', ',', 'format_spec', '=', 'translation_table', '.', 'get', '(', 'self', '.', 'operator', ',', '(', 'None', ',', 'None', ')', ')', 'if', 'not', 'match_operator', ':', 'raise', 'AssertionError', '(', "u'Unrecognized operator used: '", "u'{} {}'", '.', 'format', '(', 'self', '.', 'operator', ',', 'self', ')', ')', 'return', 'format_spec', '%', 'dict', '(', 'operator', '=', 'match_operator', ',', 'left', '=', 'self', '.', 'left', '.', 'to_match', '(', ')', ',', 'right', '=', 'self', '.', 'right', '.', 'to_match', '(', ')', ')'] | Return a unicode object with the MATCH representation of this BinaryComposition. | ['Return', 'a', 'unicode', 'object', 'with', 'the', 'MATCH', 'representation', 'of', 'this', 'BinaryComposition', '.'] | train | https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/expressions.py#L791-L836 |
397 | coursera/courseraoauth2client | courseraoauth2client/commands/version.py | parser | def parser(subparsers):
"Build an argparse argument parser to parse the command line."
# create the parser for the version subcommand.
parser_version = subparsers.add_parser(
'version',
help="Output the version of %(prog)s to the console.")
parser_version.set_defaults(func=command_version)
return parser_version | python | def parser(subparsers):
"Build an argparse argument parser to parse the command line."
# create the parser for the version subcommand.
parser_version = subparsers.add_parser(
'version',
help="Output the version of %(prog)s to the console.")
parser_version.set_defaults(func=command_version)
return parser_version | ['def', 'parser', '(', 'subparsers', ')', ':', '# create the parser for the version subcommand.', 'parser_version', '=', 'subparsers', '.', 'add_parser', '(', "'version'", ',', 'help', '=', '"Output the version of %(prog)s to the console."', ')', 'parser_version', '.', 'set_defaults', '(', 'func', '=', 'command_version', ')', 'return', 'parser_version'] | Build an argparse argument parser to parse the command line. | ['Build', 'an', 'argparse', 'argument', 'parser', 'to', 'parse', 'the', 'command', 'line', '.'] | train | https://github.com/coursera/courseraoauth2client/blob/4edd991defe26bfc768ab28a30368cace40baf44/courseraoauth2client/commands/version.py#L53-L62 |
398 | orb-framework/orb | orb/core/database.py | Database.addNamespace | def addNamespace(self, namespace, **context):
"""
Creates a new namespace within this database.
:param namespace: <str>
"""
self.connection().addNamespace(namespace, orb.Context(**context)) | python | def addNamespace(self, namespace, **context):
"""
Creates a new namespace within this database.
:param namespace: <str>
"""
self.connection().addNamespace(namespace, orb.Context(**context)) | ['def', 'addNamespace', '(', 'self', ',', 'namespace', ',', '*', '*', 'context', ')', ':', 'self', '.', 'connection', '(', ')', '.', 'addNamespace', '(', 'namespace', ',', 'orb', '.', 'Context', '(', '*', '*', 'context', ')', ')'] | Creates a new namespace within this database.
:param namespace: <str> | ['Creates', 'a', 'new', 'namespace', 'within', 'this', 'database', '.'] | train | https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/database.py#L77-L83 |
399 | mabuchilab/QNET | src/qnet/printing/asciiprinter.py | QnetAsciiPrinter._render_op | def _render_op(
self, identifier, hs=None, dagger=False, args=None, superop=False):
"""Render an operator
Args:
identifier (str or SymbolicLabelBase): The identifier (name/symbol)
of the operator. May include a subscript, denoted by '_'.
hs (qnet.algebra.hilbert_space_algebra.HilbertSpace): The Hilbert
space in which the operator is defined
dagger (bool): Whether the operator should be daggered
args (list): A list of expressions that will be rendered with
:meth:`doprint`, joined with commas, enclosed in parenthesis
superop (bool): Whether the operator is a super-operator
"""
hs_label = None
if hs is not None and self._settings['show_hs_label']:
hs_label = self._render_hs_label(hs)
name, total_subscript, total_superscript, args_str \
= self._split_op(identifier, hs_label, dagger, args)
res = name
if len(total_subscript) > 0:
res += "_" + total_subscript
if len(total_superscript) > 0:
res += "^" + total_superscript
if len(args_str) > 0:
res += args_str
return res | python | def _render_op(
self, identifier, hs=None, dagger=False, args=None, superop=False):
"""Render an operator
Args:
identifier (str or SymbolicLabelBase): The identifier (name/symbol)
of the operator. May include a subscript, denoted by '_'.
hs (qnet.algebra.hilbert_space_algebra.HilbertSpace): The Hilbert
space in which the operator is defined
dagger (bool): Whether the operator should be daggered
args (list): A list of expressions that will be rendered with
:meth:`doprint`, joined with commas, enclosed in parenthesis
superop (bool): Whether the operator is a super-operator
"""
hs_label = None
if hs is not None and self._settings['show_hs_label']:
hs_label = self._render_hs_label(hs)
name, total_subscript, total_superscript, args_str \
= self._split_op(identifier, hs_label, dagger, args)
res = name
if len(total_subscript) > 0:
res += "_" + total_subscript
if len(total_superscript) > 0:
res += "^" + total_superscript
if len(args_str) > 0:
res += args_str
return res | ['def', '_render_op', '(', 'self', ',', 'identifier', ',', 'hs', '=', 'None', ',', 'dagger', '=', 'False', ',', 'args', '=', 'None', ',', 'superop', '=', 'False', ')', ':', 'hs_label', '=', 'None', 'if', 'hs', 'is', 'not', 'None', 'and', 'self', '.', '_settings', '[', "'show_hs_label'", ']', ':', 'hs_label', '=', 'self', '.', '_render_hs_label', '(', 'hs', ')', 'name', ',', 'total_subscript', ',', 'total_superscript', ',', 'args_str', '=', 'self', '.', '_split_op', '(', 'identifier', ',', 'hs_label', ',', 'dagger', ',', 'args', ')', 'res', '=', 'name', 'if', 'len', '(', 'total_subscript', ')', '>', '0', ':', 'res', '+=', '"_"', '+', 'total_subscript', 'if', 'len', '(', 'total_superscript', ')', '>', '0', ':', 'res', '+=', '"^"', '+', 'total_superscript', 'if', 'len', '(', 'args_str', ')', '>', '0', ':', 'res', '+=', 'args_str', 'return', 'res'] | Render an operator
Args:
identifier (str or SymbolicLabelBase): The identifier (name/symbol)
of the operator. May include a subscript, denoted by '_'.
hs (qnet.algebra.hilbert_space_algebra.HilbertSpace): The Hilbert
space in which the operator is defined
dagger (bool): Whether the operator should be daggered
args (list): A list of expressions that will be rendered with
:meth:`doprint`, joined with commas, enclosed in parenthesis
superop (bool): Whether the operator is a super-operator | ['Render', 'an', 'operator'] | train | https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/printing/asciiprinter.py#L152-L178 |