code (string, 18-4.5k chars) | signature (string, 7-879 chars) | docstring (string, 3-4.31k chars) | loss_without_docstring (float64, 1.08-2.36k) | loss_with_docstring (float64, 1.07-1.49k) | factor (float64, 1-23.3) | rendered (string, 91-5.22k chars) | quality_prob (float64, 0.5-0.97) | learning_prob (float64, 0.5-1)
---|---|---|---|---|---|---|---|---
if Class is True: Class = self.__class__
if scope is True: scope = STRUCTURESCOPE
structural = Class is not None and issubclass(Class,AbstractStructureElement)
if reverse:
order = reversed
descendindex = -1
else:
order = lambda x: x #pylint: disable=redefined-variable-type
descendindex = 0
child = self
parent = self.parent
while parent: #pylint: disable=too-many-nested-blocks
if len(parent) > 1:
returnnext = False
for e in order(parent):
if e is child:
#we found the current item, next item will be the one to return
returnnext = True
elif returnnext and e.auth and not isinstance(e,AbstractAnnotationLayer) and (not structural or (structural and (not isinstance(e,(AbstractTokenAnnotation,TextContent)) ) )):
if structural and isinstance(e,Correction):
if not list(e.select(AbstractStructureElement)): #skip-over non-structural correction
continue
if Class is None or (isinstance(Class,tuple) and (any(isinstance(e,C) for C in Class))) or isinstance(e,Class):
return e
else:
#this is not yet the element of the type we are looking for, we are going to descend again in the very leftmost (rightmost if reversed) branch only
while e.data:
e = e.data[descendindex]
if not isinstance(e, AbstractElement):
return None #we've gone too far
if e.auth and not isinstance(e,AbstractAnnotationLayer):
if Class is None or (isinstance(Class,tuple) and (any(isinstance(e,C) for C in Class))) or isinstance(e,Class):
return e
else:
#descend deeper
continue
return None
#generational iteration
child = parent
if scope is not None and child.__class__ in scope:
#you shall not pass!
break
parent = parent.parent
return None | def next(self, Class=True, scope=True, reverse=False) | Returns the next element, if it is of the specified type and if it does not cross the boundary of the defined scope. Returns None if no next element is found. Non-authoritative elements are never returned.
Arguments:
* ``Class``: The class to select; any Python class subclassed from ``AbstractElement``; may also be a tuple of multiple classes. Set to ``True`` to constrain to the same class as that of the current instance, set to ``None`` to not constrain at all
* ``scope``: A list of classes which are never crossed looking for a next element. Set to ``True`` to constrain to a default list of structure elements (Sentence,Paragraph,Division,Event, ListItem,Caption), set to ``None`` to not constrain at all. | 5.550529 | 5.39329 | 1.029155 | def next(self, Class=True, scope=True, reverse=False):
"""
Returns the next element, if it is of the specified type and if it does not cross the boundary of the defined scope. Returns None if no next element is found. Non-authoritative elements are never returned.
Arguments:
* ``Class``: The class to select; any Python class subclassed from ``AbstractElement``; may also be a tuple of multiple classes. Set to ``True`` to constrain to the same class as that of the current instance, set to ``None`` to not constrain at all
* ``scope``: A list of classes which are never crossed looking for a next element. Set to ``True`` to constrain to a default list of structure elements (Sentence,Paragraph,Division,Event, ListItem,Caption), set to ``None`` to not constrain at all.
"""
if Class is True: Class = self.__class__
if scope is True: scope = STRUCTURESCOPE
structural = Class is not None and issubclass(Class,AbstractStructureElement)
if reverse:
order = reversed
descendindex = -1
else:
order = lambda x: x #pylint: disable=redefined-variable-type
descendindex = 0
child = self
parent = self.parent
while parent: #pylint: disable=too-many-nested-blocks
if len(parent) > 1:
returnnext = False
for e in order(parent):
if e is child:
#we found the current item, next item will be the one to return
returnnext = True
elif returnnext and e.auth and not isinstance(e,AbstractAnnotationLayer) and (not structural or (structural and (not isinstance(e,(AbstractTokenAnnotation,TextContent)) ) )):
if structural and isinstance(e,Correction):
if not list(e.select(AbstractStructureElement)): #skip-over non-structural correction
continue
if Class is None or (isinstance(Class,tuple) and (any(isinstance(e,C) for C in Class))) or isinstance(e,Class):
return e
else:
#this is not yet the element of the type we are looking for, we are going to descend again in the very leftmost (rightmost if reversed) branch only
while e.data:
e = e.data[descendindex]
if not isinstance(e, AbstractElement):
return None #we've gone too far
if e.auth and not isinstance(e,AbstractAnnotationLayer):
if Class is None or (isinstance(Class,tuple) and (any(isinstance(e,C) for C in Class))) or isinstance(e,Class):
return e
else:
#descend deeper
continue
return None
#generational iteration
child = parent
if scope is not None and child.__class__ in scope:
#you shall not pass!
break
parent = parent.parent
return None | 0.785648 | 0.509093 |
depth = 0
e = self
while True:
if e.parent:
e = e.parent #pylint: disable=redefined-variable-type
else:
#no parent, breaking
return False
if isinstance(e,AbstractStructureElement) or isinstance(e,AbstractSubtokenAnnotation):
depth += 1
if depth == 2:
return e
return False | def finddefaultreference(self) | Find the default reference for text offsets:
The parent of the current textcontent's parent (counting only Structure Elements and Subtoken Annotation Elements)
Note: This returns not a TextContent element, but its parent. Whether the textcontent actually exists is checked later/elsewhere | 6.768433 | 4.726374 | 1.432056 | def finddefaultreference(self):
"""
Find the default reference for text offsets:
The parent of the current textcontent's parent (counting only Structure Elements and Subtoken Annotation Elements)
Note: This returns not a TextContent element, but its parent. Whether the textcontent actually exists is checked later/elsewhere
"""
depth = 0
e = self
while True:
if e.parent:
e = e.parent #pylint: disable=redefined-variable-type
else:
#no parent, breaking
return False
if isinstance(e,AbstractStructureElement) or isinstance(e,AbstractSubtokenAnnotation):
depth += 1
if depth == 2:
return e
return False | 0.659378 | 0.54468 |
l = []
for e in self.data:
l += e.items()
return l | def items(self) | Returns a depth-first flat list of all items in the document | 6.009556 | 5.584836 | 1.076049 | def items(self):
"""
Returns a depth-first flat list of all items in the document
"""
l = []
for e in self.data:
l += e.items()
return l | 0.716281 | 0.523786 |
finalsolution = None
bestscore = None
for solution in self:
if bestscore == None:
bestscore = solution.score()
finalsolution = solution
elif self.minimize:
score = solution.score()
if score < bestscore:
bestscore = score
finalsolution = solution
elif not self.minimize:
score = solution.score()
if score > bestscore:
bestscore = score
finalsolution = solution
return finalsolution | def searchbest(self) | Returns the single best result (if multiple have the same score, the first match is returned) | 2.211629 | 2.082187 | 1.062166 | def searchbest(self):
"""
Returns the single best result (if multiple have the same score, the first match is returned)
"""
finalsolution = None
bestscore = None
for solution in self:
if bestscore == None:
bestscore = solution.score()
finalsolution = solution
elif self.minimize:
score = solution.score()
if score < bestscore:
bestscore = score
finalsolution = solution
elif not self.minimize:
score = solution.score()
if score > bestscore:
bestscore = score
finalsolution = solution
return finalsolution | 0.729026 | 0.514217 |
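A minimal usage sketch for `searchbest`: the `ToySearch` and `Solution` classes below are invented stand-ins for the real search object, which only needs to be iterable, expose a `minimize` flag, and yield solutions with a `score()` method.
class Solution:
    def __init__(self, value):
        self.value = value
    def score(self):
        return self.value

class ToySearch:
    minimize = True  # lower scores are considered better
    def __init__(self, values):
        self.solutions = [Solution(v) for v in values]
    def __iter__(self):
        return iter(self.solutions)

ToySearch.searchbest = searchbest  # attach the method defined above
best = ToySearch([3.0, 1.5, 2.7]).searchbest()
print(best.score())  # 1.5 -- on ties, the first match would be returned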
solutions = deque([], n)
for solution in self:
solutions.append(solution)
return solutions | def searchlast(self,n=10) | Return the last n results (or possibly less if not found). Note that the last results are not necessarily the best ones! Depending on the search type. | 9.863415 | 8.865089 | 1.112613 | def searchlast(self,n=10):
"""
Return the last n results (or possibly less if not found). Note that the last results are not necessarily the best ones! Depending on the search type.
"""
solutions = deque([], n)
for solution in self:
solutions.append(solution)
return solutions | 0.639708 | 0.501038 |
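The bounded history comes from `collections.deque` with a fixed maximum length; a short, self-contained illustration of that building block:
from collections import deque

last_three = deque([], 3)      # same construction as in searchlast(n=3)
for solution in range(1, 8):   # stand-in for iterating over the search object
    last_three.append(solution)
print(list(last_three))        # [5, 6, 7] -- only the final n items are kept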
l = []
for word_id, senses,distance in self:
for sense, confidence in senses:
if not sense in l: l.append(sense)
if bestonly:
break
return l | def senses(self, bestonly=False) | Returns a list of all predicted senses | 5.897015 | 5.76519 | 1.022866 | def senses(self, bestonly=False):
"""
Returns a list of all predicted senses
"""
l = []
for word_id, senses,distance in self:
for sense, confidence in senses:
if not sense in l: l.append(sense)
if bestonly:
break
return l | 0.62681 | 0.507385 |
if self.children:
return sum( ( c.size() for c in self.children.values() ) ) + 1
else:
return 1 | def size(self) | Size is number of nodes under the trie, including the current node | 3.820153 | 3.063506 | 1.246987 | def size(self):
"""
Size is number of nodes under the trie, including the current node
"""
if self.children:
return sum( ( c.size() for c in self.children.values() ) ) + 1
else:
return 1 | 0.534066 | 0.596051 |
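A small sketch of the recursive count, using a hypothetical trie node whose `children` attribute is a dict of child nodes (which is all `size` relies on):
class TrieNode:
    def __init__(self, children=None):
        self.children = children or {}

TrieNode.size = size  # attach the method defined above

# root -> {a -> {c}, b}
root = TrieNode({'a': TrieNode({'c': TrieNode()}), 'b': TrieNode()})
print(root.size())  # 4 -- three descendants plus the root itself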
global namespaces
return self.tree.xpath(expression, namespaces=namespaces) | def xpath(self, expression) | Executes an xpath expression using the correct namespaces | 8.032832 | 6.176901 | 1.300463 | def xpath(self, expression):
"""
Executes an xpath expression using the correct namespaces
"""
global namespaces
return self.tree.xpath(expression, namespaces=namespaces) | 0.585931 | 0.84241 |
'''
Given a function to map from an ID to an underlying object, and a function
to map from an underlying object to the concrete GraphQLObjectType it
corresponds to, constructs a `Node` interface that objects can implement,
and a field config for a `node` root field.
If the type_resolver is omitted, object resolution on the interface will be
handled with the `isTypeOf` method on object types, as with any GraphQL
interface without a provided `resolveType` method.
'''
node_interface = GraphQLInterfaceType(
'Node',
description='An object with an ID',
fields=lambda: OrderedDict((
('id', GraphQLField(
GraphQLNonNull(GraphQLID),
description='The id of the object.',
resolver=id_resolver,
)),
)),
resolve_type=type_resolver
)
node_field = GraphQLField(
node_interface,
description='Fetches an object given its ID',
args=OrderedDict((
('id', GraphQLArgument(
GraphQLNonNull(GraphQLID),
description='The ID of an object'
)),
)),
resolver=lambda obj, args, *_: id_fetcher(args.get('id'), *_)
)
return node_interface, node_field | def node_definitions(id_fetcher, type_resolver=None, id_resolver=None) | Given a function to map from an ID to an underlying object, and a function
to map from an underlying object to the concrete GraphQLObjectType it
corresponds to, constructs a `Node` interface that objects can implement,
and a field config for a `node` root field.
If the type_resolver is omitted, object resolution on the interface will be
handled with the `isTypeOf` method on object types, as with any GraphQL
interface without a provided `resolveType` method. | 3.925619 | 1.697994 | 2.311916 | def node_definitions(id_fetcher, type_resolver=None, id_resolver=None):
"""
Given a function to map from an ID to an underlying object, and a function
to map from an underlying object to the concrete GraphQLObjectType it
corresponds to, constructs a `Node` interface that objects can implement,
and a field config for a `node` root field.
If the type_resolver is omitted, object resolution on the interface will be
handled with the `isTypeOf` method on object types, as with any GraphQL
interface without a provided `resolveType` method.
"""
node_interface = GraphQLInterfaceType(
'Node',
description='An object with an ID',
fields=lambda: OrderedDict((
('id', GraphQLField(
GraphQLNonNull(GraphQLID),
description='The id of the object.',
resolver=id_resolver,
)),
)),
resolve_type=type_resolver
)
node_field = GraphQLField(
node_interface,
description='Fetches an object given its ID',
args=OrderedDict((
('id', GraphQLArgument(
GraphQLNonNull(GraphQLID),
description='The ID of an object'
)),
)),
resolver=lambda obj, args, *_: id_fetcher(args.get('id'), *_)
)
return node_interface, node_field | 0.723065 | 0.739281 |
'''
Given an optional cursor and a default offset, returns the offset
to use; if the cursor contains a valid offset, that will be used,
otherwise it will be the default.
'''
if not is_str(cursor):
return default_offset
offset = cursor_to_offset(cursor)
try:
return int(offset)
except:
return default_offset | def get_offset_with_default(cursor=None, default_offset=0) | Given an optional cursor and a default offset, returns the offset
to use; if the cursor contains a valid offset, that will be used,
otherwise it will be the default. | 3.98127 | 2.254083 | 1.766248 | def get_offset_with_default(cursor=None, default_offset=0):
"""
Given an optional cursor and a default offset, returns the offset
to use; if the cursor contains a valid offset, that will be used,
otherwise it will be the default.
"""
if not is_str(cursor):
return default_offset
offset = cursor_to_offset(cursor)
try:
return int(offset)
except:
return default_offset | 0.613179 | 0.515132 |
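In relay-style pagination the cursor is an opaque base64 string wrapping an integer offset. The helpers below (`PREFIX`, `offset_to_cursor`, `cursor_to_offset`, `is_str`) are simplified stand-ins written for this sketch, not the library's exact code:
from base64 import b64decode, b64encode

PREFIX = 'arrayconnection:'  # prefix assumed for illustration

def offset_to_cursor(offset):
    return b64encode('{}{}'.format(PREFIX, offset).encode('utf-8')).decode('utf-8')

def cursor_to_offset(cursor):
    try:
        return b64decode(cursor.encode('utf-8')).decode('utf-8')[len(PREFIX):]
    except Exception:
        return None

def is_str(value):
    return isinstance(value, str)

print(get_offset_with_default(offset_to_cursor(5), default_offset=0))  # 5
print(get_offset_with_default('###', default_offset=10))               # 10 (invalid cursor falls back)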
# Get a list of node ids from the edge data
nodes = set(e['source'] for e in edges) | set(e['target'] for e in edges)
# Convert to a data-storing object and initialize some values
d = 3 if is_3d else 2
nodes = {n: {'velocity': [0.0] * d, 'force': [0.0] * d} for n in nodes}
# Repeat n times (is there a more Pythonic way to do this?)
for _ in repeat(None, iterations):
# Add in Coulomb-esque node-node repulsive forces
for node1, node2 in combinations(nodes.values(), 2):
_coulomb(node1, node2, force_strength, max_distance)
# And Hooke-esque edge spring forces
for edge in edges:
_hooke(nodes[edge['source']], nodes[edge['target']],
force_strength * edge.get('size', 1), max_distance)
# Move by resultant force
for node in nodes.values():
# Constrain the force to the bounds specified by input parameter
force = [_constrain(dampening * f, -max_velocity, max_velocity)
for f in node['force']]
# Update velocities and reset force
node['velocity'] = [v + dv
for v, dv in zip(node['velocity'], force)]
node['force'] = [0] * d
# Clean and return
for node in nodes.values():
del node['force']
node['location'] = node['velocity']
del node['velocity']
# Even if it's 2D, let's specify three dimensions
if not is_3d:
node['location'] += [0.0]
return nodes | def run(edges, iterations=1000, force_strength=5.0, dampening=0.01,
max_velocity=2.0, max_distance=50, is_3d=True) | Runs a force-directed-layout algorithm on the input graph.
iterations - Number of FDL iterations to run in coordinate generation
force_strength - Strength of Coulomb and Hooke forces
(edit this to scale the distance between nodes)
dampening - Multiplier to reduce force applied to nodes
max_velocity - Maximum distance a node can move in one step
max_distance - The maximum distance considered for interactions | 3.911408 | 3.815891 | 1.025032 | def run(edges, iterations=1000, force_strength=5.0, dampening=0.01,
max_velocity=2.0, max_distance=50, is_3d=True):
"""
Runs a force-directed-layout algorithm on the input graph.
iterations - Number of FDL iterations to run in coordinate generation
force_strength - Strength of Coulomb and Hooke forces
(edit this to scale the distance between nodes)
dampening - Multiplier to reduce force applied to nodes
max_velocity - Maximum distance a node can move in one step
max_distance - The maximum distance considered for interactions
"""
# Get a list of node ids from the edge data
nodes = set(e['source'] for e in edges) | set(e['target'] for e in edges)
# Convert to a data-storing object and initialize some values
d = 3 if is_3d else 2
nodes = {n: {'velocity': [0.0] * d, 'force': [0.0] * d} for n in nodes}
# Repeat n times (is there a more Pythonic way to do this?)
for _ in repeat(None, iterations):
# Add in Coulomb-esque node-node repulsive forces
for node1, node2 in combinations(nodes.values(), 2):
_coulomb(node1, node2, force_strength, max_distance)
# And Hooke-esque edge spring forces
for edge in edges:
_hooke(nodes[edge['source']], nodes[edge['target']],
force_strength * edge.get('size', 1), max_distance)
# Move by resultant force
for node in nodes.values():
# Constrain the force to the bounds specified by input parameter
force = [_constrain(dampening * f, -max_velocity, max_velocity)
for f in node['force']]
# Update velocities and reset force
node['velocity'] = [v + dv
for v, dv in zip(node['velocity'], force)]
node['force'] = [0] * d
# Clean and return
for node in nodes.values():
del node['force']
node['location'] = node['velocity']
del node['velocity']
# Even if it's 2D, let's specify three dimensions
if not is_3d:
node['location'] += [0.0]
return nodes | 0.620219 | 0.634932 |
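A usage sketch for the layout routine above, on a tiny invented graph; it assumes the module's helper functions (`_coulomb`, `_hooke`, `_constrain`) and the `repeat`/`combinations` imports are available alongside `run`, as they are in the original module.
# Tiny invented graph: a triangle plus one pendant node.
edges = [
    {'source': 'a', 'target': 'b'},
    {'source': 'b', 'target': 'c'},
    {'source': 'c', 'target': 'a'},
    {'source': 'c', 'target': 'd', 'size': 2},
]
positions = run(edges, iterations=200, is_3d=False)
for node_id, info in sorted(positions.items()):
    print(node_id, info['location'])   # each location is [x, y, 0.0]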
logger.debug("starting")
assert pipeline
assert steps_group
logger.debug(f"retrieving {steps_group} steps from pipeline")
if steps_group in pipeline:
steps = pipeline[steps_group]
if steps is None:
logger.warn(
f"{steps_group}: sequence has no elements. So it won't do "
"anything.")
logger.debug("done")
return None
steps_count = len(steps)
logger.debug(f"{steps_count} steps found under {steps_group} in "
"pipeline definition.")
logger.debug("done")
return steps
else:
logger.debug(
f"pipeline doesn't have a {steps_group} collection. Add a "
f"{steps_group}: sequence to the yaml if you want {steps_group} "
"actually to do something.")
logger.debug("done")
return None | def get_pipeline_steps(pipeline, steps_group) | Get the steps attribute of module pipeline.
If there is no steps sequence on the pipeline, return None. Guess you
could theoretically want to run a pipeline with nothing in it. | 4.105674 | 3.856518 | 1.064607 | def get_pipeline_steps(pipeline, steps_group):
"""
Get the steps attribute of module pipeline.
If there is no steps sequence on the pipeline, return None. Guess you
could theoretically want to run a pipeline with nothing in it.
"""
logger.debug("starting")
assert pipeline
assert steps_group
logger.debug(f"retrieving {steps_group} steps from pipeline")
if steps_group in pipeline:
steps = pipeline[steps_group]
if steps is None:
logger.warn(
f"{steps_group}: sequence has no elements. So it won't do "
"anything.")
logger.debug("done")
return None
steps_count = len(steps)
logger.debug(f"{steps_count} steps found under {steps_group} in "
"pipeline definition.")
logger.debug("done")
return steps
else:
logger.debug(
f"pipeline doesn't have a {steps_group} collection. Add a "
f"{steps_group}: sequence to the yaml if you want {steps_group} "
"actually to do something.")
logger.debug("done")
return None | 0.68988 | 0.5984 |
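A usage sketch with a hand-written pipeline dict (pypyr pipelines are normally parsed from yaml; the inline dict and step names here are just for illustration, and the module's `logger` is assumed to be configured):
pipeline = {
    'steps': [
        {'name': 'pypyr.steps.echo', 'in': {'echoMe': 'hello'}},
        'pypyr.steps.shell',
    ],
    'on_failure': None,
}

print(get_pipeline_steps(pipeline, 'steps'))       # the two-element list above
print(get_pipeline_steps(pipeline, 'on_failure'))  # None -- sequence has no elements
print(get_pipeline_steps(pipeline, 'on_success'))  # None -- group not in pipeline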
tag_representers = [PyString, SicString]
yaml_loader = get_yaml_parser_safe()
for representer in tag_representers:
yaml_loader.register_class(representer)
pipeline_definition = yaml_loader.load(file)
return pipeline_definition | def get_pipeline_yaml(file) | Return pipeline yaml from open file object.
Use specific custom representers to model the custom pypyr pipeline yaml
format, to load in special literal types like py and sic strings.
If looking to extend the pypyr pipeline syntax with special types, add
these to the tag_representers list.
Args:
file: open file-like object.
Returns:
dict-like representation of loaded yaml. | 7.524 | 5.018706 | 1.499191 | def get_pipeline_yaml(file):
"""
Return pipeline yaml from open file object.
Use specific custom representers to model the custom pypyr pipeline yaml
format, to load in special literal types like py and sic strings.
If looking to extend the pypyr pipeline syntax with special types, add
these to the tag_representers list.
Args:
file: open file-like object.
Returns:
dict-like representation of loaded yaml.
"""
tag_representers = [PyString, SicString]
yaml_loader = get_yaml_parser_safe()
for representer in tag_representers:
yaml_loader.register_class(representer)
pipeline_definition = yaml_loader.load(file)
return pipeline_definition | 0.718829 | 0.606761 |
def partial(func, col, *args, **kwargs):
def new_func(gdf):
return func(gdf[col], *args, **kwargs)
return new_func
def make_statement(func, col):
if isinstance(func, str):
expr = '{}({})'.format(func, col)
elif callable(func):
expr = partial(func, col, *verb.args, **verb.kwargs)
else:
raise TypeError("{} is not a function".format(func))
return expr
def func_name(func):
if isinstance(func, str):
return func
try:
return func.__name__
except AttributeError:
return ''
# Generate function names. They act as identifiers (postfixed
# to the original columns) in the new_column names.
if isinstance(verb.functions, (tuple, list)):
names = (func_name(func) for func in verb.functions)
names_and_functions = zip(names, verb.functions)
else:
names_and_functions = verb.functions.items()
# Create statements for the expressions
# and postfix identifiers
columns = Selector.get(verb) # columns to act on
postfixes = []
stmts = []
for name, func in names_and_functions:
postfixes.append(name)
for col in columns:
stmts.append(make_statement(func, col))
if not stmts:
stmts = columns
# Names of the new columns
# e.g col1_mean, col2_mean, col1_std, col2_std
add_postfix = (isinstance(verb.functions, dict) or
len(verb.functions) > 1)
if add_postfix:
fmt = '{}_{}'.format
new_columns = [fmt(c, p) for p in postfixes for c in columns]
else:
new_columns = columns
expressions = [Expression(stmt, col)
for stmt, col in zip(stmts, new_columns)]
return expressions, new_columns | def build_expressions(verb) | Build expressions for helper verbs
Parameters
----------
verb : verb
A verb with a *functions* attribute.
Returns
-------
out : tuple
(List of Expressions, New columns). The expressions and the
new columns in which the results of those expressions will
be stored. Even when a result will be stored in a column with
an existing label, that column is still considered new,
i.e. an expression ``x='x+1'`` will create a new column `x`
to replace an old column `x`. | 3.490683 | 3.391462 | 1.029256 | def build_expressions(verb):
"""
Build expressions for helper verbs
Parameters
----------
verb : verb
A verb with a *functions* attribute.
Returns
-------
out : tuple
(List of Expressions, New columns). The expressions and the
new columns in which the results of those expressions will
be stored. Even when a result will stored in a column with
an existing label, that column is still considered new,
i.e. an expression ``x='x+1'`` will create a new column `x`
to replace an old column `x`.
"""
def partial(func, col, *args, **kwargs):
def new_func(gdf):
return func(gdf[col], *args, **kwargs)
return new_func
def make_statement(func, col):
if isinstance(func, str):
expr = '{}({})'.format(func, col)
elif callable(func):
expr = partial(func, col, *verb.args, **verb.kwargs)
else:
raise TypeError("{} is not a function".format(func))
return expr
def func_name(func):
if isinstance(func, str):
return func
try:
return func.__name__
except AttributeError:
return ''
# Generate function names. They act as identifiers (postfixed
# to the original columns) in the new_column names.
if isinstance(verb.functions, (tuple, list)):
names = (func_name(func) for func in verb.functions)
names_and_functions = zip(names, verb.functions)
else:
names_and_functions = verb.functions.items()
# Create statements for the expressions
# and postfix identifiers
columns = Selector.get(verb) # columns to act on
postfixes = []
stmts = []
for name, func in names_and_functions:
postfixes.append(name)
for col in columns:
stmts.append(make_statement(func, col))
if not stmts:
stmts = columns
# Names of the new columns
# e.g col1_mean, col2_mean, col1_std, col2_std
add_postfix = (isinstance(verb.functions, dict) or
len(verb.functions) > 1)
if add_postfix:
fmt = '{}_{}'.format
new_columns = [fmt(c, p) for p in postfixes for c in columns]
else:
new_columns = columns
expressions = [Expression(stmt, col)
for stmt, col in zip(stmts, new_columns)]
return expressions, new_columns | 0.788359 | 0.708515 |
# Note: There's an experimental JSON encoder floating around in
# pandas land that hasn't made it into the main branch. This
# function should be revisited if it ever does.
if not pd:
raise LoadError('pandas could not be imported')
if not hasattr(data, 'index'):
raise ValueError('Please load a Pandas object.')
if name:
vega_data = cls(name=name, **kwargs)
else:
vega_data = cls(name='table', **kwargs)
pd_obj = data.copy()
if columns:
pd_obj = data[columns]
if key_on != 'idx':
pd_obj.index = data[key_on]
if records:
# The worst
vega_data.values = json.loads(pd_obj.to_json(orient='records'))
return vega_data
vega_data.values = []
if isinstance(pd_obj, pd.Series):
data_key = data.name or series_key
for i, v in pd_obj.iteritems():
value = {}
value['idx'] = cls.serialize(i)
value['col'] = data_key
value['val'] = cls.serialize(v)
vega_data.values.append(value)
elif isinstance(pd_obj, pd.DataFrame):
# We have to explicitly convert the column names to strings
# because the json serializer doesn't allow for integer keys.
for i, row in pd_obj.iterrows():
for num, (k, v) in enumerate(row.iteritems()):
value = {}
value['idx'] = cls.serialize(i)
value['col'] = cls.serialize(k)
value['val'] = cls.serialize(v)
if grouped:
value['group'] = num
vega_data.values.append(value)
else:
raise ValueError('cannot load from data type '
+ type(pd_obj).__name__)
return vega_data | def from_pandas(cls, data, columns=None, key_on='idx', name=None,
series_key='data', grouped=False, records=False, **kwargs) | Load values from a pandas ``Series`` or ``DataFrame`` object
Parameters
----------
data : pandas ``Series`` or ``DataFrame``
Pandas object to import data from.
columns: list, default None
DataFrame columns to convert to Data. Keys default to col names.
If columns are given and on_index is False, x-axis data will
default to the first column.
key_on: string, default 'idx'
Value to key on for x-axis data. Defaults to index.
name : string, default None
Applies to the ``name`` attribute of the generated class. If
``None`` (default), then the ``name`` attribute of ``pd_obj`` is
used if it exists, or ``'table'`` if it doesn't.
series_key : string, default 'data'
Applies only to ``Series``. If ``None`` (default), then defaults to
data.name. For example, if ``series_key`` is ``'x'``, then the
entries of the ``values`` list
will be ``{'idx': ..., 'col': 'x', 'val': ...}``.
grouped: boolean, default False
Pass true for an extra grouping parameter
records: boolean, default False
Requires Pandas 0.12 or greater. Writes the Pandas DataFrame
using the df.to_json(orient='records') formatting.
**kwargs : dict
Additional arguments passed to the :class:`Data` constructor. | 3.233727 | 3.127881 | 1.03384 | def from_pandas(cls, data, columns=None, key_on='idx', name=None,
series_key='data', grouped=False, records=False, **kwargs):
"""
Load values from a pandas ``Series`` or ``DataFrame`` object
Parameters
----------
data : pandas ``Series`` or ``DataFrame``
Pandas object to import data from.
columns: list, default None
DataFrame columns to convert to Data. Keys default to col names.
If columns are given and on_index is False, x-axis data will
default to the first column.
key_on: string, default 'idx'
Value to key on for x-axis data. Defaults to index.
name : string, default None
Applies to the ``name`` attribute of the generated class. If
``None`` (default), then the ``name`` attribute of ``pd_obj`` is
used if it exists, or ``'table'`` if it doesn't.
series_key : string, default 'data'
Applies only to ``Series``. If ``None`` (default), then defaults to
data.name. For example, if ``series_key`` is ``'x'``, then the
entries of the ``values`` list
will be ``{'idx': ..., 'col': 'x', 'val': ...}``.
grouped: boolean, default False
Pass true for an extra grouping parameter
records: boolean, defaule False
Requires Pandas 0.12 or greater. Writes the Pandas DataFrame
using the df.to_json(orient='records') formatting.
**kwargs : dict
Additional arguments passed to the :class:`Data` constructor.
"""
# Note: There's an experimental JSON encoder floating around in
# pandas land that hasn't made it into the main branch. This
# function should be revisited if it ever does.
if not pd:
raise LoadError('pandas could not be imported')
if not hasattr(data, 'index'):
raise ValueError('Please load a Pandas object.')
if name:
vega_data = cls(name=name, **kwargs)
else:
vega_data = cls(name='table', **kwargs)
pd_obj = data.copy()
if columns:
pd_obj = data[columns]
if key_on != 'idx':
pd_obj.index = data[key_on]
if records:
# The worst
vega_data.values = json.loads(pd_obj.to_json(orient='records'))
return vega_data
vega_data.values = []
if isinstance(pd_obj, pd.Series):
data_key = data.name or series_key
for i, v in pd_obj.iteritems():
value = {}
value['idx'] = cls.serialize(i)
value['col'] = data_key
value['val'] = cls.serialize(v)
vega_data.values.append(value)
elif isinstance(pd_obj, pd.DataFrame):
# We have to explicitly convert the column names to strings
# because the json serializer doesn't allow for integer keys.
for i, row in pd_obj.iterrows():
for num, (k, v) in enumerate(row.iteritems()):
value = {}
value['idx'] = cls.serialize(i)
value['col'] = cls.serialize(k)
value['val'] = cls.serialize(v)
if grouped:
value['group'] = num
vega_data.values.append(value)
else:
raise ValueError('cannot load from data type '
+ type(pd_obj).__name__)
return vega_data | 0.655253 | 0.510496 |
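A usage sketch, assuming pandas is installed and that the class these methods belong to is importable as `Data` (its name in the vincent library):
import pandas as pd

df = pd.DataFrame({'apples': [3, 5], 'pears': [2, 4]}, index=['2015', '2016'])
table = Data.from_pandas(df, name='fruit')
print(table.values[0])  # e.g. {'idx': '2015', 'col': 'apples', 'val': 3}

wide = Data.from_pandas(df, name='fruit', records=True)
print(wide.values)      # [{'apples': 3, 'pears': 2}, {'apples': 5, 'pears': 4}]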
if not np:
raise LoadError('numpy could not be imported')
_assert_is_type('numpy object', np_obj, np.ndarray)
# Integer index if none is provided
index = index or range(np_obj.shape[0])
# Explicitly map dict-keys to strings for JSON serializer.
columns = list(map(str, columns))
index_key = index_key or cls._default_index_key
if len(index) != np_obj.shape[0]:
raise LoadError(
'length of index must be equal to number of rows of array')
elif len(columns) != np_obj.shape[1]:
raise LoadError(
'length of columns must be equal to number of columns of '
'array')
data = cls(name=name, **kwargs)
data.values = [
dict([(index_key, cls.serialize(idx))] +
[(col, x) for col, x in zip(columns, row)])
for idx, row in zip(index, np_obj.tolist())]
return data | def from_numpy(cls, np_obj, name, columns, index=None, index_key=None,
**kwargs) | Load values from a numpy array
Parameters
----------
np_obj : numpy.ndarray
numpy array to load data from
name : string
``name`` field for the data
columns : iterable
Sequence of column names, from left to right. Must have same
length as the number of columns of ``np_obj``.
index : iterable, default None
Sequence of indices from top to bottom. If ``None`` (default),
then the indices are integers starting at 0. Must have same
length as the number of rows of ``np_obj``.
index_key : string, default None
Key to use for the index. If ``None`` (default), ``idx`` is
used.
**kwargs : dict
Additional arguments passed to the :class:`Data` constructor
Notes
-----
The individual elements of ``np_obj``, ``columns``, and ``index``
must return valid values from :func:`Data.serialize`. | 3.328766 | 3.271502 | 1.017504 | def from_numpy(cls, np_obj, name, columns, index=None, index_key=None,
**kwargs):
"""
Load values from a numpy array
Parameters
----------
np_obj : numpy.ndarray
numpy array to load data from
name : string
``name`` field for the data
columns : iterable
Sequence of column names, from left to right. Must have same
length as the number of columns of ``np_obj``.
index : iterable, default None
Sequence of indices from top to bottom. If ``None`` (default),
then the indices are integers starting at 0. Must have same
length as the number of rows of ``np_obj``.
index_key : string, default None
Key to use for the index. If ``None`` (default), ``idx`` is
used.
**kwargs : dict
Additional arguments passed to the :class:`Data` constructor
Notes
-----
The individual elements of ``np_obj``, ``columns``, and ``index``
must return valid values from :func:`Data.serialize`.
"""
if not np:
raise LoadError('numpy could not be imported')
_assert_is_type('numpy object', np_obj, np.ndarray)
# Integer index if none is provided
index = index or range(np_obj.shape[0])
# Explicitly map dict-keys to strings for JSON serializer.
columns = list(map(str, columns))
index_key = index_key or cls._default_index_key
if len(index) != np_obj.shape[0]:
raise LoadError(
'length of index must be equal to number of rows of array')
elif len(columns) != np_obj.shape[1]:
raise LoadError(
'length of columns must be equal to number of columns of '
'array')
data = cls(name=name, **kwargs)
data.values = [
dict([(index_key, cls.serialize(idx))] +
[(col, x) for col, x in zip(columns, row)])
for idx, row in zip(index, np_obj.tolist())]
return data | 0.837188 | 0.722845 |
if not name:
name = 'table'
cls.raw_data = data
# Tuples
if isinstance(data, tuple):
values = [{"x": x[0], "y": x[1]} for x in data]
# Lists
elif isinstance(data, list):
values = [{"x": x, "y": y}
for x, y in zip(range(len(data) + 1), data)]
# Dicts
elif isinstance(data, dict) or isinstance(data, pd.Series):
values = [{"x": x, "y": y} for x, y in sorted(data.items())]
# Dataframes
elif isinstance(data, pd.DataFrame):
if len(columns) > 1 and use_index:
raise ValueError('If using index as x-axis, len(columns)'
'cannot be > 1')
if use_index or len(columns) == 1:
values = [{"x": cls.serialize(x[0]),
"y": cls.serialize(x[1][columns[0]])}
for x in data.iterrows()]
else:
values = [{"x": cls.serialize(x[1][columns[0]]),
"y": cls.serialize(x[1][columns[1]])}
for x in data.iterrows()]
# NumPy arrays
elif isinstance(data, np.ndarray):
values = cls._numpy_to_values(data)
else:
raise TypeError('unknown data type %s' % type(data))
return cls(name, values=values) | def keypairs(cls, data, columns=None, use_index=False, name=None) | This will format the data as Key: Value pairs, rather than the
idx/col/val style. This is useful for some transforms, and to
key choropleth map data
Standard Data Types:
List: [0, 10, 20, 30, 40]
Paired Tuples: ((0, 1), (0, 2), (0, 3))
Dict: {'A': 10, 'B': 20, 'C': 30, 'D': 40, 'E': 50}
Plus Pandas DataFrame and Series, and Numpy ndarray
Parameters
----------
data:
List, Tuple, Dict, Pandas Series/DataFrame, Numpy ndarray
columns: list, default None
If passing Pandas DataFrame, you must pass at least one column
name.If one column is passed, x-values will default to the index
values.If two column names are passed, x-values are columns[0],
y-values columns[1].
use_index: boolean, default False
Use the DataFrame index for your x-values | 2.374697 | 2.351368 | 1.009921 | def keypairs(cls, data, columns=None, use_index=False, name=None):
"""
This will format the data as Key: Value pairs, rather than the
idx/col/val style. This is useful for some transforms, and to
key choropleth map data
Standard Data Types:
List: [0, 10, 20, 30, 40]
Paired Tuples: ((0, 1), (0, 2), (0, 3))
Dict: {'A': 10, 'B': 20, 'C': 30, 'D': 40, 'E': 50}
Plus Pandas DataFrame and Series, and Numpy ndarray
Parameters
----------
data:
List, Tuple, Dict, Pandas Series/DataFrame, Numpy ndarray
columns: list, default None
If passing Pandas DataFrame, you must pass at least one column
name.If one column is passed, x-values will default to the index
values.If two column names are passed, x-values are columns[0],
y-values columns[1].
use_index: boolean, default False
Use the DataFrame index for your x-values
"""
if not name:
name = 'table'
cls.raw_data = data
# Tuples
if isinstance(data, tuple):
values = [{"x": x[0], "y": x[1]} for x in data]
# Lists
elif isinstance(data, list):
values = [{"x": x, "y": y}
for x, y in zip(range(len(data) + 1), data)]
# Dicts
elif isinstance(data, dict) or isinstance(data, pd.Series):
values = [{"x": x, "y": y} for x, y in sorted(data.items())]
# Dataframes
elif isinstance(data, pd.DataFrame):
if len(columns) > 1 and use_index:
raise ValueError('If using index as x-axis, len(columns)'
'cannot be > 1')
if use_index or len(columns) == 1:
values = [{"x": cls.serialize(x[0]),
"y": cls.serialize(x[1][columns[0]])}
for x in data.iterrows()]
else:
values = [{"x": cls.serialize(x[1][columns[0]]),
"y": cls.serialize(x[1][columns[1]])}
for x in data.iterrows()]
# NumPy arrays
elif isinstance(data, np.ndarray):
values = cls._numpy_to_values(data)
else:
raise TypeError('unknown data type %s' % type(data))
return cls(name, values=values) | 0.786889 | 0.643427 |
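The key/value form also accepts plain Python containers; a short sketch, again assuming the surrounding `Data` class:
scores = {'A': 10, 'B': 20, 'C': 30}
kv = Data.keypairs(scores, name='scores')
print(kv.values)  # [{'x': 'A', 'y': 10}, {'x': 'B', 'y': 20}, {'x': 'C', 'y': 30}]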
'''Convert a NumPy array to values attribute'''
def to_list_no_index(xvals, yvals):
return [{"x": x, "y": np.asscalar(y)}
for x, y in zip(xvals, yvals)]
if len(data.shape) == 1 or data.shape[1] == 1:
xvals = range(data.shape[0] + 1)
values = to_list_no_index(xvals, data)
elif len(data.shape) == 2:
if data.shape[1] == 2:
# NumPy arrays and matrices have different iteration rules.
if isinstance(data, np.matrix):
xidx = (0, 0)
yidx = (0, 1)
else:
xidx = 0
yidx = 1
xvals = [np.asscalar(row[xidx]) for row in data]
yvals = [np.asscalar(row[yidx]) for row in data]
values = [{"x": x, "y": y} for x, y in zip(xvals, yvals)]
else:
raise ValueError('arrays with > 2 columns not supported')
else:
raise ValueError('invalid dimensions for ndarray')
return values | def _numpy_to_values(data) | Convert a NumPy array to values attribute | 2.689877 | 2.631855 | 1.022046 | def _numpy_to_values(data):
"""
Convert a NumPy array to values attribute
"""
def to_list_no_index(xvals, yvals):
return [{"x": x, "y": np.asscalar(y)}
for x, y in zip(xvals, yvals)]
if len(data.shape) == 1 or data.shape[1] == 1:
xvals = range(data.shape[0] + 1)
values = to_list_no_index(xvals, data)
elif len(data.shape) == 2:
if data.shape[1] == 2:
# NumPy arrays and matrices have different iteration rules.
if isinstance(data, np.matrix):
xidx = (0, 0)
yidx = (0, 1)
else:
xidx = 0
yidx = 1
xvals = [np.asscalar(row[xidx]) for row in data]
yvals = [np.asscalar(row[yidx]) for row in data]
values = [{"x": x, "y": y} for x, y in zip(xvals, yvals)]
else:
raise ValueError('arrays with > 2 columns not supported')
else:
raise ValueError('invalid dimensions for ndarray')
return values | 0.601008 | 0.73848 |
retval = tuple()
for val in self.VALUES:
retval += (getattr(self, val),)
return retval | def get_value_tuple(self) | Returns a tuple of the color's values (in order). For example,
an LabColor object will return (lab_l, lab_a, lab_b), where each
member of the tuple is the float value for said variable. | 6.593547 | 5.879219 | 1.1215 | def get_value_tuple(self):
"""
Returns a tuple of the color's values (in order). For example,
an LabColor object will return (lab_l, lab_a, lab_b), where each
member of the tuple is the float value for said variable.
"""
retval = tuple()
for val in self.VALUES:
retval += (getattr(self, val),)
return retval | 0.664568 | 0.57824 |
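A short usage sketch, assuming colormath's package layout for the Lab color class:
from colormath.color_objects import LabColor

lab = LabColor(lab_l=61.3, lab_a=11.0, lab_b=-5.9)
print(lab.get_value_tuple())  # (61.3, 11.0, -5.9)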
# This holds the obect's spectral data, and will be passed to
# numpy.array() to create a numpy array (matrix) for the matrix math
# that will be done during the conversion to XYZ.
values = []
# Use the required value list to build this dynamically. Default to
# 0.0, since that ultimately won't affect the outcome due to the math
# involved.
for val in self.VALUES:
values.append(getattr(self, val, 0.0))
# Create the actual numpy array/matrix from the spectral list.
color_array = numpy.array([values])
return color_array | def get_numpy_array(self) | Dump this color into NumPy array. | 11.971473 | 11.074844 | 1.080961 | def get_numpy_array(self):
"""
Dump this color into NumPy array.
"""
# This holds the obect's spectral data, and will be passed to
# numpy.array() to create a numpy array (matrix) for the matrix math
# that will be done during the conversion to XYZ.
values = []
# Use the required value list to build this dynamically. Default to
# 0.0, since that ultimately won't affect the outcome due to the math
# involved.
for val in self.VALUES:
values.append(getattr(self, val, 0.0))
# Create the actual numpy array/matrix from the spectral list.
color_array = numpy.array([values])
return color_array | 0.562417 | 0.6705 |
blue_density = ansi_density(color, ANSI_STATUS_T_BLUE)
green_density = ansi_density(color, ANSI_STATUS_T_GREEN)
red_density = ansi_density(color, ANSI_STATUS_T_RED)
densities = [blue_density, green_density, red_density]
min_density = min(densities)
max_density = max(densities)
density_range = max_density - min_density
# See comments in density_standards.py for VISUAL_DENSITY_THRESH to
# understand what this is doing.
if density_range <= VISUAL_DENSITY_THRESH:
return ansi_density(color, ISO_VISUAL)
elif blue_density > green_density and blue_density > red_density:
return blue_density
elif green_density > blue_density and green_density > red_density:
return green_density
else:
return red_density | def auto_density(color) | Given a SpectralColor, automatically choose the correct ANSI T filter.
Returns the density calculated with the automatically selected filter.
:param SpectralColor color: The SpectralColor object to calculate
density for.
:rtype: float
:returns: The density value, with the filter selected automatically. | 2.772206 | 2.771969 | 1.000086 | def auto_density(color):
"""
Given a SpectralColor, automatically choose the correct ANSI T filter.
Returns the density calculated with the automatically selected filter.
:param SpectralColor color: The SpectralColor object to calculate
density for.
:rtype: float
:returns: The density value, with the filter selected automatically.
"""
blue_density = ansi_density(color, ANSI_STATUS_T_BLUE)
green_density = ansi_density(color, ANSI_STATUS_T_GREEN)
red_density = ansi_density(color, ANSI_STATUS_T_RED)
densities = [blue_density, green_density, red_density]
min_density = min(densities)
max_density = max(densities)
density_range = max_density - min_density
# See comments in density_standards.py for VISUAL_DENSITY_THRESH to
# understand what this is doing.
if density_range <= VISUAL_DENSITY_THRESH:
return ansi_density(color, ISO_VISUAL)
elif blue_density > green_density and blue_density > red_density:
return blue_density
elif green_density > blue_density and green_density > red_density:
return green_density
else:
return red_density | 0.848219 | 0.810291 |
color1_vector = _get_lab_color1_vector(color1)
color2_matrix = _get_lab_color2_matrix(color2)
delta_e = color_diff_matrix.delta_e_cie1976(color1_vector, color2_matrix)[0]
return numpy.asscalar(delta_e) | def delta_e_cie1976(color1, color2) | Calculates the Delta E (CIE1976) of two colors. | 3.257775 | 3.218294 | 1.012268 | def delta_e_cie1976(color1, color2):
"""
Calculates the Delta E (CIE1976) of two colors.
"""
color1_vector = _get_lab_color1_vector(color1)
color2_matrix = _get_lab_color2_matrix(color2)
delta_e = color_diff_matrix.delta_e_cie1976(color1_vector, color2_matrix)[0]
return numpy.asscalar(delta_e) | 0.693668 | 0.655694 |
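A usage sketch with two Lab colors, assuming the colormath-style imports shown:
from colormath.color_objects import LabColor
from colormath.color_diff import delta_e_cie1976

color1 = LabColor(lab_l=62.0, lab_a=14.2, lab_b=-1.8)
color2 = LabColor(lab_l=62.0, lab_a=14.4, lab_b=-1.2)
print(delta_e_cie1976(color1, color2))  # small positive value; identical colors give 0.0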
def decorator(f):
f.start_type = start_type
f.target_type = target_type
_conversion_manager.add_type_conversion(start_type, target_type, f)
return f
return decorator | def color_conversion_function(start_type, target_type) | Decorator to indicate a function that performs a conversion from one color
space to another.
This decorator will return the original function unmodified; however, it will
be registered in the _conversion_manager so it can be used to perform color
space transformations between color spaces that do not have direct
conversion functions (e.g., Luv to CMYK).
Note: For a conversion to/from RGB supply the BaseRGBColor class.
:param start_type: Starting color space type
:param target_type: Target color space type | 2.876948 | 2.635455 | 1.091632 | def color_conversion_function(start_type, target_type):
"""
Decorator to indicate a function that performs a conversion from one color
space to another.
This decorator will return the original function unmodified; however, it will
be registered in the _conversion_manager so it can be used to perform color
space transformations between color spaces that do not have direct
conversion functions (e.g., Luv to CMYK).
Note: For a conversion to/from RGB supply the BaseRGBColor class.
:param start_type: Starting color space type
:param target_type: Target color space type
"""
def decorator(f):
f.start_type = start_type
f.target_type = target_type
_conversion_manager.add_type_conversion(start_type, target_type, f)
return f
return decorator | 0.788268 | 0.714298 |
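A minimal registration sketch; it assumes the module's `_conversion_manager` and the color classes are in scope, and the conversion body is a placeholder rather than real color math:
@color_conversion_function(LabColor, XYZColor)
def example_lab_to_xyz(cobj, *args, **kwargs):
    # Placeholder body -- a real conversion would apply the CIE Lab -> XYZ formulas.
    return XYZColor(0.0, 0.0, 0.0, illuminant=cobj.illuminant)

# The decorator returns the function unchanged, but tags it and records the edge.
print(example_lab_to_xyz.start_type)   # LabColor
print(example_lab_to_xyz.target_type)  # XYZColor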
rgb = self.xyz_to_rgb(xyz)
logger.debug('RGB: {}'.format(rgb))
rgb_w = self.xyz_to_rgb(xyz_w)
logger.debug('RGB_W: {}'.format(rgb_w))
y_w = xyz_w[1]
y_b = xyz_b[1]
h_rgb = 3 * rgb_w / (rgb_w.sum())
logger.debug('H_RGB: {}'.format(h_rgb))
# Chromatic adaptation factors
if not discount_illuminant:
f_rgb = (1 + (l_a ** (1 / 3)) + h_rgb) / (1 + (l_a ** (1 / 3)) + (1 / h_rgb))
else:
f_rgb = numpy.ones(numpy.shape(h_rgb))
logger.debug('F_RGB: {}'.format(f_rgb))
# Adaptation factor
if helson_judd:
d_rgb = self._f_n((y_b / y_w) * f_l * f_rgb[1]) - self._f_n((y_b / y_w) * f_l * f_rgb)
assert d_rgb[1] == 0
else:
d_rgb = numpy.zeros(numpy.shape(f_rgb))
logger.debug('D_RGB: {}'.format(d_rgb))
# Cone bleaching factors
rgb_b = (10 ** 7) / ((10 ** 7) + 5 * l_a * (rgb_w / 100))
logger.debug('B_RGB: {}'.format(rgb_b))
if xyz_p is not None and p is not None:
logger.debug('Account for simultaneous chromatic contrast')
rgb_p = self.xyz_to_rgb(xyz_p)
rgb_w = self.adjust_white_for_scc(rgb_p, rgb_b, rgb_w, p)
# Adapt rgb using modified
rgb_a = 1 + rgb_b * (self._f_n(f_l * f_rgb * rgb / rgb_w) + d_rgb)
logger.debug('RGB_A: {}'.format(rgb_a))
return rgb_a | def _adaptation(self, f_l, l_a, xyz, xyz_w, xyz_b, xyz_p=None, p=None, helson_judd=False, discount_illuminant=True) | :param f_l: Luminance adaptation factor
:param l_a: Adapting luminance
:param xyz: Stimulus color in XYZ
:param xyz_w: Reference white color in XYZ
:param xyz_b: Background color in XYZ
:param xyz_p: Proximal field color in XYZ
:param p: Simultaneous contrast/assimilation parameter. | 2.953449 | 2.886516 | 1.023188 | def _adaptation(self, f_l, l_a, xyz, xyz_w, xyz_b, xyz_p=None, p=None, helson_judd=False, discount_illuminant=True):
"""
:param f_l: Luminance adaptation factor
:param l_a: Adapting luminance
:param xyz: Stimulus color in XYZ
:param xyz_w: Reference white color in XYZ
:param xyz_b: Background color in XYZ
:param xyz_p: Proximal field color in XYZ
:param p: Simultaneous contrast/assimilation parameter.
"""
rgb = self.xyz_to_rgb(xyz)
logger.debug('RGB: {}'.format(rgb))
rgb_w = self.xyz_to_rgb(xyz_w)
logger.debug('RGB_W: {}'.format(rgb_w))
y_w = xyz_w[1]
y_b = xyz_b[1]
h_rgb = 3 * rgb_w / (rgb_w.sum())
logger.debug('H_RGB: {}'.format(h_rgb))
# Chromatic adaptation factors
if not discount_illuminant:
f_rgb = (1 + (l_a ** (1 / 3)) + h_rgb) / (1 + (l_a ** (1 / 3)) + (1 / h_rgb))
else:
f_rgb = numpy.ones(numpy.shape(h_rgb))
logger.debug('F_RGB: {}'.format(f_rgb))
# Adaptation factor
if helson_judd:
d_rgb = self._f_n((y_b / y_w) * f_l * f_rgb[1]) - self._f_n((y_b / y_w) * f_l * f_rgb)
assert d_rgb[1] == 0
else:
d_rgb = numpy.zeros(numpy.shape(f_rgb))
logger.debug('D_RGB: {}'.format(d_rgb))
# Cone bleaching factors
rgb_b = (10 ** 7) / ((10 ** 7) + 5 * l_a * (rgb_w / 100))
logger.debug('B_RGB: {}'.format(rgb_b))
if xyz_p is not None and p is not None:
logger.debug('Account for simultaneous chromatic contrast')
rgb_p = self.xyz_to_rgb(xyz_p)
rgb_w = self.adjust_white_for_scc(rgb_p, rgb_b, rgb_w, p)
# Adapt rgb using modified
rgb_a = 1 + rgb_b * (self._f_n(f_l * f_rgb * rgb / rgb_w) + d_rgb)
logger.debug('RGB_A: {}'.format(rgb_a))
return rgb_a | 0.701496 | 0.511412 |
x_e = 0.3320
y_e = 0.1858
n = ((x / (x + z + z)) - x_e) / ((y / (x + z + z)) - y_e)
a_0 = -949.86315
a_1 = 6253.80338
a_2 = 28.70599
a_3 = 0.00004
t_1 = 0.92159
t_2 = 0.20039
t_3 = 0.07125
cct = a_0 + a_1 * numpy.exp(-n / t_1) + a_2 * numpy.exp(-n / t_2) + a_3 * numpy.exp(-n / t_3)
return cct | def _get_cct(x, y, z) | Reference
Hernandez-Andres, J., Lee, R. L., & Romero, J. (1999).
Calculating correlated color temperatures across the entire gamut of daylight and skylight chromaticities.
Applied Optics, 38(27), 5703-5709. | 3.796291 | 3.73003 | 1.017764 | def _get_cct(x, y, z):
"""
Reference
Hernandez-Andres, J., Lee, R. L., & Romero, J. (1999).
Calculating correlated color temperatures across the entire gamut of daylight and skylight chromaticities.
Applied Optics, 38(27), 5703-5709.
"""
x_e = 0.3320
y_e = 0.1858
n = ((x / (x + z + z)) - x_e) / ((y / (x + z + z)) - y_e)
a_0 = -949.86315
a_1 = 6253.80338
a_2 = 28.70599
a_3 = 0.00004
t_1 = 0.92159
t_2 = 0.20039
t_3 = 0.07125
cct = a_0 + a_1 * numpy.exp(-n / t_1) + a_2 * numpy.exp(-n / t_2) + a_3 * numpy.exp(-n / t_3)
return cct | 0.588653 | 0.530419 |
# Transform input colors to cone responses
rgb = self._xyz_to_rgb(xyz)
logger.debug("RGB: {}".format(rgb))
rgb_b = self._xyz_to_rgb(self._xyz_b)
rgb_w = self._xyz_to_rgb(xyz_w)
rgb_w = Hunt.adjust_white_for_scc(rgb, rgb_b, rgb_w, self._p)
logger.debug("RGB_W: {}".format(rgb_w))
# Compute adapted tristimulus-responses
rgb_c = self._white_adaption(rgb, rgb_w, d)
logger.debug("RGB_C: {}".format(rgb_c))
rgb_cw = self._white_adaption(rgb_w, rgb_w, d)
logger.debug("RGB_CW: {}".format(rgb_cw))
# Convert adapted tristimulus-responses to Hunt-Pointer-Estevez fundamentals
rgb_p = self._compute_hunt_pointer_estevez_fundamentals(rgb_c)
logger.debug("RGB': {}".format(rgb_p))
rgb_wp = self._compute_hunt_pointer_estevez_fundamentals(rgb_cw)
logger.debug("RGB'_W: {}".format(rgb_wp))
# Compute post-adaptation non-linearities
rgb_ap = self._compute_nonlinearities(f_l, rgb_p)
rgb_awp = self._compute_nonlinearities(f_l, rgb_wp)
return rgb_ap, rgb_awp | def _compute_adaptation(self, xyz, xyz_w, f_l, d) | Modified adaptation procedure incorporating simultaneous chromatic contrast from Hunt model.
:param xyz: Stimulus XYZ.
:param xyz_w: Reference white XYZ.
:param f_l: Luminance adaptation factor
:param d: Degree of adaptation.
:return: Tuple of adapted rgb and rgb_w arrays. | 3.020088 | 2.857865 | 1.056764 | def _compute_adaptation(self, xyz, xyz_w, f_l, d):
"""
Modified adaptation procedure incorporating simultaneous chromatic contrast from Hunt model.
:param xyz: Stimulus XYZ.
:param xyz_w: Reference white XYZ.
:param f_l: Luminance adaptation factor
:param d: Degree of adaptation.
:return: Tuple of adapted rgb and rgb_w arrays.
"""
# Transform input colors to cone responses
rgb = self._xyz_to_rgb(xyz)
logger.debug("RGB: {}".format(rgb))
rgb_b = self._xyz_to_rgb(self._xyz_b)
rgb_w = self._xyz_to_rgb(xyz_w)
rgb_w = Hunt.adjust_white_for_scc(rgb, rgb_b, rgb_w, self._p)
logger.debug("RGB_W: {}".format(rgb_w))
# Compute adapted tristimulus-responses
rgb_c = self._white_adaption(rgb, rgb_w, d)
logger.debug("RGB_C: {}".format(rgb_c))
rgb_cw = self._white_adaption(rgb_w, rgb_w, d)
logger.debug("RGB_CW: {}".format(rgb_cw))
# Convert adapted tristimulus-responses to Hunt-Pointer-Estevez fundamentals
rgb_p = self._compute_hunt_pointer_estevez_fundamentals(rgb_c)
logger.debug("RGB': {}".format(rgb_p))
rgb_wp = self._compute_hunt_pointer_estevez_fundamentals(rgb_cw)
logger.debug("RGB'_W: {}".format(rgb_wp))
# Compute post-adaptation non-linearities
rgb_ap = self._compute_nonlinearities(f_l, rgb_p)
rgb_awp = self._compute_nonlinearities(f_l, rgb_wp)
return rgb_ap, rgb_awp | 0.846768 | 0.510192 |
year = super(BuildableDayArchiveView, self).get_year()
month = super(BuildableDayArchiveView, self).get_month()
day = super(BuildableDayArchiveView, self).get_day()
fmt = self.get_day_format()
dt = date(int(year), int(month), int(day))
return dt.strftime(fmt) | def get_day(self) | Return the day from the database in the format expected by the URL. | 2.811006 | 2.513987 | 1.118147 | def get_day(self):
"""
Return the day from the database in the format expected by the URL.
"""
year = super(BuildableDayArchiveView, self).get_year()
month = super(BuildableDayArchiveView, self).get_month()
day = super(BuildableDayArchiveView, self).get_day()
fmt = self.get_day_format()
dt = date(int(year), int(month), int(day))
return dt.strftime(fmt) | 0.576244 | 0.501038 |
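The re-rendering through `strftime` is what keeps the value consistent with the URL pattern; a quick standalone illustration with the standard library:
from datetime import date

dt = date(2024, 3, 5)
print(dt.strftime('%d'))  # '05' -- zero-padded day, matching a /2024/03/05/ style URL
print(dt.strftime('%j'))  # '065' -- what you would get if the URL used day-of-year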
if isinstance(p, str):
p = string(p)
return regex(r'\s*') >> p << regex(r'\s*') | def lexeme(p) | From a parser (or string), make a parser that consumes
whitespace on either side. | 5.255884 | 4.095832 | 1.283227 | def lexeme(p):
"""
From a parser (or string), make a parser that consumes
whitespace on either side.
"""
if isinstance(p, str):
p = string(p)
return regex(r'\s*') >> p << regex(r'\s*') | 0.662524 | 0.660501 |
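A usage sketch with parsy-style combinators (the same `string`/`regex`/`>>`/`<<` primitives the helper above is built on):
from parsy import regex, string

colon = lexeme(':')                         # whitespace-tolerant token parser
number = lexeme(regex(r'[0-9]+')).map(int)

pair = string('x') >> colon >> number
print(pair.parse('x : 42 '))                # 42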
with open(schemafile) as f:
return cls(json.load(f)) | def from_schemafile(cls, schemafile) | Create a Flatson instance from a schemafile | 3.612632 | 3.453533 | 1.046069 | def from_schemafile(cls, schemafile):
"""
Create a Flatson instance from a schemafile
"""
with open(schemafile) as f:
return cls(json.load(f)) | 0.562537 | 0.519643 |
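A usage sketch, writing a throwaway schema file just for the example (equivalent to constructing `Flatson(schema)` directly):
import json
import tempfile

schema = {
    'type': 'object',
    'properties': {'name': {'type': 'string'}, 'age': {'type': 'number'}},
}
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump(schema, f)

flat = Flatson.from_schemafile(f.name)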
if self._conn.status == psycopg2.extensions.STATUS_BEGIN:
return self.READY
return self._conn.status | def _status(self) | Return the current connection status as an integer value.
The status should match one of the following constants:
- queries.Session.INTRANS: Connection established, in transaction
- queries.Session.PREPARED: Prepared for second phase of transaction
- queries.Session.READY: Connected, no active transaction
:rtype: int | 7.788153 | 6.305507 | 1.235135 | def _status(self):
"""
Return the current connection status as an integer value.
The status should match one of the following constants:
- queries.Session.INTRANS: Connection established, in transaction
- queries.Session.PREPARED: Prepared for second phase of transaction
- queries.Session.READY: Connected, no active transaction
:rtype: int
"""
if self._conn.status == psycopg2.extensions.STATUS_BEGIN:
return self.READY
return self._conn.status | 0.658253 | 0.533397 |
if not self.cursor.rowcount:
return []
self.cursor.scroll(0, 'absolute')
return self.cursor.fetchall() | def items(self) | Return all of the rows that are in the result set.
:rtype: list | 4.90019 | 4.43229 | 1.105566 | def items(self):
"""
Return all of the rows that are in the result set.
:rtype: list
"""
if not self.cursor.rowcount:
return []
self.cursor.scroll(0, 'absolute')
return self.cursor.fetchall() | 0.715735 | 0.512998 |
if self.term.is_a_tty:
return self.term.width // self.hint_width
return 1 | def num_columns(self) | Number of columns displayed. | 13.668602 | 8.479787 | 1.611904 | def num_columns(self):
"""
Number of columns displayed.
"""
if self.term.is_a_tty:
return self.term.width // self.hint_width
return 1 | 0.567817 | 0.664921 |
path_processor = ((lambda x : x) if path_prefix is None
else get_rp_stripper(path_prefix))
reports = []
for result in bads:
if len(result) == 3:
depended_lib, depending_lib, missing_archs = result
reports.append("{0} needs {1} {2} missing from {3}".format(
path_processor(depending_lib),
'archs' if len(missing_archs) > 1 else 'arch',
', '.join(sorted(missing_archs)),
path_processor(depended_lib)))
elif len(result) == 2:
depending_lib, missing_archs = result
reports.append("Required {0} {1} missing from {2}".format(
'archs' if len(missing_archs) > 1 else 'arch',
', '.join(sorted(missing_archs)),
path_processor(depending_lib)))
else:
raise ValueError('Report tuple should be length 2 or 3')
return '\n'.join(sorted(reports)) | def bads_report(bads, path_prefix=None) | Return a nice report of bad architectures in `bads`
Parameters
----------
bads : set
set of length 2 or 3 tuples. A length 2 tuple is of form
``(depending_lib, missing_archs)`` meaning that an arch in
`require_archs` was missing from ``depending_lib``. A length 3 tuple
is of form ``(depended_lib, depending_lib, missing_archs)`` where
``depended_lib`` is the filename of the library depended on,
``depending_lib`` is the library depending on ``depending_lib`` and
``missing_archs`` is a set of missing architecture strings giving
architectures present in ``depending_lib`` and missing in
``depended_lib``. An empty set means all architectures were present as
required.
path_prefix : None or str, optional
Path prefix to strip from ``depended_lib`` and ``depending_lib``. None
means do not strip anything.
Returns
-------
report : str
A nice report for printing | 2.796139 | 2.235158 | 1.25098 | def bads_report(bads, path_prefix=None):
"""
Return a nice report of bad architectures in `bads`
Parameters
----------
bads : set
set of length 2 or 3 tuples. A length 2 tuple is of form
``(depending_lib, missing_archs)`` meaning that an arch in
`require_archs` was missing from ``depending_lib``. A length 3 tuple
is of form ``(depended_lib, depending_lib, missing_archs)`` where
``depended_lib`` is the filename of the library depended on,
``depending_lib`` is the library depending on ``depending_lib`` and
``missing_archs`` is a set of missing architecture strings giving
architectures present in ``depending_lib`` and missing in
``depended_lib``. An empty set means all architectures were present as
required.
path_prefix : None or str, optional
Path prefix to strip from ``depended_lib`` and ``depending_lib``. None
means do not strip anything.
Returns
-------
report : str
A nice report for printing
"""
path_processor = ((lambda x : x) if path_prefix is None
else get_rp_stripper(path_prefix))
reports = []
for result in bads:
if len(result) == 3:
depended_lib, depending_lib, missing_archs = result
reports.append("{0} needs {1} {2} missing from {3}".format(
path_processor(depending_lib),
'archs' if len(missing_archs) > 1 else 'arch',
', '.join(sorted(missing_archs)),
path_processor(depended_lib)))
elif len(result) == 2:
depending_lib, missing_archs = result
reports.append("Required {0} {1} missing from {2}".format(
'archs' if len(missing_archs) > 1 else 'arch',
', '.join(sorted(missing_archs)),
path_processor(depending_lib)))
else:
raise ValueError('Report tuple should be length 2 or 3')
return '\n'.join(sorted(reports)) | 0.813683 | 0.522994 |
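A hedged usage sketch (not from the source): the paths below are invented, and `bads` is passed as a plain list since the function only iterates over it.
bads = [
    ('/prefix/libfoo.dylib', '/prefix/libbar.dylib', {'arm64'}),   # (depended, depending, missing archs)
    ('/prefix/libbaz.dylib', {'x86_64', 'arm64'}),                 # (depending, missing archs)
]
print(bads_report(bads, path_prefix='/prefix'))
# prints one sorted line per entry, e.g. "libbar.dylib needs arch arm64 missing from libfoo.dylib"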
if not lib_path.startswith('@rpath/'):
return lib_path
lib_rpath = lib_path.split('/', 1)[1]
for rpath in rpaths:
rpath_lib = realpath(pjoin(rpath, lib_rpath))
if os.path.exists(rpath_lib):
return rpath_lib
warnings.warn(
"Couldn't find {0} on paths:\n\t{1}".format(
lib_path,
'\n\t'.join(realpath(path) for path in rpaths),
)
)
return lib_path | def resolve_rpath(lib_path, rpaths) | Return `lib_path` with its `@rpath` resolved
If the `lib_path` doesn't have `@rpath` then it's returned as is.
If `lib_path` has `@rpath` then returns the first `rpaths`/`lib_path`
combination found. If the library can't be found in `rpaths` then a
detailed warning is printed and `lib_path` is returned as is.
Parameters
----------
lib_path : str
The path to a library file, which may or may not start with `@rpath`.
rpaths : sequence of str
A sequence of search paths, usually gotten from a call to `get_rpaths`.
Returns
-------
lib_path : str
A str with the resolved library's realpath.
"""
Return `lib_path` with its `@rpath` resolved
If the `lib_path` doesn't have `@rpath` then it's returned as is.
If `lib_path` has `@rpath` then returns the first `rpaths`/`lib_path`
combination found. If the library can't be found in `rpaths` then a
detailed warning is printed and `lib_path` is returned as is.
Parameters
----------
lib_path : str
The path to a library file, which may or may not start with `@rpath`.
rpaths : sequence of str
A sequence of search paths, usually gotten from a call to `get_rpaths`.
Returns
-------
lib_path : str
A str with the resolved library's realpath.
"""
if not lib_path.startswith('@rpath/'):
return lib_path
lib_rpath = lib_path.split('/', 1)[1]
for rpath in rpaths:
rpath_lib = realpath(pjoin(rpath, lib_rpath))
if os.path.exists(rpath_lib):
return rpath_lib
warnings.warn(
"Couldn't find {0} on paths:\n\t{1}".format(
lib_path,
'\n\t'.join(realpath(path) for path in rpaths),
)
)
return lib_path | 0.80354 | 0.54056 |
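A brief illustrative sketch; the library paths are hypothetical.
resolve_rpath('/usr/lib/libSystem.B.dylib', [])      # no '@rpath/' prefix: returned unchanged
resolve_rpath('@rpath/libfoo.dylib', ['/opt/lib'])   # '/opt/lib/libfoo.dylib' if that file exists,
                                                     # otherwise a warning is printed and the
                                                     # original '@rpath/libfoo.dylib' is returned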
N = community.shape[0]
C = community.shape[1]
T = P = np.zeros([N, N])
for t in range(len(community[0, :])):
for i in range(len(community[:, 0])):
for j in range(len(community[:, 0])):
if i == j:
continue
# T_ij indicates the number of times that i and j are assigned to the same community across time
if community[i][t] == community[j][t]:
T[i, j] += 1
# module allegiance matrix, probability that ij were assigned to the same community
P = (1/C)*T
return P | def allegiance(community) | Computes the allegiance matrix with values representing the probability that
nodes i and j were assigned to the same community by time-varying clustering methods.
parameters
----------
community : array
array of community assignment of size node,time
returns
-------
P : array
module allegiance matrix, with P_ij probability that area i and j are in the same community
Reference:
----------
Bassett, et al. (2013) “Robust detection of dynamic community structure in networks”, Chaos, 23, 1 | 4.341524 | 3.232135 | 1.343237 | def allegiance(community):
"""
Computes the allegiance matrix with values representing the probability that
nodes i and j were assigned to the same community by time-varying clustering methods.
parameters
----------
community : array
array of community assignment of size node,time
returns
-------
P : array
module allegiance matrix, with P_ij probability that area i and j are in the same community
Reference:
----------
Bassett, et al. (2013) “Robust detection of dynamic community structure in networks”, Chaos, 23, 1
"""
N = community.shape[0]
C = community.shape[1]
T = P = np.zeros([N, N])
for t in range(len(community[0, :])):
for i in range(len(community[:, 0])):
for j in range(len(community[:, 0])):
if i == j:
continue
# T_ij indicates the number of times that i and j are assigned to the same community across time
if community[i][t] == community[j][t]:
T[i, j] += 1
# module allegiance matrix, probability that ij were assigned to the same community
P = (1/C)*T
return P | 0.789518 | 0.815857 |
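A minimal sketch of the expected input and output (the labels are made up):
import numpy as np
comm = np.array([[0, 0, 1],    # community label of node 0 at t = 0, 1, 2
                 [0, 0, 1],
                 [1, 1, 1],
                 [1, 0, 0]])
P = allegiance(comm)
# P[0, 1] == 1.0 because nodes 0 and 1 share a community at every time point;
# the diagonal stays 0 because i == j pairs are skipped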
if isinstance(ncontacts, list):
if len(ncontacts) != nnodes:
raise ValueError(
'Number of contacts, if a list, should be one per node')
if isinstance(lam, list):
if len(lam) != nnodes:
raise ValueError(
'Lambda value of Poisson distribution, if a list, should be one per node')
if isinstance(lam, list) and not isinstance(ncontacts, list) or not isinstance(lam, list) and isinstance(ncontacts, list):
raise ValueError(
'When one of lambda or ncontacts is given as a list, the other argument must also be a list.')
if nettype == 'bu':
edgen = int((nnodes*(nnodes-1))/2)
elif nettype == 'bd':
edgen = int(nnodes*nnodes)
if not isinstance(lam, list) and not isinstance(ncontacts, list):
icts = np.random.poisson(lam, size=(edgen, ncontacts))
net = np.zeros([edgen, icts.sum(axis=1).max()+1])
for n in range(edgen):
net[n, np.unique(np.cumsum(icts[n]))] = 1
else:
icts = []
ict_max = 0
for n in range(edgen):
icts.append(np.random.poisson(lam[n], size=ncontacts[n]))
if sum(icts[-1]) > ict_max:
ict_max = sum(icts[-1])
net = np.zeros([nnodes, ict_max+1])
for n in range(nnodes):
net[n, np.unique(np.cumsum(icts[n]))] = 1
if nettype == 'bu':
nettmp = np.zeros([nnodes, nnodes, net.shape[-1]])
ind = np.triu_indices(nnodes, k=1)
nettmp[ind[0], ind[1], :] = net
net = nettmp + nettmp.transpose([1, 0, 2])
elif nettype == 'bd':
net = net.reshape([nnodes, nnodes, net.shape[-1]], order='F')
net = set_diagonal(net, 0)
if netrep == 'contact':
if not netinfo:
netinfo = {}
netinfo['nettype'] = 'b' + nettype[-1]
net = graphlet2contact(net, netinfo)
return net | def rand_poisson(nnodes, ncontacts, lam=1, nettype='bu', netinfo=None, netrep='graphlet') | Generate a random network where intervals between contacts are distributed by a poisson distribution
Parameters
----------
nnodes : int
Number of nodes in networks
ncontacts : int or list
Number of expected contacts (i.e. edges). If list, number of contacts for each node.
Any zeros drawn are ignored so returned degree of network can be smaller than ncontacts.
lam : int or list
Expectation of interval.
nettype : str
'bu' or 'bd'
netinfo : dict
Dictionary of additional information
netrep : str
How the output should be.
If ncontacts is a list, so should lam.
Returns
-------
net : array or dict
Random network with intervals between active edges being Poisson distributed. | 2.343398 | 2.287699 | 1.024347 | def rand_poisson(nnodes, ncontacts, lam=1, nettype='bu', netinfo=None, netrep='graphlet'):
"""
Generate a random network where intervals between contacts are distributed by a poisson distribution
Parameters
----------
nnodes : int
Number of nodes in networks
ncontacts : int or list
Number of expected contacts (i.e. edges). If list, number of contacts for each node.
Any zeros drawn are ignored so returned degree of network can be smaller than ncontacts.
lam : int or list
Expectation of interval.
nettype : str
'bu' or 'bd'
netinfo : dict
Dictionary of additional information
netrep : str
How the output should be.
If ncontacts is a list, so should lam.
Returns
-------
net : array or dict
Random network with intervals between active edges being Poisson distributed.
"""
if isinstance(ncontacts, list):
if len(ncontacts) != nnodes:
raise ValueError(
'Number of contacts, if a list, should be one per node')
if isinstance(lam, list):
if len(lam) != nnodes:
raise ValueError(
'Lambda value of Poisson distribution, if a list, should be one per node')
if isinstance(lam, list) and not isinstance(ncontacts, list) or not isinstance(lam, list) and isinstance(ncontacts, list):
raise ValueError(
'When one of lambda or ncontacts is given as a list, the other argument must also be a list.')
if nettype == 'bu':
edgen = int((nnodes*(nnodes-1))/2)
elif nettype == 'bd':
edgen = int(nnodes*nnodes)
if not isinstance(lam, list) and not isinstance(ncontacts, list):
icts = np.random.poisson(lam, size=(edgen, ncontacts))
net = np.zeros([edgen, icts.sum(axis=1).max()+1])
for n in range(edgen):
net[n, np.unique(np.cumsum(icts[n]))] = 1
else:
icts = []
ict_max = 0
for n in range(edgen):
icts.append(np.random.poisson(lam[n], size=ncontacts[n]))
if sum(icts[-1]) > ict_max:
ict_max = sum(icts[-1])
net = np.zeros([nnodes, ict_max+1])
for n in range(nnodes):
net[n, np.unique(np.cumsum(icts[n]))] = 1
if nettype == 'bu':
nettmp = np.zeros([nnodes, nnodes, net.shape[-1]])
ind = np.triu_indices(nnodes, k=1)
nettmp[ind[0], ind[1], :] = net
net = nettmp + nettmp.transpose([1, 0, 2])
elif nettype == 'bd':
net = net.reshape([nnodes, nnodes, net.shape[-1]], order='F')
net = set_diagonal(net, 0)
if netrep == 'contact':
if not netinfo:
netinfo = {}
netinfo['nettype'] = 'b' + nettype[-1]
net = graphlet2contact(net, netinfo)
return net | 0.770422 | 0.568296 |
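A quick usage sketch; the exact number of time points varies from run to run because the inter-contact intervals are drawn at random.
import numpy as np
np.random.seed(2019)
G = rand_poisson(nnodes=5, ncontacts=10, lam=2, nettype='bu')
print(G.shape)   # (5, 5, T): a binary undirected graphlet array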
if not report:
report = {}
# Note the min value of all time series will now be at least 1.
mindata = 1 - np.nanmin(data)
data = data + mindata
ind = np.triu_indices(data.shape[0], k=1)
boxcox_list = np.array([sp.stats.boxcox(np.squeeze(
data[ind[0][n], ind[1][n], :])) for n in range(0, len(ind[0]))])
boxcox_data = np.zeros(data.shape)
boxcox_data[ind[0], ind[1], :] = np.vstack(boxcox_list[:, 0])
boxcox_data[ind[1], ind[0], :] = np.vstack(boxcox_list[:, 0])
bccheck = np.array(np.transpose(boxcox_data, [2, 0, 1]))
bccheck = (bccheck - bccheck.mean(axis=0)) / bccheck.std(axis=0)
bccheck = np.squeeze(np.mean(bccheck, axis=0))
np.fill_diagonal(bccheck, 0)
report['boxcox'] = {}
report['boxcox']['performed'] = 'yes'
report['boxcox']['lambda'] = [
tuple([ind[0][n], ind[1][n], boxcox_list[n, -1]]) for n in range(0, len(ind[0]))]
report['boxcox']['shift'] = mindata
report['boxcox']['shited_to'] = 1
if np.sum(np.isnan(bccheck)) > 0:
report['boxcox'] = {}
report['boxcox']['performed'] = 'FAILED'
report['boxcox']['failure_reason'] = (
'Box cox transform is returning edges with uniform values through time. '
'This is probably due to one or more outliers or a very skewed distribution. '
'Have you corrected for sources of noise (e.g. movement)? '
'If yes, some time-series might need additional transforms to approximate to Gaussian.'
)
report['boxcox']['failure_consequence'] = (
'Box cox transform was skipped from the postprocess pipeline.'
)
boxcox_data = data - mindata
error_msg = ('TENETO WARNING: Box Cox transform problem. \n'
'Box Cox transform not performed. \n'
'See report for more details.')
print(error_msg)
return boxcox_data, report | def postpro_boxcox(data, report=None) | Performs box cox transform on everything in data.
If report variable is passed, this is added to the report. | 3.859484 | 3.796534 | 1.016581 | def postpro_boxcox(data, report=None):
"""
Performs box cox transform on everything in data.
If report variable is passed, this is added to the report.
"""
if not report:
report = {}
# Note the min value of all time series will now be at least 1.
mindata = 1 - np.nanmin(data)
data = data + mindata
ind = np.triu_indices(data.shape[0], k=1)
boxcox_list = np.array([sp.stats.boxcox(np.squeeze(
data[ind[0][n], ind[1][n], :])) for n in range(0, len(ind[0]))])
boxcox_data = np.zeros(data.shape)
boxcox_data[ind[0], ind[1], :] = np.vstack(boxcox_list[:, 0])
boxcox_data[ind[1], ind[0], :] = np.vstack(boxcox_list[:, 0])
bccheck = np.array(np.transpose(boxcox_data, [2, 0, 1]))
bccheck = (bccheck - bccheck.mean(axis=0)) / bccheck.std(axis=0)
bccheck = np.squeeze(np.mean(bccheck, axis=0))
np.fill_diagonal(bccheck, 0)
report['boxcox'] = {}
report['boxcox']['performed'] = 'yes'
report['boxcox']['lambda'] = [
tuple([ind[0][n], ind[1][n], boxcox_list[n, -1]]) for n in range(0, len(ind[0]))]
report['boxcox']['shift'] = mindata
report['boxcox']['shited_to'] = 1
if np.sum(np.isnan(bccheck)) > 0:
report['boxcox'] = {}
report['boxcox']['performed'] = 'FAILED'
report['boxcox']['failure_reason'] = (
'Box cox transform is returning edges with uniform values through time. '
'This is probably due to one or more outliers or a very skewed distribution. '
'Have you corrected for sources of noise (e.g. movement)? '
'If yes, some time-series might need additional transforms to approximate to Gaussian.'
)
report['boxcox']['failure_consequence'] = (
'Box cox transform was skipped from the postprocess pipeline.'
)
boxcox_data = data - mindata
error_msg = ('TENETO WARNING: Box Cox transform problem. \n'
'Box Cox transform not performed. \n'
'See report for more details.')
print(error_msg)
return boxcox_data, report | 0.578389 | 0.546073 |
# Data should be timexnode
report = {}
# Derivative
tdat = data[1:, :] - data[:-1, :]
# Normalize
tdat = tdat / np.std(tdat, axis=0)
# Coupling
coupling = np.array([tdat[:, i] * tdat[:, j] for i in np.arange(0,
tdat.shape[1]) for j in np.arange(0, tdat.shape[1])])
coupling = np.reshape(
coupling, [tdat.shape[1], tdat.shape[1], tdat.shape[0]])
# Average over window using strides
shape = coupling.shape[:-1] + (coupling.shape[-1] -
params['windowsize'] + 1, params['windowsize'])
strides = coupling.strides + (coupling.strides[-1],)
coupling_windowed = np.mean(np.lib.stride_tricks.as_strided(
coupling, shape=shape, strides=strides), -1)
report = {}
report['method'] = 'temporalderivative'
report['temporalderivative'] = {}
report['temporalderivative']['windowsize'] = params['windowsize']
return coupling_windowed, report | def _temporal_derivative(data, params, report) | Performs mtd method. See func: teneto.derive.derive. | 3.043904 | 3.043396 | 1.000167 | def _temporal_derivative(data, params, report):
"""
Performs mtd method. See func: teneto.derive.derive.
"""
# Data should be timexnode
report = {}
# Derivative
tdat = data[1:, :] - data[:-1, :]
# Normalize
tdat = tdat / np.std(tdat, axis=0)
# Coupling
coupling = np.array([tdat[:, i] * tdat[:, j] for i in np.arange(0,
tdat.shape[1]) for j in np.arange(0, tdat.shape[1])])
coupling = np.reshape(
coupling, [tdat.shape[1], tdat.shape[1], tdat.shape[0]])
# Average over window using strides
shape = coupling.shape[:-1] + (coupling.shape[-1] -
params['windowsize'] + 1, params['windowsize'])
strides = coupling.strides + (coupling.strides[-1],)
coupling_windowed = np.mean(np.lib.stride_tricks.as_strided(
coupling, shape=shape, strides=strides), -1)
report = {}
report['method'] = 'temporalderivative'
report['temporalderivative'] = {}
report['temporalderivative']['windowsize'] = params['windowsize']
return coupling_windowed, report | 0.729496 | 0.605391 |
if threshold_type == 'percent':
netout = binarize_percent(netin, threshold_level, sign, axis)
elif threshold_type == 'magnitude':
netout = binarize_magnitude(netin, threshold_level, sign)
elif threshold_type == 'rdp':
netout = binarize_rdp(netin, threshold_level, sign, axis)
else:
raise ValueError('Unknown value to parameter: threshold_type.')
return netout | def binarize(netin, threshold_type, threshold_level, sign='pos', axis='time') | Binarizes a network, returning the network. General wrapper function for different binarization functions.
Parameters
----------
netin : array or dict
Network (graphlet or contact representation),
threshold_type : str
What type of thresholds to make binarization. Options: 'rdp', 'percent', 'magnitude'.
threshold_level : float
Parameter dependent on threshold type.
If 'rdp', it is the delta (i.e. error allowed in compression).
If 'percent', it is the percentage to keep (e.g. 0.1, means keep 10% of signal).
If 'magnitude', it is the amplitude of signal to keep.
sign : str, default='pos'
States the sign of the thresholding. Can be 'pos', 'neg' or 'both'. If "neg", only negative values are thresholded and vice versa.
axis : str
Threshold over the specified axis. Valid for percent and rdp. Can be time or graphlet.
Returns
-------
netout : array or dict (depending on input)
Binarized network | 2.25078 | 2.055175 | 1.095177 | def binarize(netin, threshold_type, threshold_level, sign='pos', axis='time'):
"""
Binarizes a network, returning the network. General wrapper function for different binarization functions.
Parameters
----------
netin : array or dict
Network (graphlet or contact representation),
threshold_type : str
What type of thresholds to make binarization. Options: 'rdp', 'percent', 'magnitude'.
threshold_level : str
Paramter dependent on threshold type.
If 'rdp', it is the delta (i.e. error allowed in compression).
If 'percent', it is the percentage to keep (e.g. 0.1, means keep 10% of signal).
If 'magnitude', it is the amplitude of signal to keep.
sign : str, default='pos'
States the sign of the thresholding. Can be 'pos', 'neg' or 'both'. If "neg", only negative values are thresholded and vice versa.
axis : str
Threshold over specfied axis. Valid for percent and rdp. Can be time or graphlet.
Returns
-------
netout : array or dict (depending on input)
Binarized network
"""
if threshold_type == 'percent':
netout = binarize_percent(netin, threshold_level, sign, axis)
elif threshold_type == 'magnitude':
netout = binarize_magnitude(netin, threshold_level, sign)
elif threshold_type == 'rdp':
netout = binarize_rdp(netin, threshold_level, sign, axis)
else:
raise ValueError('Unknown value to parameter: threshold_type.')
return netout | 0.881385 | 0.869604 |
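A rough sketch assuming a weighted undirected graphlet array; whether boundary values are kept depends on the underlying thresholding routine, which is not shown here.
import numpy as np
W = np.random.rand(4, 4, 10)              # weighted (node, node, time) array
W = (W + W.transpose([1, 0, 2])) / 2      # symmetrise so it is treated as undirected
B = binarize(W, threshold_type='magnitude', threshold_level=0.5)
# B is binary: only edges whose weight passes the 0.5 magnitude threshold survive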
inputtype = checkInput(netIn)
# Convert TN to G representation
if inputtype == 'TN' and 'TN' in allowedformats and outputformat != 'TN':
G = netIn.df_to_array()
netInfo = {'nettype': netIn.nettype, 'netshape': netIn.netshape}
elif inputtype == 'TN' and 'TN' in allowedformats and outputformat == 'TN':
TN = netIn
elif inputtype == 'C' and 'C' in allowedformats and outputformat == 'G':
G = contact2graphlet(netIn)
netInfo = dict(netIn)
netInfo.pop('contacts')
elif inputtype == 'C' and 'C' in allowedformats and outputformat == 'TN':
TN = TemporalNetwork(from_dict=netIn)
elif inputtype == 'G' and 'G' in allowedformats and outputformat == 'TN':
TN = TemporalNetwork(from_array=netIn)
# Get network type if not set yet
elif inputtype == 'G' and 'G' in allowedformats:
netInfo = {}
netInfo['netshape'] = netIn.shape
netInfo['nettype'] = gen_nettype(netIn)
G = netIn
elif inputtype == 'C' and outputformat == 'C':
pass
else:
raise ValueError('Input invalid.')
if outputformat == 'TN' and not isinstance(TN.network, str):
TN.network['i'] = TN.network['i'].astype(int)
TN.network['j'] = TN.network['j'].astype(int)
TN.network['t'] = TN.network['t'].astype(int)
if outputformat == 'C' or outputformat == 'G':
netInfo['inputtype'] = inputtype
if inputtype != 'C' and outputformat == 'C':
C = graphlet2contact(G, netInfo)
if outputformat == 'G':
return G, netInfo
elif outputformat == 'C':
return C
elif outputformat == 'TN':
return TN | def process_input(netIn, allowedformats, outputformat='G') | Takes input network and checks what the input is.
Parameters
----------
netIn : array, dict, or TemporalNetwork
Network (graphlet, contact or object)
allowedformats : str
Which format of network objects that are allowed. Options: 'C', 'TN', 'G'.
outputformat: str, default=G
Target output format. Options: 'C' or 'G'.
Returns
-------
C : dict
OR
G : array
Graphlet representation.
netInfo : dict
Metainformation about network.
OR
tnet : object
object of TemporalNetwork class | 2.646709 | 2.436064 | 1.086469 | def process_input(netIn, allowedformats, outputformat='G'):
"""
Takes input network and checks what the input is.
Parameters
----------
netIn : array, dict, or TemporalNetwork
Network (graphlet, contact or object)
allowedformats : str
Which formats of network objects are allowed. Options: 'C', 'TN', 'G'.
outputformat: str, default=G
Target output format. Options: 'C', 'G' or 'TN'.
Returns
-------
C : dict
OR
G : array
Graphlet representation.
netInfo : dict
Metainformation about network.
OR
tnet : object
object of TemporalNetwork class
"""
inputtype = checkInput(netIn)
# Convert TN to G representation
if inputtype == 'TN' and 'TN' in allowedformats and outputformat != 'TN':
G = netIn.df_to_array()
netInfo = {'nettype': netIn.nettype, 'netshape': netIn.netshape}
elif inputtype == 'TN' and 'TN' in allowedformats and outputformat == 'TN':
TN = netIn
elif inputtype == 'C' and 'C' in allowedformats and outputformat == 'G':
G = contact2graphlet(netIn)
netInfo = dict(netIn)
netInfo.pop('contacts')
elif inputtype == 'C' and 'C' in allowedformats and outputformat == 'TN':
TN = TemporalNetwork(from_dict=netIn)
elif inputtype == 'G' and 'G' in allowedformats and outputformat == 'TN':
TN = TemporalNetwork(from_array=netIn)
# Get network type if not set yet
elif inputtype == 'G' and 'G' in allowedformats:
netInfo = {}
netInfo['netshape'] = netIn.shape
netInfo['nettype'] = gen_nettype(netIn)
G = netIn
elif inputtype == 'C' and outputformat == 'C':
pass
else:
raise ValueError('Input invalid.')
if outputformat == 'TN' and not isinstance(TN.network, str):
TN.network['i'] = TN.network['i'].astype(int)
TN.network['j'] = TN.network['j'].astype(int)
TN.network['t'] = TN.network['t'].astype(int)
if outputformat == 'C' or outputformat == 'G':
netInfo['inputtype'] = inputtype
if inputtype != 'C' and outputformat == 'C':
C = graphlet2contact(G, netInfo)
if outputformat == 'G':
return G, netInfo
elif outputformat == 'C':
return C
elif outputformat == 'TN':
return TN | 0.740737 | 0.682097 |
d = collections.OrderedDict()
for c in C['contacts']:
ct = tuple(c)
if ct in d:
d[ct] += 1
else:
d[ct] = 1
new_contacts = []
new_values = []
for (key, value) in d.items():
new_values.append(value)
new_contacts.append(key)
C_out = C
C_out['contacts'] = new_contacts
C_out['values'] = new_values
return C_out | def multiple_contacts_get_values(C) | Given an contact representation with repeated contacts, this function removes duplicates and creates a value
Parameters
----------
C : dict
contact representation with multiple repeated contacts.
Returns
-------
:C_out: dict
Contact representation with duplicate contacts removed and the number of duplicates is now in the 'values' field. | 2.388001 | 2.190151 | 1.090336 | def multiple_contacts_get_values(C):
"""
Given a contact representation with repeated contacts, this function removes the duplicates and counts them in a 'values' field
Parameters
----------
C : dict
contact representation with multiple repeated contacts.
Returns
-------
:C_out: dict
Contact representation with duplicate contacts removed and the number of duplicates is now in the 'values' field.
"""
d = collections.OrderedDict()
for c in C['contacts']:
ct = tuple(c)
if ct in d:
d[ct] += 1
else:
d[ct] = 1
new_contacts = []
new_values = []
for (key, value) in d.items():
new_values.append(value)
new_contacts.append(key)
C_out = C
C_out['contacts'] = new_contacts
C_out['values'] = new_values
return C_out | 0.820343 | 0.676834 |
if len(df) > 0:
idx = np.array(list(map(list, df.values)))
G = np.zeros([netshape[0], netshape[0], netshape[1]])
if idx.shape[1] == 3:
if nettype[-1] == 'u':
idx = np.vstack([idx, idx[:, [1, 0, 2]]])
idx = idx.astype(int)
G[idx[:, 0], idx[:, 1], idx[:, 2]] = 1
elif idx.shape[1] == 4:
if nettype[-1] == 'u':
idx = np.vstack([idx, idx[:, [1, 0, 2, 3]]])
weights = idx[:, 3]
idx = np.array(idx[:, :3], dtype=int)
G[idx[:, 0], idx[:, 1], idx[:, 2]] = weights
else:
G = np.zeros([netshape[0], netshape[0], netshape[1]])
return G | def df_to_array(df, netshape, nettype) | Returns a numpy array (snapshot representation) from thedataframe contact list
Parameters:
df : pandas df
pandas df with columns, i,j,t.
netshape : tuple
network shape, format: (node, time)
nettype : str
'wu', 'wd', 'bu', 'bd'
Returns:
--------
G : array
(node,node,time) array for the network | 1.946345 | 1.890199 | 1.029704 | def df_to_array(df, netshape, nettype):
"""
Returns a numpy array (snapshot representation) from the dataframe contact list
Parameters:
df : pandas df
pandas df with columns, i,j,t.
netshape : tuple
network shape, format: (node, time)
nettype : str
'wu', 'wd', 'bu', 'bd'
Returns:
--------
G : array
(node,node,time) array for the network
"""
if len(df) > 0:
idx = np.array(list(map(list, df.values)))
G = np.zeros([netshape[0], netshape[0], netshape[1]])
if idx.shape[1] == 3:
if nettype[-1] == 'u':
idx = np.vstack([idx, idx[:, [1, 0, 2]]])
idx = idx.astype(int)
G[idx[:, 0], idx[:, 1], idx[:, 2]] = 1
elif idx.shape[1] == 4:
if nettype[-1] == 'u':
idx = np.vstack([idx, idx[:, [1, 0, 2, 3]]])
weights = idx[:, 3]
idx = np.array(idx[:, :3], dtype=int)
G[idx[:, 0], idx[:, 1], idx[:, 2]] = weights
else:
G = np.zeros([netshape[0], netshape[0], netshape[1]])
return G | 0.704592 | 0.628835 |
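A minimal sketch:
import numpy as np
import pandas as pd
df = pd.DataFrame({'i': [0, 1], 'j': [1, 2], 't': [0, 1]})   # two binary contacts
G = df_to_array(df, netshape=(3, 2), nettype='bu')
# G.shape == (3, 3, 2); G[0, 1, 0] == G[1, 0, 0] == 1 because 'bu' mirrors each edge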
if distance_func_name == 'default' and netinfo['nettype'][0] == 'b':
print('Default distance function specified. As network is binary, using Hamming')
distance_func_name = 'hamming'
elif distance_func_name == 'default' and netinfo['nettype'][0] == 'w':
distance_func_name = 'euclidean'
print(
'Default distance function specified. '
'As network is weighted, using Euclidean')
return distance_func_name | def check_distance_funciton_input(distance_func_name, netinfo) | Checks distance_func_name; if it is specified as 'default', a default distance function is selected based on the network type.
Parameters
----------
distance_func_name : str
distance function name.
netinfo : dict
the output of utils.process_input
Returns
-------
distance_func_name : str
distance function name. | 3.215411 | 3.008744 | 1.068689 | def check_distance_funciton_input(distance_func_name, netinfo):
"""
Checks distance_func_name; if it is specified as 'default', a default distance function is selected based on the network type.
Parameters
----------
distance_func_name : str
distance function name.
netinfo : dict
the output of utils.process_input
Returns
-------
distance_func_name : str
distance function name.
"""
if distance_func_name == 'default' and netinfo['nettype'][0] == 'b':
print('Default distance function specified. As network is binary, using Hamming')
distance_func_name = 'hamming'
elif distance_func_name == 'default' and netinfo['nettype'][0] == 'w':
distance_func_name = 'euclidean'
print(
'Default distance funciton specified. '
'As network is weighted, using Euclidean')
return distance_func_name | 0.871229 | 0.568895 |
if isinstance(parcellation, str):
parcin = ''
if '+' in parcellation:
parcin = parcellation
parcellation = parcellation.split('+')[0]
if '+OH' in parcin:
subcortical = True
else:
subcortical = None
if '+SUIT' in parcin:
cerebellar = True
else:
cerebellar = None
if not parc_type or not parc_params:
path = tenetopath[0] + '/data/parcellation_defaults/defaults.json'
with open(path) as data_file:
defaults = json.load(data_file)
if not parc_type:
parc_type = defaults[parcellation]['type']
print('Using default parcellation type')
if not parc_params:
parc_params = defaults[parcellation]['params']
print('Using default parameters')
if parc_type == 'sphere':
parcellation = load_parcellation_coords(parcellation)
seed = NiftiSpheresMasker(np.array(parcellation), **parc_params)
data = seed.fit_transform(data_path)
elif parc_type == 'region':
path = tenetopath[0] + '/data/parcellation/' + parcellation + '.nii.gz'
region = NiftiLabelsMasker(path, **parc_params)
data = region.fit_transform(data_path)
else:
raise ValueError('Unknown parc_type specified')
if subcortical:
subatlas = fetch_atlas_harvard_oxford('sub-maxprob-thr0-2mm')['maps']
region = NiftiLabelsMasker(subatlas, **parc_params)
data_sub = region.fit_transform(data_path)
data = np.hstack([data, data_sub])
if cerebellar:
path = tenetopath[0] + '/data/parcellation/Cerebellum-SUIT_space-MNI152NLin2009cAsym.nii.gz'
region = NiftiLabelsMasker(path, **parc_params)
data_cerebellar = region.fit_transform(data_path)
data = np.hstack([data, data_cerebellar])
return data | def make_parcellation(data_path, parcellation, parc_type=None, parc_params=None) | Performs a parcellation which reduces voxel space to regions of interest (brain data).
Parameters
----------
data_path : str
Path to .nii image.
parcellation : str
Specify which parcellation you would like to use. For MNI: 'gordon2014_333', 'power2012_264'. For TAL: 'shen2013_278'.
It is possible to add the OH subcortical atlas on top of a cortical atlas (e.g. gordon) by adding:
'+OH' (for the Harvard-Oxford subcortical atlas) and '+SUIT' for the SUIT cerebellar atlas.
e.g. 'gordon2014_333+OH+SUIT'
parc_type : str
Can be 'sphere' or 'region'. If nothing is specified, the default for that parcellation will be used.
parc_params : dict
**kwargs for nilearn functions
Returns
-------
data : array
Data after the parcellation.
NOTE
----
These functions make use of nilearn. Please cite nilearn if used in a publication.
"""
Performs a parcellation which reduces voxel space to regions of interest (brain data).
Parameters
----------
data_path : str
Path to .nii image.
parcellation : str
Specify which parcellation you would like to use. For MNI: 'gordon2014_333', 'power2012_264'. For TAL: 'shen2013_278'.
It is possible to add the OH subcortical atlas on top of a cortical atlas (e.g. gordon) by adding:
'+OH' (for the Harvard-Oxford subcortical atlas) and '+SUIT' for the SUIT cerebellar atlas.
e.g. 'gordon2014_333+OH+SUIT'
parc_type : str
Can be 'sphere' or 'region'. If nothing is specified, the default for that parcellation will be used.
parc_params : dict
**kwargs for nilearn functions
Returns
-------
data : array
Data after the parcellation.
NOTE
----
These functions make use of nilearn. Please cite nilearn if used in a publication.
"""
if isinstance(parcellation, str):
parcin = ''
if '+' in parcellation:
parcin = parcellation
parcellation = parcellation.split('+')[0]
if '+OH' in parcin:
subcortical = True
else:
subcortical = None
if '+SUIT' in parcin:
cerebellar = True
else:
cerebellar = None
if not parc_type or not parc_params:
path = tenetopath[0] + '/data/parcellation_defaults/defaults.json'
with open(path) as data_file:
defaults = json.load(data_file)
if not parc_type:
parc_type = defaults[parcellation]['type']
print('Using default parcellation type')
if not parc_params:
parc_params = defaults[parcellation]['params']
print('Using default parameters')
if parc_type == 'sphere':
parcellation = load_parcellation_coords(parcellation)
seed = NiftiSpheresMasker(np.array(parcellation), **parc_params)
data = seed.fit_transform(data_path)
elif parc_type == 'region':
path = tenetopath[0] + '/data/parcellation/' + parcellation + '.nii.gz'
region = NiftiLabelsMasker(path, **parc_params)
data = region.fit_transform(data_path)
else:
raise ValueError('Unknown parc_type specified')
if subcortical:
subatlas = fetch_atlas_harvard_oxford('sub-maxprob-thr0-2mm')['maps']
region = NiftiLabelsMasker(subatlas, **parc_params)
data_sub = region.fit_transform(data_path)
data = np.hstack([data, data_sub])
if cerebellar:
path = tenetopath[0] + '/data/parcellation/Cerebellum-SUIT_space-MNI152NLin2009cAsym.nii.gz'
region = NiftiLabelsMasker(path, **parc_params)
data_cerebellar = region.fit_transform(data_path)
data = np.hstack([data, data_cerebellar])
return data | 0.750238 | 0.540318 |
steps = (1.0/(N-1)) * (stop - start)
if np.isscalar(steps):
return steps*np.arange(N) + start
else:
return steps[:, None]*np.arange(N) + start[:, None] | def create_traj_ranges(start, stop, N) | Fills in the trajectory range.
# Adapted from https://stackoverflow.com/a/40624614 | 3.039685 | 2.983643 | 1.018783 | def create_traj_ranges(start, stop, N):
"""
Fills in the trajectory range.
# Adapted from https://stackoverflow.com/a/40624614
"""
steps = (1.0/(N-1)) * (stop - start)
if np.isscalar(steps):
return steps*np.arange(N) + start
else:
return steps[:, None]*np.arange(N) + start[:, None] | 0.559892 | 0.509642 |
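Two quick examples, with values following directly from the definition above:
import numpy as np
create_traj_ranges(0, 4, 5)        # array([0., 1., 2., 3., 4.])
create_traj_ranges(np.array([0, 10]), np.array([1, 20]), 3)
# array([[ 0. ,  0.5,  1. ],
#        [10. , 15. , 20. ]])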
newnetwork = tnet.network.copy()
newnetwork['i'] = (tnet.network['i']) + \
((tnet.netshape[0]) * (tnet.network['t']))
newnetwork['j'] = (tnet.network['j']) + \
((tnet.netshape[0]) * (tnet.network['t']))
if 'weight' not in newnetwork.columns:
newnetwork['weight'] = 1
newnetwork.drop('t', axis=1, inplace=True)
timepointconns = pd.DataFrame()
timepointconns['i'] = np.arange(0, (tnet.N*tnet.T)-tnet.N)
timepointconns['j'] = np.arange(tnet.N, (tnet.N*tnet.T))
timepointconns['weight'] = intersliceweight
supranet = pd.concat([newnetwork, timepointconns]).reset_index(drop=True)
return supranet | def create_supraadjacency_matrix(tnet, intersliceweight=1) | Returns a supraadjacency matrix from a temporal network structure
Parameters
--------
tnet : TemporalNetwork
Temporal network (any network type)
intersliceweight : int
Weight that links the same node from adjacent time-points
Returns
--------
supranet : dataframe
Supraadjacency matrix | 2.527046 | 2.362498 | 1.06965 | def create_supraadjacency_matrix(tnet, intersliceweight=1):
"""
Returns a supraadjacency matrix from a temporal network structure
Parameters
--------
tnet : TemporalNetwork
Temporal network (any network type)
intersliceweight : int
Weight that links the same node from adjacent time-points
Returns
--------
supranet : dataframe
Supraadjacency matrix
"""
newnetwork = tnet.network.copy()
newnetwork['i'] = (tnet.network['i']) + \
((tnet.netshape[0]) * (tnet.network['t']))
newnetwork['j'] = (tnet.network['j']) + \
((tnet.netshape[0]) * (tnet.network['t']))
if 'weight' not in newnetwork.columns:
newnetwork['weight'] = 1
newnetwork.drop('t', axis=1, inplace=True)
timepointconns = pd.DataFrame()
timepointconns['i'] = np.arange(0, (tnet.N*tnet.T)-tnet.N)
timepointconns['j'] = np.arange(tnet.N, (tnet.N*tnet.T))
timepointconns['weight'] = intersliceweight
supranet = pd.concat([newnetwork, timepointconns]).reset_index(drop=True)
return supranet | 0.846308 | 0.652158 |
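A hedged sketch, assuming `TemporalNetwork` can be imported from the teneto package as in recent releases:
import numpy as np
from teneto import TemporalNetwork
G = np.zeros([2, 2, 3])
G[0, 1, :] = G[1, 0, :] = 1                     # nodes 0 and 1 connected at every time point
tnet = TemporalNetwork(from_array=G)
supra = create_supraadjacency_matrix(tnet, intersliceweight=1)
# supra is a dataframe over 2 * 3 = 6 supra-nodes; the appended rows link node n at
# time t to node n at time t + 1 with the interslice weight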
com_membership = np.array(com_membership)
D = []
for i in range(com_membership.shape[0]):
for j in range(i+1, com_membership.shape[0]):
con = np.sum((com_membership[i, :] - com_membership[j, :])
== 0, axis=-1) / com_membership.shape[-1]
twhere = np.where(con > th)[0]
D += list(zip(*[np.repeat(i, len(twhere)).tolist(), np.repeat(j,
len(twhere)).tolist(), twhere.tolist(), con[twhere].tolist()]))
if len(D) > 0:
D = pd.DataFrame(D, columns=['i', 'j', 't', 'weight'])
D = TemporalNetwork(from_df=D)
D = create_supraadjacency_matrix(D, intersliceweight=0)
Dnx = tnet_to_nx(D)
else:
Dnx = None
return Dnx | def make_consensus_matrix(com_membership, th=0.5) | Makes the consensus matrix.
Parameters
----------
com_membership : array
Shape should be node, time, iteration.
th : float
threshold to cancel noisy edges
Returns
-------
D : array
consensus matrix | 3.630805 | 3.608872 | 1.006077 | def make_consensus_matrix(com_membership, th=0.5):
"""
r"""
Makes the consensus matrix
.
Parameters
----------
com_membership : array
Shape should be node, time, iteration.
th : float
threshold to cancel noisy edges
Returns
-------
D : array
consensus matrix
"""
com_membership = np.array(com_membership)
D = []
for i in range(com_membership.shape[0]):
for j in range(i+1, com_membership.shape[0]):
con = np.sum((com_membership[i, :] - com_membership[j, :])
== 0, axis=-1) / com_membership.shape[-1]
twhere = np.where(con > th)[0]
D += list(zip(*[np.repeat(i, len(twhere)).tolist(), np.repeat(j,
len(twhere)).tolist(), twhere.tolist(), con[twhere].tolist()]))
if len(D) > 0:
D = pd.DataFrame(D, columns=['i', 'j', 't', 'weight'])
D = TemporalNetwork(from_df=D)
D = create_supraadjacency_matrix(D, intersliceweight=0)
Dnx = tnet_to_nx(D)
else:
Dnx = None
return Dnx | 0.793316 | 0.604428 |
com_membership = np.array(com_membership)
# relabel the first time point's communities as consecutive indices starting at 0
com_membership[:, 0] = clean_community_indexes(com_membership[:, 0])
# loop over all timepoints, get jaccard distance in greedy manner for largest community to time period before
for t in range(1, com_membership.shape[1]):
ct, counts_t = np.unique(com_membership[:, t], return_counts=True)
ct = ct[np.argsort(counts_t)[::-1]]
c1back = np.unique(com_membership[:, t-1])
new_index = np.zeros(com_membership.shape[0])
for n in ct:
if len(c1back) > 0:
d = np.ones(int(c1back.max())+1)
for m in c1back:
v1 = np.zeros(com_membership.shape[0])
v2 = np.zeros(com_membership.shape[0])
v1[com_membership[:, t] == n] = 1
v2[com_membership[:, t-1] == m] = 1
d[int(m)] = jaccard(v1, v2)
bestval = np.argmin(d)
else:
bestval = new_index.max() + 1
new_index[com_membership[:, t] == n] = bestval
c1back = np.array(np.delete(c1back, np.where(c1back == bestval)))
com_membership[:, t] = new_index
return com_membership | def make_temporal_consensus(com_membership) | Matches community labels across time-points
Jaccard matching is done in a greedy fashion, matching the largest community at t with the community at t-1.
Parameters
----------
com_membership : array
Shape should be node, time.
Returns
-------
D : array
temporal consensus matrix using Jaccard distance | 3.279249 | 3.015183 | 1.087579 | def make_temporal_consensus(com_membership):
"""
r"""
Matches community labels accross time-points
Jaccard matching is in a greedy fashiong. Matching the largest community at t with the community at t-1.
Parameters
----------
com_membership : array
Shape should be node, time.
Returns
-------
D : array
temporal consensus matrix using Jaccard distance
"""
com_membership = np.array(com_membership)
# relabel the first time point's communities as consecutive indices starting at 0
com_membership[:, 0] = clean_community_indexes(com_membership[:, 0])
# loop over all timepoints, get jaccard distance in greedy manner for largest community to time period before
for t in range(1, com_membership.shape[1]):
ct, counts_t = np.unique(com_membership[:, t], return_counts=True)
ct = ct[np.argsort(counts_t)[::-1]]
c1back = np.unique(com_membership[:, t-1])
new_index = np.zeros(com_membership.shape[0])
for n in ct:
if len(c1back) > 0:
d = np.ones(int(c1back.max())+1)
for m in c1back:
v1 = np.zeros(com_membership.shape[0])
v2 = np.zeros(com_membership.shape[0])
v1[com_membership[:, t] == n] = 1
v2[com_membership[:, t-1] == m] = 1
d[int(m)] = jaccard(v1, v2)
bestval = np.argmin(d)
else:
bestval = new_index.max() + 1
new_index[com_membership[:, t] == n] = bestval
c1back = np.array(np.delete(c1back, np.where(c1back == bestval)))
com_membership[:, t] = new_index
return com_membership | 0.821778 | 0.665954 |
# Preallocate
flex = np.zeros(communities.shape[0])
# Go from the second time point to last, compare with time-point before
for t in range(1, communities.shape[1]):
flex[communities[:, t] != communities[:, t-1]] += 1
# Normalize
flex = flex / (communities.shape[1] - 1)
return flex | def flexibility(communities) | Amount a node changes community
Parameters
----------
communities : array
Community array of shape (node,time)
Returns
--------
flex : array
Array with the flexibility of each node.
Notes
-----
Flexibility calculates the number of times a node switches its community label during a time series. It is normalized by the number of possible changes which could occur. It is important to make sure that the different community labels across time points are not arbitrary.
References
-----------
Bassett, DS, Wymbs N, Porter MA, Mucha P, Carlson JM, Grafton ST. Dynamic reconfiguration of human brain networks during learning. PNAS, 2011, 108(18):7641-6. | 4.733977 | 3.730659 | 1.268938 | def flexibility(communities):
"""
Amount a node changes community
Parameters
----------
communities : array
Community array of shape (node,time)
Returns
--------
flex : array
Array with the flexibility of each node.
Notes
-----
Flexibility calculates the number of times a node switches its community label during a time series. It is normalized by the number of possible changes which could occur. It is important to make sure that the different community labels across time points are not arbitrary.
References
-----------
Bassett, DS, Wymbs N, Porter MA, Mucha P, Carlson JM, Grafton ST. Dynamic reconfiguration of human brain networks during learning. PNAS, 2011, 108(18):7641-6.
"""
# Preallocate
flex = np.zeros(communities.shape[0])
# Go from the second time point to last, compare with time-point before
for t in range(1, communities.shape[1]):
flex[communities[:, t] != communities[:, t-1]] += 1
# Normalize
flex = flex / (communities.shape[1] - 1)
return flex | 0.837437 | 0.808729 |
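A small worked example:
import numpy as np
comm = np.array([[0, 0, 1, 1],    # node 0 switches once across 3 transitions
                 [0, 1, 0, 1],    # node 1 switches at every transition
                 [2, 2, 2, 2]])   # node 2 never switches
flexibility(comm)                 # array([0.33333333, 1.        , 0.        ])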
relfun = []
threshold = []
for ec in exclusion_criteria:
if ec[0:2] == '>=':
relfun.append(np.greater_equal)
threshold.append(float(ec[2:]))
elif ec[0:2] == '<=':
relfun.append(np.less_equal)
threshold.append(float(ec[2:]))
elif ec[0] == '>':
relfun.append(np.greater)
threshold.append(float(ec[1:]))
elif ec[0] == '<':
relfun.append(np.less)
threshold.append(float(ec[1:]))
else:
raise ValueError('exclusion criteria must begin with >, <, >= or <=')
return relfun, threshold | def process_exclusion_criteria(exclusion_criteria) | Parses an exclusion criteria string to get the function and threshold.
Parameters
----------
exclusion_criteria : list
list of strings where each string is of the format [relation][threshold]. E.g. \'<0.5\' or \'>=1\'
Returns
-------
relfun : list
list of numpy functions for the exclusion criteria
threshold : list
list of floats for threshold for each relfun | 2.13075 | 1.827252 | 1.166095 | def process_exclusion_criteria(exclusion_criteria):
"""
Parses an exclusion criteria string to get the function and threshold.
Parameters
----------
exclusion_criteria : list
list of strings where each string is of the format [relation][threshold]. E.g. \'<0.5\' or \'>=1\'
Returns
-------
relfun : list
list of numpy functions for the exclusion criteria
threshold : list
list of floats for threshold for each relfun
"""
relfun = []
threshold = []
for ec in exclusion_criteria:
if ec[0:2] == '>=':
relfun.append(np.greater_equal)
threshold.append(float(ec[2:]))
elif ec[0:2] == '<=':
relfun.append(np.less_equal)
threshold.append(float(ec[2:]))
elif ec[0] == '>':
relfun.append(np.greater)
threshold.append(float(ec[1:]))
elif ec[0] == '<':
relfun.append(np.less)
threshold.append(float(ec[1:]))
else:
raise ValueError('exclusion crieria must being with >,<,>= or <=')
return relfun, threshold | 0.776999 | 0.627866 |
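For example:
relfun, threshold = process_exclusion_criteria(['>=0.5', '<2'])
relfun[0](0.7, threshold[0])   # np.greater_equal(0.7, 0.5) -> True
relfun[1](3.0, threshold[1])   # np.less(3.0, 2.0) -> False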
# make sure the static and temporal communities have the same number of nodes
if staticcommunities.shape[0] != temporalcommunities.shape[0]:
raise ValueError(
'Temporal and static communities have different dimensions')
alleg = allegiance(temporalcommunities)
Rcoeff = np.zeros(len(staticcommunities))
for i, statcom in enumerate(staticcommunities):
Rcoeff[i] = np.mean(alleg[i, staticcommunities == statcom])
return Rcoeff | def recruitment(temporalcommunities, staticcommunities) | Calculates recruitment coefficient for each node. Recruitment coefficient is the average probability of nodes from the
same static communities being in the same temporal communities at other time-points or during different tasks.
Parameters:
------------
temporalcommunities : array
temporal communities vector (node,time)
staticcommunities : array
Static communities vector for each node
Returns:
-------
Rcoeff : array
recruitment coefficient for each node
References:
-----------
Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015 May;18(5):744-51.
Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett. A Functional
Cartography of Cognitive Systems. PLoS Comput Biol. 2015 Dec 2;11(12):e1004533. | 3.982296 | 3.488743 | 1.14147 | def recruitment(temporalcommunities, staticcommunities):
"""
Calculates recruitment coefficient for each node. Recruitment coefficient is the average probability of nodes from the
same static communities being in the same temporal communities at other time-points or during different tasks.
Parameters:
------------
temporalcommunities : array
temporal communities vector (node,time)
staticcommunities : array
Static communities vector for each node
Returns:
-------
Rcoeff : array
recruitment coefficient for each node
References:
-----------
Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015 May;18(5):744-51.
Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett. A Functional
Cartography of Cognitive Systems. PLoS Comput Biol. 2015 Dec 2;11(12):e1004533.
"""
# make sure the static and temporal communities have the same number of nodes
if staticcommunities.shape[0] != temporalcommunities.shape[0]:
raise ValueError(
'Temporal and static communities have different dimensions')
alleg = allegiance(temporalcommunities)
Rcoeff = np.zeros(len(staticcommunities))
for i, statcom in enumerate(staticcommunities):
Rcoeff[i] = np.mean(alleg[i, staticcommunities == statcom])
return Rcoeff | 0.872102 | 0.748168 |
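A minimal sketch with made-up community labels:
import numpy as np
staticcoms = np.array([0, 0, 1, 1])          # two static systems
tempcoms = np.array([[0, 0, 0],              # (node, time) temporal labels
                     [0, 0, 1],
                     [1, 1, 1],
                     [1, 1, 1]])
R = recruitment(tempcoms, staticcoms)
# R[2] and R[3] exceed R[0] and R[1] because nodes 2 and 3 always share a community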
# make sure the static and temporal communities have the same number of nodes
if staticcommunities.shape[0] != temporalcommunities.shape[0]:
raise ValueError(
'Temporal and static communities have different dimensions')
alleg = allegiance(temporalcommunities)
Icoeff = np.zeros(len(staticcommunities))
# calc integration for each node
for i, statcom in enumerate(staticcommunities):
Icoeff[i] = np.mean(alleg[i, staticcommunities != statcom])
return Icoeff | def integration(temporalcommunities, staticcommunities) | Calculates the integration coefficient for each node. Measures the average probability
that a node is in the same community as nodes from other systems.
Parameters:
------------
temporalcommunities : array
temporal communities vector (node,time)
staticcommunities : array
Static communities vector for each node
Returns:
-------
Icoeff : array
integration coefficient for each node
References:
----------
Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015 May;18(5):744-51.
Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett.
A Functional Cartography of Cognitive Systems. PLoS Comput Biol. 2015 Dec
2;11(12):e1004533. | 4.464949 | 3.614507 | 1.235286 | def integration(temporalcommunities, staticcommunities):
"""
Calculates the integration coefficient for each node. Measures the average probability
that a node is in the same community as nodes from other systems.
Parameters:
------------
temporalcommunities : array
temporal communities vector (node,time)
staticcommunities : array
Static communities vector for each node
Returns:
-------
Icoeff : array
integration coefficient for each node
References:
----------
Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015 May;18(5):744-51.
Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett.
A Functional Cartography of Cognitive Systems. PLoS Comput Biol. 2015 Dec
2;11(12):e1004533.
"""
# make sure the static and temporal communities have the same number of nodes
if staticcommunities.shape[0] != temporalcommunities.shape[0]:
raise ValueError(
'Temporal and static communities have different dimensions')
alleg = allegiance(temporalcommunities)
Icoeff = np.zeros(len(staticcommunities))
# calc integration for each node
for i, statcom in enumerate(staticcommunities):
Icoeff[i] = np.mean(alleg[i, staticcommunities != statcom])
return Icoeff | 0.86916 | 0.630372 |
lowest, highest = self.tracks[0].get_active_pitch_range()
if len(self.tracks) > 1:
for track in self.tracks[1:]:
low, high = track.get_active_pitch_range()
if low < lowest:
lowest = low
if high > highest:
highest = high
return lowest, highest | def get_active_pitch_range(self) | Return the active pitch range of the pianorolls of all tracks as a tuple
(lowest, highest).
Returns
-------
lowest : int
The lowest active pitch among the pianorolls of all tracks.
highest : int
The highest active pitch among the pianorolls of all tracks.
"""
Return the active pitch range of the pianorolls of all tracks as a tuple
(lowest, highest).
Returns
-------
lowest : int
The lowest active pitch among the pianorolls of all tracks.
highest : int
The highest active pitch among the pianorolls of all tracks.
"""
lowest, highest = self.tracks[0].get_active_pitch_range()
if len(self.tracks) > 1:
for track in self.tracks[1:]:
low, high = track.get_active_pitch_range()
if low < lowest:
lowest = low
if high > highest:
highest = high
return lowest, highest | 0.741364 | 0.631438 |
empty_track_indices = [idx for idx, track in enumerate(self.tracks)
if not np.any(track.pianoroll)]
return empty_track_indices | def get_empty_tracks(self) | Return the indices of tracks with empty pianorolls.
Returns
-------
empty_track_indices : list
The indices of tracks with empty pianorolls. | 4.312263 | 3.439944 | 1.253585 | def get_empty_tracks(self):
"""
Return the indices of tracks with empty pianorolls.
Returns
-------
empty_track_indices : list
The indices of tracks with empty pianorolls.
"""
empty_track_indices = [idx for idx, track in enumerate(self.tracks)
if not np.any(track.pianoroll)]
return empty_track_indices | 0.691406 | 0.596198 |
if not isinstance(obj, Multitrack):
raise TypeError("Support only `pypianoroll.Multitrack` class objects")
copied = deepcopy(obj)
copied.pad_to_same()
return copied | def pad_to_same(obj) | Return a copy of the object with shorter piano-rolls padded with zeros
at the end along the time axis to the length of the piano-roll with the
maximal length. | 7.100032 | 5.867205 | 1.210122 | def pad_to_same(obj):
"""
Return a copy of the object with shorter piano-rolls padded with zeros
at the end along the time axis to the length of the piano-roll with the
maximal length.
"""
if not isinstance(obj, Multitrack):
raise TypeError("Support only `pypianoroll.Multitrack` class objects")
copied = deepcopy(obj)
copied.pad_to_same()
return copied | 0.786028 | 0.732053 |
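A hedged sketch, assuming the pypianoroll 0.5-style Track/Multitrack constructors:
import numpy as np
from pypianoroll import Multitrack, Track
short = Track(pianoroll=np.zeros((48, 128), bool))
long_ = Track(pianoroll=np.zeros((96, 128), bool))
m = Multitrack(tracks=[short, long_], beat_resolution=24)
padded = pad_to_same(m)   # a copy in which the 48-step pianoroll is zero-padded to 96 steps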
_validate_pianoroll(pianoroll)
reshaped = pianoroll[:, :120].reshape(-1, 12, 10)
reshaped[..., :8] += pianoroll[:, 120:].reshape(-1, 1, 8)
return np.sum(reshaped, 1) | def _to_chroma(pianoroll) | Return the unnormalized chroma features of a pianoroll. | 2.94459 | 2.857388 | 1.030518 | def _to_chroma(pianoroll):
"""
Return the unnormalized chroma features of a pianoroll.
"""
_validate_pianoroll(pianoroll)
reshaped = pianoroll[:, :120].reshape(-1, 12, 10)
reshaped[..., :8] += pianoroll[:, 120:].reshape(-1, 1, 8)
return np.sum(reshaped, 1) | 0.744208 | 0.62088 |
_validate_pianoroll(pianoroll)
reshaped = pianoroll.reshape(-1, beat_resolution * pianoroll.shape[1])
n_empty_beats = np.count_nonzero(reshaped.any(1))
return n_empty_beats / len(reshaped) | def empty_beat_rate(pianoroll, beat_resolution) | Return the ratio of empty beats to the total number of beats in a
pianoroll. | 2.961781 | 2.79248 | 1.060628 | def empty_beat_rate(pianoroll, beat_resolution):
"""
Return the ratio of empty beats to the total number of beats in a
pianoroll.
"""
_validate_pianoroll(pianoroll)
reshaped = pianoroll.reshape(-1, beat_resolution * pianoroll.shape[1])
n_empty_beats = np.count_nonzero(reshaped.any(1))
return n_empty_beats / len(reshaped) | 0.725077 | 0.781247 |
_validate_pianoroll(pianoroll)
chroma = _to_chroma(pianoroll)
return np.count_nonzero(np.any(chroma, 0)) | def n_pitche_classes_used(pianoroll) | Return the number of unique pitch classes used in a pianoroll. | 3.500182 | 3.410502 | 1.026295 | def n_pitche_classes_used(pianoroll):
"""
Return the number of unique pitch classes used in a pianoroll.
"""
_validate_pianoroll(pianoroll)
chroma = _to_chroma(pianoroll)
return np.count_nonzero(np.any(chroma, 0)) | 0.688704 | 0.565299 |
_validate_pianoroll(pianoroll)
if np.issubdtype(pianoroll.dtype, np.bool_):
pianoroll = pianoroll.astype(np.uint8)
padded = np.pad(pianoroll, ((1, 1), (0, 0)), 'constant')
diff = np.diff(padded, axis=0).reshape(-1)
onsets = (diff > 0).nonzero()[0]
offsets = (diff < 0).nonzero()[0]
n_qualified_notes = np.count_nonzero(offsets - onsets >= threshold)
return n_qualified_notes / len(onsets) | def qualified_note_rate(pianoroll, threshold=2) | Return the ratio of the number of the qualified notes (notes longer than
`threshold` (in time step)) to the total number of notes in a pianoroll. | 2.257025 | 2.245966 | 1.004924 | def qualified_note_rate(pianoroll, threshold=2):
"""
Return the ratio of the number of the qualified notes (notes longer than
`threshold` (in time step)) to the total number of notes in a pianoroll.
"""
_validate_pianoroll(pianoroll)
if np.issubdtype(pianoroll.dtype, np.bool_):
pianoroll = pianoroll.astype(np.uint8)
padded = np.pad(pianoroll, ((1, 1), (0, 0)), 'constant')
diff = np.diff(padded, axis=0).reshape(-1)
onsets = (diff > 0).nonzero()[0]
offsets = (diff < 0).nonzero()[0]
n_qualified_notes = np.count_nonzero(offsets - onsets >= threshold)
return n_qualified_notes / len(onsets) | 0.791932 | 0.76487 |
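A usage sketch; the note placements are illustrative and the comment restates the docstring rather than a computed value.
import numpy as np
roll = np.zeros((96, 128), bool)
roll[0:6, 60] = True          # a six-step note on C4
roll[48:49, 64] = True        # a one-step note on E4
rate = qualified_note_rate(roll, threshold=2)
# per the docstring, `rate` is the ratio of notes lasting >= `threshold` steps to all notes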
if beat_resolution not in (4, 6, 8, 9, 12, 16, 18, 24):
raise ValueError("Unsupported beat resolution. Only 4, 6, 8 ,9, 12, "
"16, 18, 42 are supported.")
_validate_pianoroll(pianoroll)
def _drum_pattern_mask(res, tol):
if res == 24:
drum_pattern_mask = np.tile([1., tol, 0., 0., 0., tol], 4)
elif res == 12:
drum_pattern_mask = np.tile([1., tol, tol], 4)
elif res == 6:
drum_pattern_mask = np.tile([1., tol, tol], 2)
elif res == 18:
drum_pattern_mask = np.tile([1., tol, 0., 0., 0., tol], 3)
elif res == 9:
drum_pattern_mask = np.tile([1., tol, tol], 3)
elif res == 16:
drum_pattern_mask = np.tile([1., tol, 0., tol], 4)
elif res == 8:
drum_pattern_mask = np.tile([1., tol], 4)
elif res == 4:
drum_pattern_mask = np.tile([1., tol], 2)
return drum_pattern_mask
drum_pattern_mask = _drum_pattern_mask(beat_resolution, tolerance)
n_in_pattern = np.sum(drum_pattern_mask * np.count_nonzero(pianoroll, 1))
return n_in_pattern / np.count_nonzero(pianoroll) | def drum_in_pattern_rate(pianoroll, beat_resolution, tolerance=0.1) | Return the ratio of the number of drum notes that lie on the drum
pattern (i.e., at certain time steps) to the total number of drum notes. | 1.908838 | 1.893038 | 1.008347 | def drum_in_pattern_rate(pianoroll, beat_resolution, tolerance=0.1):
"""
Return the ratio of the number of drum notes that lie on the drum
pattern (i.e., at certain time steps) to the total number of drum notes.
"""
if beat_resolution not in (4, 6, 8, 9, 12, 16, 18, 24):
raise ValueError("Unsupported beat resolution. Only 4, 6, 8 ,9, 12, "
"16, 18, 42 are supported.")
_validate_pianoroll(pianoroll)
def _drum_pattern_mask(res, tol):
if res == 24:
drum_pattern_mask = np.tile([1., tol, 0., 0., 0., tol], 4)
elif res == 12:
drum_pattern_mask = np.tile([1., tol, tol], 4)
elif res == 6:
drum_pattern_mask = np.tile([1., tol, tol], 2)
elif res == 18:
drum_pattern_mask = np.tile([1., tol, 0., 0., 0., tol], 3)
elif res == 9:
drum_pattern_mask = np.tile([1., tol, tol], 3)
elif res == 16:
drum_pattern_mask = np.tile([1., tol, 0., tol], 4)
elif res == 8:
drum_pattern_mask = np.tile([1., tol], 4)
elif res == 4:
drum_pattern_mask = np.tile([1., tol], 2)
return drum_pattern_mask
drum_pattern_mask = _drum_pattern_mask(beat_resolution, tolerance)
n_in_pattern = np.sum(drum_pattern_mask * np.count_nonzero(pianoroll, 1))
return n_in_pattern / np.count_nonzero(pianoroll) | 0.57517 | 0.609553 |
if not isinstance(key, int):
raise TypeError("`key` must be an integer.")
if key > 11 or key < 0:
raise ValueError("`key` must be in an integer in between 0 and 11.")
if kind not in ('major', 'minor'):
raise ValueError("`kind` must be one of 'major' or 'minor'.")
_validate_pianoroll(pianoroll)
def _scale_mask(key, kind):
if kind == 'major':
a_scale_mask = np.array([0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1], bool)
else:
a_scale_mask = np.array([1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1], bool)
return np.roll(a_scale_mask, key)
chroma = _to_chroma(pianoroll)
scale_mask = _scale_mask(key, kind)
n_in_scale = np.sum(scale_mask.reshape(-1, 12) * chroma)
return n_in_scale / np.count_nonzero(pianoroll) | def in_scale_rate(pianoroll, key=3, kind='major') | Return the ratio of the number of nonzero entries that lie in a specific
scale to the total number of nonzero entries in a pianoroll. Defaults to the C
major scale. | 2.141032 | 2.103438 | 1.017873 | def in_scale_rate(pianoroll, key=3, kind='major'):
"""
Return the ratio of the number of nonzero entries that lie in a specific
scale to the total number of nonzero entries in a pianoroll. Defaults to the C
major scale.
"""
if not isinstance(key, int):
raise TypeError("`key` must an integer.")
if key > 11 or key < 0:
raise ValueError("`key` must be in an integer in between 0 and 11.")
if kind not in ('major', 'minor'):
raise ValueError("`kind` must be one of 'major' or 'minor'.")
_validate_pianoroll(pianoroll)
def _scale_mask(key, kind):
if kind == 'major':
a_scale_mask = np.array([0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1], bool)
else:
a_scale_mask = np.array([1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1], bool)
return np.roll(a_scale_mask, key)
chroma = _to_chroma(pianoroll)
scale_mask = _scale_mask(key, kind)
n_in_scale = np.sum(scale_mask.reshape(-1, 12) * chroma)
return n_in_scale / np.count_nonzero(pianoroll) | 0.674345 | 0.549761 |
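
A hedged sketch for the row above. `in_scale_rate` relies on two private helpers that are not shown in this row; the stand-ins below (a no-op validator and a simplified chroma fold of the 128 MIDI pitches into 12 pitch classes) are assumptions made only so the example runs. With the default `key=3`, the rolled mask corresponds to C major.

```python
import numpy as np

_validate_pianoroll = lambda pianoroll: None  # no-op stub (assumption)

def _to_chroma(pianoroll):
    # simplified stand-in for the private helper: fold 128 pitches into 12 pitch classes per step
    padded = np.pad(pianoroll, ((0, 0), (0, 132 - pianoroll.shape[1])), 'constant')
    return padded.reshape(pianoroll.shape[0], 11, 12).sum(axis=1)

roll = np.zeros((4, 128), dtype=bool)
roll[0, 60] = True   # C4  - in C major
roll[1, 64] = True   # E4  - in C major
roll[2, 66] = True   # F#4 - outside C major

print(in_scale_rate(roll, key=3, kind='major'))  # 2 of 3 notes in scale -> ~0.667
```
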
nonzero_steps = np.any(self.pianoroll, axis=1)
inv_last_nonzero_step = np.argmax(np.flip(nonzero_steps, axis=0))
active_length = self.pianoroll.shape[0] - inv_last_nonzero_step
return active_length | def get_active_length(self) | Return the active length (i.e., without trailing silence) of the
pianoroll. The unit is time step.
Returns
-------
active_length : int
The active length (i.e., without trailing silence) of the pianoroll. | 3.980884 | 3.492857 | 1.139721 | def get_active_length(self):
"""
Return the active length (i.e., without trailing silence) of the
pianoroll. The unit is time step.
Returns
-------
active_length : int
The active length (i.e., without trailing silence) of the pianoroll.
"""
nonzero_steps = np.any(self.pianoroll, axis=1)
inv_last_nonzero_step = np.argmax(np.flip(nonzero_steps, axis=0))
active_length = self.pianoroll.shape[0] - inv_last_nonzero_step
return active_length | 0.748076 | 0.526769 |
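
The trailing-silence logic above is easy to verify on a bare array; this standalone sketch repeats the same three-line computation outside the class.

```python
import numpy as np

pianoroll = np.zeros((8, 128), dtype=bool)
pianoroll[2, 60] = True   # the last nonzero time step is index 2

nonzero_steps = np.any(pianoroll, axis=1)                          # which steps have any note
inv_last_nonzero_step = np.argmax(np.flip(nonzero_steps, axis=0))  # number of trailing silent steps
active_length = pianoroll.shape[0] - inv_last_nonzero_step
print(active_length)  # 3: steps 0-2 are kept, steps 3-7 are trailing silence
```
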
if self.pianoroll.shape[1] < 1:
raise ValueError("Cannot compute the active pitch range for an "
"empty pianoroll")
lowest = 0
highest = 127
while lowest < highest:
if np.any(self.pianoroll[:, lowest]):
break
lowest += 1
if lowest == highest:
raise ValueError("Cannot compute the active pitch range for an "
"empty pianoroll")
while not np.any(self.pianoroll[:, highest]):
highest -= 1
return lowest, highest | def get_active_pitch_range(self) | Return the active pitch range as a tuple (lowest, highest).
Returns
-------
lowest : int
The lowest active pitch in the pianoroll.
highest : int
The highest active pitch in the pianoroll. | 2.558522 | 2.321702 | 1.102003 | def get_active_pitch_range(self):
"""
Return the active pitch range as a tuple (lowest, highest).
Returns
-------
lowest : int
The lowest active pitch in the pianoroll.
highest : int
The highest active pitch in the pianoroll.
"""
if self.pianoroll.shape[1] < 1:
raise ValueError("Cannot compute the active pitch range for an "
"empty pianoroll")
lowest = 0
highest = 127
while lowest < highest:
if np.any(self.pianoroll[:, lowest]):
break
lowest += 1
if lowest == highest:
raise ValueError("Cannot compute the active pitch range for an "
"empty pianoroll")
while not np.any(self.pianoroll[:, highest]):
highest -= 1
return lowest, highest | 0.766242 | 0.609611 |
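
For a non-empty pianoroll, the two while-loops above amount to taking the lowest and highest pitch columns that contain any nonzero entry; a vectorized equivalent on a bare array:

```python
import numpy as np

pianoroll = np.zeros((4, 128), dtype=bool)
pianoroll[0, 60] = True
pianoroll[2, 72] = True

active_pitches = np.any(pianoroll, axis=0).nonzero()[0]
print(active_pitches.min(), active_pitches.max())  # 60 72, i.e. (lowest, highest)
```
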
if x.shape[0] != 1:
raise ValueError("Only one sample can be plotted at a time.")
# compile theano function
xs = T.tensor4('xs').astype(theano.config.floatX)
get_activity = theano.function([xs], get_output(layer, xs))
activity = get_activity(x)
shape = activity.shape
nrows = np.ceil(np.sqrt(shape[1])).astype(int)
ncols = nrows
figs, axes = plt.subplots(nrows + 1, ncols, figsize=figsize, squeeze=False)
axes[0, ncols // 2].imshow(1 - x[0][0], cmap='gray',
interpolation='none')
axes[0, ncols // 2].set_title('original')
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
if i >= shape[1]:
break
ndim = activity[0][i].ndim
if ndim != 2:
raise ValueError("Wrong number of dimensions, image data should "
"have 2, instead got {}".format(ndim))
axes[r + 1, c].imshow(-activity[0][i], cmap='gray',
interpolation='none')
return plt | def plot_conv_activity(layer, x, figsize=(6, 8)) | Plot the activities of a specific layer.
Only really makes sense with layers that work on 2D data (2D
convolutional layers, 2D pooling layers ...).
Parameters
----------
layer : lasagne.layers.Layer
x : numpy.ndarray
Only takes one sample at a time, i.e. x.shape[0] == 1. | 2.865567 | 2.825922 | 1.014029 | def plot_conv_activity(layer, x, figsize=(6, 8)):
"""
Plot the activities of a specific layer.
Only really makes sense with layers that work on 2D data (2D
convolutional layers, 2D pooling layers ...).
Parameters
----------
layer : lasagne.layers.Layer
x : numpy.ndarray
Only takes one sample at a time, i.e. x.shape[0] == 1.
"""
if x.shape[0] != 1:
raise ValueError("Only one sample can be plotted at a time.")
# compile theano function
xs = T.tensor4('xs').astype(theano.config.floatX)
get_activity = theano.function([xs], get_output(layer, xs))
activity = get_activity(x)
shape = activity.shape
nrows = np.ceil(np.sqrt(shape[1])).astype(int)
ncols = nrows
figs, axes = plt.subplots(nrows + 1, ncols, figsize=figsize, squeeze=False)
axes[0, ncols // 2].imshow(1 - x[0][0], cmap='gray',
interpolation='none')
axes[0, ncols // 2].set_title('original')
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
if i >= shape[1]:
break
ndim = activity[0][i].ndim
if ndim != 2:
raise ValueError("Wrong number of dimensions, image data should "
"have 2, instead got {}".format(ndim))
axes[r + 1, c].imshow(-activity[0][i], cmap='gray',
interpolation='none')
return plt | 0.831742 | 0.851583 |
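
A hedged usage sketch only: `net` and `X` below are placeholders for an already-fitted nolearn/lasagne network and its input array, and the import path is my assumption about where this helper normally lives; the snippet is not runnable on its own.

```python
# assumptions: `net` is a fitted nolearn.lasagne.NeuralNet with a 2D conv layer named 'conv1',
# and `X` is its input array of shape (n_samples, 1, height, width)
from nolearn.lasagne.visualize import plot_conv_activity  # assumed import path

plt = plot_conv_activity(net.layers_['conv1'], X[0:1])  # exactly one sample, as required
plt.show()
```
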
import pydotplus as pydot
pydot_graph = pydot.Dot('Network', graph_type='digraph')
pydot_nodes = {}
pydot_edges = []
for i, layer in enumerate(layers):
layer_name = getattr(layer, 'name', None)
if layer_name is None:
layer_name = layer.__class__.__name__
layer_type = '{0}'.format(layer_name)
key = repr(layer)
label = layer_type
color = get_hex_color(layer_type)
if verbose:
for attr in ['num_filters', 'num_units', 'ds',
'filter_shape', 'stride', 'strides', 'p']:
if hasattr(layer, attr):
label += '\n{0}: {1}'.format(attr, getattr(layer, attr))
if hasattr(layer, 'nonlinearity'):
try:
nonlinearity = layer.nonlinearity.__name__
except AttributeError:
nonlinearity = layer.nonlinearity.__class__.__name__
label += '\nnonlinearity: {0}'.format(nonlinearity)
if output_shape:
label += '\nOutput shape: {0}'.format(layer.output_shape)
pydot_nodes[key] = pydot.Node(
key, label=label, shape='record', fillcolor=color, style='filled')
if hasattr(layer, 'input_layers'):
for input_layer in layer.input_layers:
pydot_edges.append([repr(input_layer), key])
if hasattr(layer, 'input_layer'):
pydot_edges.append([repr(layer.input_layer), key])
for node in pydot_nodes.values():
pydot_graph.add_node(node)
for edges in pydot_edges:
pydot_graph.add_edge(
pydot.Edge(pydot_nodes[edges[0]], pydot_nodes[edges[1]]))
return pydot_graph | def make_pydot_graph(layers, output_shape=True, verbose=False) | :parameters:
- layers : list
List of the layers, as obtained from lasagne.layers.get_all_layers
- output_shape: (default `True`)
If `True`, the output shape of each layer will be displayed.
- verbose: (default `False`)
If `True`, layer attributes like filter shape, stride, etc.
will be displayed.
:returns:
- pydot_graph : PyDot object containing the graph | 1.925423 | 1.858983 | 1.03574 | def make_pydot_graph(layers, output_shape=True, verbose=False):
"""
:parameters:
- layers : list
List of the layers, as obtained from lasagne.layers.get_all_layers
- output_shape: (default `True`)
If `True`, the output shape of each layer will be displayed.
- verbose: (default `False`)
If `True`, layer attributes like filter shape, stride, etc.
will be displayed.
:returns:
- pydot_graph : PyDot object containing the graph
"""
import pydotplus as pydot
pydot_graph = pydot.Dot('Network', graph_type='digraph')
pydot_nodes = {}
pydot_edges = []
for i, layer in enumerate(layers):
layer_name = getattr(layer, 'name', None)
if layer_name is None:
layer_name = layer.__class__.__name__
layer_type = '{0}'.format(layer_name)
key = repr(layer)
label = layer_type
color = get_hex_color(layer_type)
if verbose:
for attr in ['num_filters', 'num_units', 'ds',
'filter_shape', 'stride', 'strides', 'p']:
if hasattr(layer, attr):
label += '\n{0}: {1}'.format(attr, getattr(layer, attr))
if hasattr(layer, 'nonlinearity'):
try:
nonlinearity = layer.nonlinearity.__name__
except AttributeError:
nonlinearity = layer.nonlinearity.__class__.__name__
label += '\nnonlinearity: {0}'.format(nonlinearity)
if output_shape:
label += '\nOutput shape: {0}'.format(layer.output_shape)
pydot_nodes[key] = pydot.Node(
key, label=label, shape='record', fillcolor=color, style='filled')
if hasattr(layer, 'input_layers'):
for input_layer in layer.input_layers:
pydot_edges.append([repr(input_layer), key])
if hasattr(layer, 'input_layer'):
pydot_edges.append([repr(layer.input_layer), key])
for node in pydot_nodes.values():
pydot_graph.add_node(node)
for edges in pydot_edges:
pydot_graph.add_edge(
pydot.Edge(pydot_nodes[edges[0]], pydot_nodes[edges[1]]))
return pydot_graph | 0.703639 | 0.542015 |
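
A short, hedged usage sketch, assuming the function above is in scope: `output_layer` stands in for the final layer of some existing lasagne network, and `pydotplus` must be installed for the import inside the function to succeed.

```python
import lasagne

# `output_layer` is a placeholder for the last layer of an existing lasagne network
layers = lasagne.layers.get_all_layers(output_layer)
graph = make_pydot_graph(layers, output_shape=True, verbose=True)
graph.write_png("network.png")  # pydot Dot objects can render to common image formats
```
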
from IPython.display import Image
layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers')
else layers)
dot = make_pydot_graph(layers, **kwargs)
return Image(dot.create_png()) | def draw_to_notebook(layers, **kwargs) | Draws a network diagram in an IPython notebook
:parameters:
- layers : list or NeuralNet instance
List of layers or the neural net to draw.
- **kwargs : see the docstring of make_pydot_graph for other options | 3.869769 | 3.186751 | 1.21433 | def draw_to_notebook(layers, **kwargs):
"""
Draws a network diagram in an IPython notebook
:parameters:
- layers : list or NeuralNet instance
List of layers or the neural net to draw.
- **kwargs : see the docstring of make_pydot_graph for other options
"""
from IPython.display import Image
layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers')
else layers)
dot = make_pydot_graph(layers, **kwargs)
return Image(dot.create_png()) | 0.77102 | 0.540621 |
from decaf.util import transform # soft dep
_JEFFNET_FLIP = True
# first, extract the 256x256 center.
image = transform.scale_and_extract(transform.as_rgb(image), 256)
# convert to [0,255] float32
image = image.astype(np.float32) * 255.
if _JEFFNET_FLIP:
# Flip the image if necessary, maintaining the c_contiguous order
image = image[::-1, :].copy()
# subtract the mean
image -= self.net_._data_mean
return image | def prepare_image(self, image) | Returns image of shape `(256, 256, 3)`, as expected by
`transform` when `classify_direct = True`. | 8.974966 | 8.861794 | 1.012771 | def prepare_image(self, image):
"""
Returns image of shape `(256, 256, 3)`, as expected by
`transform` when `classify_direct = True`.
"""
from decaf.util import transform # soft dep
_JEFFNET_FLIP = True
# first, extract the 256x256 center.
image = transform.scale_and_extract(transform.as_rgb(image), 256)
# convert to [0,255] float32
image = image.astype(np.float32) * 255.
if _JEFFNET_FLIP:
# Flip the image if necessary, maintaining the c_contiguous order
image = image[::-1, :].copy()
# subtract the mean
image -= self.net_._data_mean
return image | 0.828037 | 0.671918 |
mapping = kwargs
if args:
if len(args) != 1 or not isinstance(args[0], dict):
raise RedisError('MSET requires **kwargs or a single dict arg')
mapping.update(args[0])
if len(mapping) == 0:
raise ResponseError("wrong number of arguments for 'mset' command")
for key, value in mapping.items():
self.set(key, value)
return True | def mset(self, *args, **kwargs) | Sets key/values based on a mapping. Mapping can be supplied as a single
dictionary argument or as kwargs. | 3.485207 | 3.135663 | 1.111474 | def mset(self, *args, **kwargs):
"""
Sets key/values based on a mapping. Mapping can be supplied as a single
dictionary argument or as kwargs.
"""
mapping = kwargs
if args:
if len(args) != 1 or not isinstance(args[0], dict):
raise RedisError('MSET requires **kwargs or a single dict arg')
mapping.update(args[0])
if len(mapping) == 0:
raise ResponseError("wrong number of arguments for 'mset' command")
for key, value in mapping.items():
self.set(key, value)
return True | 0.680242 | 0.526586 |
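
A hedged usage sketch against the mock client this method belongs to; `mock_redis_client` is the factory I would expect the mockredis package to expose, flagged here as an assumption.

```python
from mockredis import mock_redis_client  # assumed factory exposed by the mockredis package

r = mock_redis_client()
r.mset({'a': '1', 'b': '2'})   # single-dict form
r.mset(c='3')                  # kwargs form
print(r.get('a'), r.get('c'))  # b'1' b'3' (or '1' '3', depending on decoding settings)
```
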
redis_hash = self._get_hash(hashkey, 'HEXISTS')
return self._encode(attribute) in redis_hash | def hexists(self, hashkey, attribute) | Emulate hexists. | 8.409638 | 7.622225 | 1.103305 | def hexists(self, hashkey, attribute):
"""
Emulate hexists.
"""
redis_hash = self._get_hash(hashkey, 'HEXISTS')
return self._encode(attribute) in redis_hash | 0.626524 | 0.569613 |
disco = self.dependencies[aioxmpp.disco.DiscoClient]
response = yield from disco.query_info(
peer_jid,
)
return namespaces.xep0050_commands in response.features | def supports_commands(self, peer_jid) | Detect whether a peer supports :xep:`50` Ad-Hoc commands.
:param peer_jid: JID of the peer to query
:type peer_jid: :class:`aioxmpp.JID`
:rtype: :class:`bool`
:return: True if the peer supports the Ad-Hoc commands protocol, false
otherwise.
Note that the fact that a peer supports the protocol does not imply
that it offers any commands. | 12.254194 | 9.169634 | 1.336389 | def supports_commands(self, peer_jid):
"""
Detect whether a peer supports :xep:`50` Ad-Hoc commands.
:param peer_jid: JID of the peer to query
:type peer_jid: :class:`aioxmpp.JID`
:rtype: :class:`bool`
:return: True if the peer supports the Ad-Hoc commands protocol, false
otherwise.
Note that the fact that a peer supports the protocol does not imply
that it offers any commands.
"""
disco = self.dependencies[aioxmpp.disco.DiscoClient]
response = yield from disco.query_info(
peer_jid,
)
return namespaces.xep0050_commands in response.features | 0.847968 | 0.582313 |
if self._response is not None:
raise RuntimeError("command execution already started")
request = aioxmpp.IQ(
type_=aioxmpp.IQType.SET,
to=self._peer_jid,
payload=adhoc_xso.Command(self._command_name),
)
self._response = yield from self._stream.send_iq_and_wait_for_reply(
request,
)
return self._response.first_payload | def start(self) | Initiate the session by starting to execute the command with the peer.
:return: The :attr:`~.xso.Command.first_payload` of the response
This sends an empty command IQ request with the
:attr:`~.ActionType.EXECUTE` action.
The :attr:`status`, :attr:`response` and related attributes get updated
with the newly received values. | 6.721424 | 4.330876 | 1.551978 | def start(self):
"""
Initiate the session by starting to execute the command with the peer.
:return: The :attr:`~.xso.Command.first_payload` of the response
This sends an empty command IQ request with the
:attr:`~.ActionType.EXECUTE` action.
The :attr:`status`, :attr:`response` and related attributes get updated
with the newly received values.
"""
if self._response is not None:
raise RuntimeError("command execution already started")
request = aioxmpp.IQ(
type_=aioxmpp.IQType.SET,
to=self._peer_jid,
payload=adhoc_xso.Command(self._command_name),
)
self._response = yield from self._stream.send_iq_and_wait_for_reply(
request,
)
return self._response.first_payload | 0.74459 | 0.581957 |
if self._this_occupant is not None:
items = [self._this_occupant]
else:
items = []
items += list(self._occupant_info.values())
return items | def members(self) | A copy of the list of occupants. The local user is always the first
item in the list, unless the :meth:`on_enter` has not fired yet. | 6.388441 | 3.987495 | 1.602119 | def members(self):
"""
A copy of the list of occupants. The local user is always the first
item in the list, unless the :meth:`on_enter` has not fired yet.
"""
if self._this_occupant is not None:
items = [self._this_occupant]
else:
items = []
items += list(self._occupant_info.values())
return items | 0.633297 | 0.560974 |
keys = list(self.keys())
try:
keys.remove(None)
except ValueError:
pass
keys.sort()
key = lookup_language(keys, language_ranges)
return self[key] | def lookup(self, language_ranges) | Perform an RFC4647 language range lookup on the keys in the
dictionary. `language_ranges` must be a sequence of
:class:`LanguageRange` instances.
Return the entry in the dictionary with a key as produced by
`lookup_language`. If `lookup_language` does not find a match and the
mapping contains an entry with key :data:`None`, that entry is
returned, otherwise :class:`KeyError` is raised. | 3.510848 | 3.125103 | 1.123434 | def lookup(self, language_ranges):
"""
Perform an RFC4647 language range lookup on the keys in the
dictionary. `language_ranges` must be a sequence of
:class:`LanguageRange` instances.
Return the entry in the dictionary with a key as produced by
`lookup_language`. If `lookup_language` does not find a match and the
mapping contains an entry with key :data:`None`, that entry is
returned, otherwise :class:`KeyError` is raised.
"""
keys = list(self.keys())
try:
keys.remove(None)
except ValueError:
pass
keys.sort()
key = lookup_language(keys, language_ranges)
return self[key] | 0.861858 | 0.734715 |
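
A hedged usage sketch; the class and constructor names (`LanguageMap`, `LanguageTag.fromstr`, `LanguageRange.fromstr`) are what I would expect from aioxmpp's structs module and should be treated as assumptions.

```python
from aioxmpp.structs import LanguageMap, LanguageRange, LanguageTag  # assumed import path

texts = LanguageMap()
texts[None] = "fallback text"
texts[LanguageTag.fromstr("de-de")] = "Hallo Welt"

print(texts.lookup([LanguageRange.fromstr("de-de")]))  # "Hallo Welt"
print(texts.lookup([LanguageRange.fromstr("ja")]))     # no match -> the None entry: "fallback text"
```
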
record = b".".join([
b"_" + service.encode("ascii"),
b"_" + transport.encode("ascii"),
domain])
answer = yield from repeated_query(
record,
dns.rdatatype.SRV,
**kwargs)
if answer is None:
return None
items = [
(rec.priority, rec.weight, (str(rec.target), rec.port))
for rec in answer
]
for i, (prio, weight, (host, port)) in enumerate(items):
if host == ".":
raise ValueError(
"protocol {!r} over {!r} not supported at {!r}".format(
service,
transport,
domain
)
)
items[i] = (prio, weight, (
host.rstrip(".").encode("ascii"),
port))
return items | def lookup_srv(
domain: bytes,
service: str,
transport: str = "tcp",
**kwargs) | Query the DNS for SRV records describing how the given `service` over the
given `transport` is implemented for the given `domain`. `domain` must be
an IDNA-encoded :class:`bytes` object; `service` must be a normal
:class:`str`.
Keyword arguments are passed to :func:`repeated_query`.
Return a list of tuples ``(prio, weight, (hostname, port))``, where
`hostname` is a IDNA-encoded :class:`bytes` object containing the hostname
obtained from the SRV record. The other fields are also as obtained from
the SRV records. The trailing dot is stripped from the `hostname`.
If the DNS query returns an empty result, :data:`None` is returned. If any
of the found SRV records has the root zone (``.``) as `hostname`, this
indicates that the service is not available at the given `domain` and
:class:`ValueError` is raised. | 3.632017 | 2.949542 | 1.231384 | def lookup_srv(
domain: bytes,
service: str,
transport: str = "tcp",
**kwargs):
"""
Query the DNS for SRV records describing how the given `service` over the
given `transport` is implemented for the given `domain`. `domain` must be
an IDNA-encoded :class:`bytes` object; `service` must be a normal
:class:`str`.
Keyword arguments are passed to :func:`repeated_query`.
Return a list of tuples ``(prio, weight, (hostname, port))``, where
`hostname` is a IDNA-encoded :class:`bytes` object containing the hostname
obtained from the SRV record. The other fields are also as obtained from
the SRV records. The trailing dot is stripped from the `hostname`.
If the DNS query returns an empty result, :data:`None` is returned. If any
of the found SRV records has the root zone (``.``) as `hostname`, this
indicates that the service is not available at the given `domain` and
:class:`ValueError` is raised.
"""
record = b".".join([
b"_" + service.encode("ascii"),
b"_" + transport.encode("ascii"),
domain])
answer = yield from repeated_query(
record,
dns.rdatatype.SRV,
**kwargs)
if answer is None:
return None
items = [
(rec.priority, rec.weight, (str(rec.target), rec.port))
for rec in answer
]
for i, (prio, weight, (host, port)) in enumerate(items):
if host == ".":
raise ValueError(
"protocol {!r} over {!r} not supported at {!r}".format(
service,
transport,
domain
)
)
items[i] = (prio, weight, (
host.rstrip(".").encode("ascii"),
port))
return items | 0.88796 | 0.698278 |
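
A hedged usage sketch. In aioxmpp this helper lives, to the best of my knowledge, in `aioxmpp.network` and is an asyncio coroutine, so it must be awaited; the domain below is a placeholder and the call performs a real DNS query.

```python
import asyncio
from aioxmpp.network import lookup_srv  # assumed module path

async def main():
    # SRV lookup for _xmpp-client._tcp.example.com (placeholder domain)
    records = await lookup_srv(b"example.com", "xmpp-client")
    if records is None:
        print("no SRV records published")
        return
    for prio, weight, (host, port) in sorted(records):
        # a lower priority value is preferred; weight breaks ties among equal priorities
        print(prio, weight, host.decode("ascii"), port)

asyncio.run(main())
```
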
record = b".".join([
b"_" + str(port).encode("ascii"),
b"_" + transport.encode("ascii"),
hostname
])
answer = yield from repeated_query(
record,
dns.rdatatype.TLSA,
require_ad=require_ad,
**kwargs)
if answer is None:
return None
items = [
(rec.usage, rec.selector, rec.mtype, rec.cert)
for rec in answer
]
return items | def lookup_tlsa(hostname, port, transport="tcp", require_ad=True, **kwargs) | Query the DNS for TLSA records describing the certificates and/or keys to
expect when contacting `hostname` at the given `port` over the given
`transport`. `hostname` must be an IDNA-encoded :class:`bytes` object.
The keyword arguments are passed to :func:`repeated_query`; `require_ad`
defaults to :data:`True` here.
Return a list of tuples ``(usage, selector, mtype, cert)`` which contains
the information from the TLSA records.
If no data is returned by the query, :data:`None` is returned instead. | 4.397869 | 3.582686 | 1.227534 | def lookup_tlsa(hostname, port, transport="tcp", require_ad=True, **kwargs):
"""
Query the DNS for TLSA records describing the certificates and/or keys to
expect when contacting `hostname` at the given `port` over the given
`transport`. `hostname` must be an IDNA-encoded :class:`bytes` object.
The keyword arguments are passed to :func:`repeated_query`; `require_ad`
defaults to :data:`True` here.
Return a list of tuples ``(usage, selector, mtype, cert)`` which contains
the information from the TLSA records.
If no data is returned by the query, :data:`None` is returned instead.
"""
record = b".".join([
b"_" + str(port).encode("ascii"),
b"_" + transport.encode("ascii"),
hostname
])
answer = yield from repeated_query(
record,
dns.rdatatype.TLSA,
require_ad=require_ad,
**kwargs)
if answer is None:
return None
items = [
(rec.usage, rec.selector, rec.mtype, rec.cert)
for rec in answer
]
return items | 0.863017 | 0.678487 |
parts = [
_process_identity(identity)
for identity in identities
]
parts.sort()
return b"".join(parts)+b"\x1c" | def _process_identities(identities) | Generate the `Identities String` from an iterable of identities.
:param identities: The identities to generate the identities string from.
:type identities: :class:`~collections.abc.Iterable` of
:class:`~.disco.xso.Identity`
:return: The `Identities String`
:rtype: :class:`bytes`
Generate the `Identities String` from the given `identities` as specified
in :xep:`390`. | 6.086969 | 5.620315 | 1.08303 | def _process_identities(identities):
"""
Generate the `Identities String` from an iterable of identities.
:param identities: The identities to generate the identities string from.
:type identities: :class:`~collections.abc.Iterable` of
:class:`~.disco.xso.Identity`
:return: The `Identities String`
:rtype: :class:`bytes`
Generate the `Identities String` from the given `identities` as specified
in :xep:`390`.
"""
parts = [
_process_identity(identity)
for identity in identities
]
parts.sort()
return b"".join(parts)+b"\x1c" | 0.850002 | 0.538073 |
parts = [
_process_form(form)
for form in exts
]
parts.sort()
return b"".join(parts)+b"\x1c" | def _process_extensions(exts) | Generate the `Extensions String` from an iterable of data forms.
:param exts: The data forms to generate the extensions string from.
:type exts: :class:`~collections.abc.Iterable` of
:class:`~.forms.xso.Data`
:return: The `Extensions String`
:rtype: :class:`bytes`
Generate the `Extensions String` from the given `exts` as specified
in :xep:`390`. | 7.861825 | 6.660774 | 1.180317 | def _process_extensions(exts):
"""
Generate the `Extensions String` from an iterable of data forms.
:param exts: The data forms to generate the extensions string from.
:type exts: :class:`~collections.abc.Iterable` of
:class:`~.forms.xso.Data`
:return: The `Extensions String`
:rtype: :class:`bytes`
Generate the `Extensions String` from the given `exts` as specified
in :xep:`390`.
"""
parts = [
_process_form(form)
for form in exts
]
parts.sort()
return b"".join(parts)+b"\x1c" | 0.830388 | 0.508117 |
stanza = aioxmpp.Presence()
self._state.apply_to_stanza(stanza)
stanza.status.update(self._status)
return stanza | def make_stanza(self) | Create and return a presence stanza with the current settings.
:return: Presence stanza
:rtype: :class:`aioxmpp.Presence` | 7.006313 | 5.696356 | 1.229964 | def make_stanza(self):
"""
Create and return a presence stanza with the current settings.
:return: Presence stanza
:rtype: :class:`aioxmpp.Presence`
"""
stanza = aioxmpp.Presence()
self._state.apply_to_stanza(stanza)
stanza.status.update(self._status)
return stanza | 0.618752 | 0.541833 |
if not isinstance(priority, numbers.Integral):
raise TypeError(
"invalid priority: got {}, expected integer".format(
type(priority)
)
)
if not isinstance(state, aioxmpp.PresenceState):
raise TypeError(
"invalid state: got {}, expected aioxmpp.PresenceState".format(
type(state),
)
)
if isinstance(status, str):
new_status = {None: status}
else:
new_status = dict(status)
new_priority = int(priority)
emit_state_event = self._state != state
emit_overall_event = (
emit_state_event or
self._priority != new_priority or
self._status != new_status
)
self._state = state
self._status = new_status
self._priority = new_priority
if emit_state_event:
self.on_presence_state_changed()
if emit_overall_event:
self.on_presence_changed()
return self.resend_presence() | def set_presence(self, state, status={}, priority=0) | Change the presence broadcast by the client.
:param state: New presence state to broadcast
:type state: :class:`aioxmpp.PresenceState`
:param status: New status information to broadcast
:type status: :class:`dict` or :class:`str`
:param priority: New priority for the resource
:type priority: :class:`int`
:return: Stanza token of the presence stanza or :data:`None` if the
presence is unchanged or the stream is not connected.
:rtype: :class:`~.stream.StanzaToken`
If the client is currently connected, the new presence is broadcast
immediately.
`status` must be either a string or something which can be passed to
the :class:`dict` constructor. If it is a string, it is wrapped into a
dict using ``{None: status}``. The mapping must map
:class:`~.LanguageTag` objects (or :data:`None`) to strings. The
information will be used to generate internationalised presence status
information. If you do not need internationalisation, simply use the
string version of the argument. | 2.556511 | 2.386725 | 1.071138 | def set_presence(self, state, status={}, priority=0):
"""
Change the presence broadcast by the client.
:param state: New presence state to broadcast
:type state: :class:`aioxmpp.PresenceState`
:param status: New status information to broadcast
:type status: :class:`dict` or :class:`str`
:param priority: New priority for the resource
:type priority: :class:`int`
:return: Stanza token of the presence stanza or :data:`None` if the
presence is unchanged or the stream is not connected.
:rtype: :class:`~.stream.StanzaToken`
If the client is currently connected, the new presence is broadcast
immediately.
`status` must be either a string or something which can be passed to
the :class:`dict` constructor. If it is a string, it is wrapped into a
dict using ``{None: status}``. The mapping must map
:class:`~.LanguageTag` objects (or :data:`None`) to strings. The
information will be used to generate internationalised presence status
information. If you do not need internationalisation, simply use the
string version of the argument.
"""
if not isinstance(priority, numbers.Integral):
raise TypeError(
"invalid priority: got {}, expected integer".format(
type(priority)
)
)
if not isinstance(state, aioxmpp.PresenceState):
raise TypeError(
"invalid state: got {}, expected aioxmpp.PresenceState".format(
type(state),
)
)
if isinstance(status, str):
new_status = {None: status}
else:
new_status = dict(status)
new_priority = int(priority)
emit_state_event = self._state != state
emit_overall_event = (
emit_state_event or
self._priority != new_priority or
self._status != new_status
)
self._state = state
self._status = new_status
self._priority = new_priority
if emit_state_event:
self.on_presence_state_changed()
if emit_overall_event:
self.on_presence_changed()
return self.resend_presence() | 0.859987 | 0.53607 |
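
A hedged usage sketch; `client` is a placeholder for an already-connected aioxmpp client, and `aioxmpp.PresenceServer` is my assumption for the service class this method belongs to (summoning a service is the usual aioxmpp pattern).

```python
import aioxmpp

# `client` is assumed to be an already-connected aioxmpp client instance
presence = client.summon(aioxmpp.PresenceServer)  # assumed service class name
presence.set_presence(
    aioxmpp.PresenceState(available=True),
    status="hacking on the docs",  # plain string -> wrapped as {None: status}
    priority=10,
)
```
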
pk = pyasn1_struct.getComponentByName(
"tbsCertificate"
).getComponentByName(
"subjectPublicKeyInfo"
)
return pyasn1.codec.der.encoder.encode(pk) | def extract_pk_blob_from_pyasn1(pyasn1_struct) | Extract an ASN.1 encoded public key blob from the given :mod:`pyasn1`
structure (which must represent a certificate). | 3.346926 | 3.08438 | 1.085121 | def extract_pk_blob_from_pyasn1(pyasn1_struct):
"""
Extract an ASN.1 encoded public key blob from the given :mod:`pyasn1`
structure (which must represent a certificate).
"""
pk = pyasn1_struct.getComponentByName(
"tbsCertificate"
).getComponentByName(
"subjectPublicKeyInfo"
)
return pyasn1.codec.der.encoder.encode(pk) | 0.808786 | 0.504639 |
cert_structure = extract_python_dict_from_x509(x509)
try:
ssl.match_hostname(cert_structure, hostname)
except ssl.CertificateError:
return False
return True | def check_x509_hostname(x509, hostname) | Check whether the given :class:`OpenSSL.crypto.X509` certificate `x509`
matches the given `hostname`.
Return :data:`True` if the name matches and :data:`False` otherwise. This
uses :func:`ssl.match_hostname` and :func:`extract_python_dict_from_x509`. | 4.243701 | 2.652637 | 1.599805 | def check_x509_hostname(x509, hostname):
"""
Check whether the given :class:`OpenSSL.crypto.X509` certificate `x509`
matches the given `hostname`.
Return :data:`True` if the name matches and :data:`False` otherwise. This
uses :func:`ssl.match_hostname` and :func:`extract_python_dict_from_x509`.
"""
cert_structure = extract_python_dict_from_x509(x509)
try:
ssl.match_hostname(cert_structure, hostname)
except ssl.CertificateError:
return False
return True | 0.803617 | 0.54256 |
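
A hedged usage sketch; `pem_blob` is a placeholder for a PEM-encoded certificate, and `check_x509_hostname` is assumed to be in scope together with its helper `extract_python_dict_from_x509` (not shown in this row).

```python
import OpenSSL.crypto

# `pem_blob` is a placeholder: bytes of a PEM-encoded certificate
x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, pem_blob)
if not check_x509_hostname(x509, "xmpp.example.com"):
    raise ValueError("certificate does not match the expected hostname")
```
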
key = self._x509_key(x509)
try:
pins = self._storage[hostname]
except KeyError:
return None
if key in pins:
return True
return None | def query(self, hostname, x509) | Return true if the given :class:`OpenSSL.crypto.X509` object `x509` has
previously been pinned for use with the given `hostname` and
:data:`None` otherwise.
Returning :data:`None` allows this method to be used with
:class:`PinningPKIXCertificateVerifier`. | 4.754226 | 3.829011 | 1.241633 | def query(self, hostname, x509):
"""
Return true if the given :class:`OpenSSL.crypto.X509` object `x509` has
previously been pinned for use with the given `hostname` and
:data:`None` otherwise.
Returning :data:`None` allows this method to be used with
:class:`PinningPKIXCertificateVerifier`.
"""
key = self._x509_key(x509)
try:
pins = self._storage[hostname]
except KeyError:
return None
if key in pins:
return True
return None | 0.858333 | 0.502441 |
A filtered version of the CodeSearchNet Python subset. Filtering combines the perplexity of each function measured with and without its docstring, learning-value and quality classifier scores, and manual review.
The original data with perplexity filtering is from here, with credit to bjoernp.
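
Each row above carries two loss values (measured without and with the docstring) followed by a third number that matches their ratio; a minimal sketch of that relationship, as I read the preview (not an official recipe):

```python
def docstring_informativeness(loss_without_docstring: float, loss_with_docstring: float) -> float:
    # how much the docstring lowers the loss on the code; higher means a more helpful docstring
    return loss_without_docstring / loss_with_docstring

# first row shown above: 2.257025 / 2.245966
print(round(docstring_informativeness(2.257025, 2.245966), 6))  # 1.004924
```
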