repo_name (stringlengths 5 to 100) | path (stringlengths 4 to 299) | copies (stringclasses, 990 values) | size (stringlengths 4 to 7) | content (stringlengths 666 to 1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17 to 100) | line_max (int64, 7 to 1k) | alpha_frac (float64, 0.25 to 0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
Varriount/Colliberation | libs/twisted/web/script.py | 20 | 5272 | # -*- test-case-name: twisted.web.test.test_script -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
I contain PythonScript, which is a very simple python script resource.
"""
import os, traceback
try:
import cStringIO as StringIO
except ImportError:
import StringIO
from twisted import copyright
from twisted.web import http, server, static, resource, html
rpyNoResource = """<p>You forgot to assign to the variable "resource" in your script. For example:</p>
<pre>
# MyCoolWebApp.rpy
import mygreatresource
resource = mygreatresource.MyGreatResource()
</pre>
"""
class AlreadyCached(Exception):
"""This exception is raised when a path has already been cached.
"""
class CacheScanner:
def __init__(self, path, registry):
self.path = path
self.registry = registry
self.doCache = 0
def cache(self):
c = self.registry.getCachedPath(self.path)
if c is not None:
raise AlreadyCached(c)
self.recache()
def recache(self):
self.doCache = 1
noRsrc = resource.ErrorPage(500, "Whoops! Internal Error", rpyNoResource)
def ResourceScript(path, registry):
"""
I am a normal py file which must define a 'resource' global, which should
be an instance of (a subclass of) web.resource.Resource; it will be
rendered.
"""
cs = CacheScanner(path, registry)
glob = {'__file__': path,
'resource': noRsrc,
'registry': registry,
'cache': cs.cache,
'recache': cs.recache}
try:
execfile(path, glob, glob)
except AlreadyCached, ac:
return ac.args[0]
rsrc = glob['resource']
if cs.doCache and rsrc is not noRsrc:
registry.cachePath(path, rsrc)
return rsrc
def ResourceTemplate(path, registry):
from quixote import ptl_compile
glob = {'__file__': path,
'resource': resource.ErrorPage(500, "Whoops! Internal Error",
rpyNoResource),
'registry': registry}
e = ptl_compile.compile_template(open(path), path)
exec e in glob
return glob['resource']
class ResourceScriptWrapper(resource.Resource):
def __init__(self, path, registry=None):
resource.Resource.__init__(self)
self.path = path
self.registry = registry or static.Registry()
def render(self, request):
res = ResourceScript(self.path, self.registry)
return res.render(request)
def getChildWithDefault(self, path, request):
res = ResourceScript(self.path, self.registry)
return res.getChildWithDefault(path, request)
class ResourceScriptDirectory(resource.Resource):
"""
L{ResourceScriptDirectory} is a resource which serves scripts from a
filesystem directory. File children of a L{ResourceScriptDirectory} will
be served using L{ResourceScript}. Directory children will be served using
another L{ResourceScriptDirectory}.
@ivar path: A C{str} giving the filesystem path in which children will be
looked up.
@ivar registry: A L{static.Registry} instance which will be used to decide
how to interpret scripts found as children of this resource.
"""
def __init__(self, pathname, registry=None):
resource.Resource.__init__(self)
self.path = pathname
self.registry = registry or static.Registry()
def getChild(self, path, request):
fn = os.path.join(self.path, path)
if os.path.isdir(fn):
return ResourceScriptDirectory(fn, self.registry)
if os.path.exists(fn):
return ResourceScript(fn, self.registry)
return resource.NoResource()
def render(self, request):
return resource.NoResource().render(request)
class PythonScript(resource.Resource):
"""I am an extremely simple dynamic resource; an embedded python script.
This will execute a file (usually of the extension '.epy') as Python code,
internal to the webserver.
"""
isLeaf = 1
def __init__(self, filename, registry):
"""Initialize me with a script name.
"""
self.filename = filename
self.registry = registry
def render(self, request):
"""Render me to a web client.
Load my file, execute it in a special namespace (with 'request' and
'__file__' global vars) and finish the request. Output to the web-page
will NOT be handled with print - standard output goes to the log - but
with request.write.
"""
request.setHeader("x-powered-by","Twisted/%s" % copyright.version)
namespace = {'request': request,
'__file__': self.filename,
'registry': self.registry}
try:
execfile(self.filename, namespace, namespace)
except IOError, e:
if e.errno == 2: #file not found
request.setResponseCode(http.NOT_FOUND)
request.write(resource.NoResource("File not found.").render(request))
except:
io = StringIO.StringIO()
traceback.print_exc(file=io)
request.write(html.PRE(io.getvalue()))
request.finish()
return server.NOT_DONE_YET
| mit | -5,841,385,579,758,247,000 | 30.195266 | 102 | 0.63676 | false |
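The `twisted.web.script` module above implements `ResourceScript` (for `.rpy` files) and `PythonScript` (for `.epy` files). A minimal sketch of how these processors are typically attached to a static root (the document root path and port are illustrative assumptions, not taken from the module):

from twisted.web import server, static, script
from twisted.internet import reactor

root = static.File("/var/www/htdocs")              # assumed document root
root.processors = {'.rpy': script.ResourceScript,  # execute .rpy files as resources
                   '.epy': script.PythonScript}    # execute .epy files as page scripts
reactor.listenTCP(8080, server.Site(root))         # assumed port
reactor.run()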
xiaohaidao007/pandoraBox-SDK-mt7620 | staging_dir/host/lib/scons-2.5.0/SCons/Scanner/LaTeX.py | 3 | 16233 | """SCons.Scanner.LaTeX
This module implements the dependency scanner for LaTeX code.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/LaTeX.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog"
import os.path
import re
import SCons.Scanner
import SCons.Util
# list of graphics file extensions for TeX and LaTeX
TexGraphics = ['.eps', '.ps']
LatexGraphics = ['.pdf', '.png', '.jpg', '.gif', '.tif']
# Used as a return value of modify_env_var if the variable is not set.
class _Null(object):
pass
_null = _Null
# The user specifies the paths in env[variable], similar to other builders.
# They may be relative and must be converted to absolute, as expected
# by LaTeX and Co. The environment may already have some paths in
# env['ENV'][var]. These paths are honored, but the env[var] paths have
# higher precedence. All changes are un-done on exit.
def modify_env_var(env, var, abspath):
try:
save = env['ENV'][var]
except KeyError:
save = _null
env.PrependENVPath(var, abspath)
try:
if SCons.Util.is_List(env[var]):
env.PrependENVPath(var, [os.path.abspath(str(p)) for p in env[var]])
else:
# Split at os.pathsep to convert into absolute path
env.PrependENVPath(var, [os.path.abspath(p) for p in str(env[var]).split(os.pathsep)])
except KeyError:
pass
# Convert into a string explicitly to append ":" (without which it won't search system
# paths as well). The problem is that env.AppendENVPath(var, ":")
# does not work, refuses to append ":" (os.pathsep).
if SCons.Util.is_List(env['ENV'][var]):
env['ENV'][var] = os.pathsep.join(env['ENV'][var])
# Append the trailing os.pathsep character here to catch the case with no env[var]
env['ENV'][var] = env['ENV'][var] + os.pathsep
return save
class FindENVPathDirs(object):
"""A class to bind a specific *PATH variable name to a function that
will return all of the *path directories."""
def __init__(self, variable):
self.variable = variable
def __call__(self, env, dir=None, target=None, source=None, argument=None):
import SCons.PathList
try:
path = env['ENV'][self.variable]
except KeyError:
return ()
dir = dir or env.fs._cwd
path = SCons.PathList.PathList(path).subst_path(env, target, source)
return tuple(dir.Rfindalldirs(path))
def LaTeXScanner():
"""Return a prototype Scanner instance for scanning LaTeX source files
when built with latex.
"""
ds = LaTeX(name = "LaTeXScanner",
suffixes = '$LATEXSUFFIXES',
# in the search order, see below in LaTeX class docstring
graphics_extensions = TexGraphics,
recursive = 0)
return ds
def PDFLaTeXScanner():
"""Return a prototype Scanner instance for scanning LaTeX source files
when built with pdflatex.
"""
ds = LaTeX(name = "PDFLaTeXScanner",
suffixes = '$LATEXSUFFIXES',
# in the search order, see below in LaTeX class docstring
graphics_extensions = LatexGraphics,
recursive = 0)
return ds
class LaTeX(SCons.Scanner.Base):
"""Class for scanning LaTeX files for included files.
Unlike most scanners, which use regular expressions that just
return the included file name, this returns a tuple consisting
of the keyword for the inclusion ("include", "includegraphics",
"input", or "bibliography"), and then the file name itself.
Based on a quick look at LaTeX documentation, it seems that we
should append .tex suffix for the "include" keywords, append .tex if
there is no extension for the "input" keyword, and need to add .bib
for the "bibliography" keyword that does not accept extensions by itself.
Finally, if there is no extension for an "includegraphics" keyword
latex will append .ps or .eps to find the file, while pdftex may use .pdf,
.jpg, .tif, .mps, or .png.
The actual subset and search order may be altered by
DeclareGraphicsExtensions command. This complication is ignored.
The default order corresponds to experimentation with teTeX
$ latex --version
pdfeTeX 3.141592-1.21a-2.2 (Web2C 7.5.4)
kpathsea version 3.5.4
The order is:
['.eps', '.ps'] for latex
['.png', '.pdf', '.jpg', '.tif'].
Another difference is that the search path is determined by the type
of the file being searched:
env['TEXINPUTS'] for "input" and "include" keywords
env['TEXINPUTS'] for "includegraphics" keyword
env['TEXINPUTS'] for "lstinputlisting" keyword
env['BIBINPUTS'] for "bibliography" keyword
env['BSTINPUTS'] for "bibliographystyle" keyword
env['INDEXSTYLE'] for "makeindex" keyword, no scanning support needed
just allows user to set it if needed.
FIXME: also look for the class or style in document[class|style]{}
FIXME: also look for the argument of bibliographystyle{}
"""
keyword_paths = {'include': 'TEXINPUTS',
'input': 'TEXINPUTS',
'includegraphics': 'TEXINPUTS',
'bibliography': 'BIBINPUTS',
'bibliographystyle': 'BSTINPUTS',
'addbibresource': 'BIBINPUTS',
'addglobalbib': 'BIBINPUTS',
'addsectionbib': 'BIBINPUTS',
'makeindex': 'INDEXSTYLE',
'usepackage': 'TEXINPUTS',
'lstinputlisting': 'TEXINPUTS'}
env_variables = SCons.Util.unique(list(keyword_paths.values()))
def __init__(self, name, suffixes, graphics_extensions, *args, **kw):
# We have to include \n with the % we exclude from the first part
# part of the regex because the expression is compiled with re.M.
# Without the \n, the ^ could match the beginning of a *previous*
# line followed by one or more newline characters (i.e. blank
# lines), interfering with a match on the next line.
# add option for whitespace before the '[options]' or the '{filename}'
regex = r'^[^%\n]*\\(include|includegraphics(?:\s*\[[^\]]+\])?|lstinputlisting(?:\[[^\]]+\])?|input|bibliography|addbibresource|addglobalbib|addsectionbib|usepackage)\s*{([^}]*)}'
self.cre = re.compile(regex, re.M)
self.comment_re = re.compile(r'^((?:(?:\\%)|[^%\n])*)(.*)$', re.M)
self.graphics_extensions = graphics_extensions
def _scan(node, env, path=(), self=self):
node = node.rfile()
if not node.exists():
return []
return self.scan_recurse(node, path)
class FindMultiPathDirs(object):
"""The stock FindPathDirs function has the wrong granularity:
it is called once per target, while we need the path that depends
on what kind of included files is being searched. This wrapper
hides multiple instances of FindPathDirs, one per the LaTeX path
variable in the environment. When invoked, the function calculates
and returns all the required paths as a dictionary (converted into
a tuple to become hashable). Then the scan function converts it
back and uses a dictionary of tuples rather than a single tuple
of paths.
"""
def __init__(self, dictionary):
self.dictionary = {}
for k,n in dictionary.items():
self.dictionary[k] = ( SCons.Scanner.FindPathDirs(n),
FindENVPathDirs(n) )
def __call__(self, env, dir=None, target=None, source=None,
argument=None):
di = {}
for k,(c,cENV) in self.dictionary.items():
di[k] = ( c(env, dir=None, target=None, source=None,
argument=None) ,
cENV(env, dir=None, target=None, source=None,
argument=None) )
# To prevent "dict is not hashable error"
return tuple(di.items())
class LaTeXScanCheck(object):
"""Skip all but LaTeX source files, i.e., do not scan *.eps,
*.pdf, *.jpg, etc.
"""
def __init__(self, suffixes):
self.suffixes = suffixes
def __call__(self, node, env):
current = not node.has_builder() or node.is_up_to_date()
scannable = node.get_suffix() in env.subst_list(self.suffixes)[0]
# Returning false means that the file is not scanned.
return scannable and current
kw['function'] = _scan
kw['path_function'] = FindMultiPathDirs(LaTeX.keyword_paths)
kw['recursive'] = 0
kw['skeys'] = suffixes
kw['scan_check'] = LaTeXScanCheck(suffixes)
kw['name'] = name
SCons.Scanner.Base.__init__(self, *args, **kw)
def _latex_names(self, include):
filename = include[1]
if include[0] == 'input':
base, ext = os.path.splitext( filename )
if ext == "":
return [filename + '.tex']
if (include[0] == 'include'):
return [filename + '.tex']
if include[0] == 'bibliography':
base, ext = os.path.splitext( filename )
if ext == "":
return [filename + '.bib']
if include[0] == 'usepackage':
base, ext = os.path.splitext( filename )
if ext == "":
return [filename + '.sty']
if include[0] == 'includegraphics':
base, ext = os.path.splitext( filename )
if ext == "":
#return [filename+e for e in self.graphics_extensions + TexGraphics]
# use the line above to find dependencies for the PDF builder
# when only an .eps figure is present. Since it will be found
# if the user tells scons how to make the pdf figure, leave
# it out for now.
return [filename+e for e in self.graphics_extensions]
return [filename]
def sort_key(self, include):
return SCons.Node.FS._my_normcase(str(include))
def find_include(self, include, source_dir, path):
try:
sub_path = path[include[0]]
except (IndexError, KeyError):
sub_path = ()
try_names = self._latex_names(include)
for n in try_names:
# see if we find it using the path in env[var]
i = SCons.Node.FS.find_file(n, (source_dir,) + sub_path[0])
if i:
return i, include
# see if we find it using the path in env['ENV'][var]
i = SCons.Node.FS.find_file(n, (source_dir,) + sub_path[1])
if i:
return i, include
return i, include
def canonical_text(self, text):
"""Standardize an input TeX-file contents.
Currently:
* removes comments, unwrapping comment-wrapped lines.
"""
out = []
line_continues_a_comment = False
for line in text.splitlines():
line,comment = self.comment_re.findall(line)[0]
if line_continues_a_comment == True:
out[-1] = out[-1] + line.lstrip()
else:
out.append(line)
line_continues_a_comment = len(comment) > 0
return '\n'.join(out).rstrip()+'\n'
def scan(self, node):
# Modify the default scan function to allow for the regular
# expression to return a comma separated list of file names
# as can be the case with the bibliography keyword.
# Cache the includes list in node so we only scan it once:
# path_dict = dict(list(path))
# add option for whitespace (\s) before the '['
noopt_cre = re.compile(r'\s*\[.*$')
if node.includes != None:
includes = node.includes
else:
text = self.canonical_text(node.get_text_contents())
includes = self.cre.findall(text)
# 1. Split comma-separated lines, e.g.
# ('bibliography', 'phys,comp')
# should become two entries
# ('bibliography', 'phys')
# ('bibliography', 'comp')
# 2. Remove the options, e.g., such as
# ('includegraphics[clip,width=0.7\\linewidth]', 'picture.eps')
# should become
# ('includegraphics', 'picture.eps')
split_includes = []
for include in includes:
inc_type = noopt_cre.sub('', include[0])
inc_list = include[1].split(',')
for j in range(len(inc_list)):
split_includes.append( (inc_type, inc_list[j]) )
#
includes = split_includes
node.includes = includes
return includes
def scan_recurse(self, node, path=()):
""" do a recursive scan of the top level target file
This lets us search for included files based on the
directory of the main file just as latex does"""
path_dict = dict(list(path))
queue = []
queue.extend( self.scan(node) )
seen = {}
# This is a hand-coded DSU (decorate-sort-undecorate, or
# Schwartzian transform) pattern. The sort key is the raw name
# of the file as specified on the \include, \input, etc. line.
# TODO: what about the comment in the original Classic scanner:
# """which lets
# us keep the sort order constant regardless of whether the file
# is actually found in a Repository or locally."""
nodes = []
source_dir = node.get_dir()
#for include in includes:
while queue:
include = queue.pop()
try:
if seen[include[1]] == 1:
continue
except KeyError:
seen[include[1]] = 1
#
# Handle multiple filenames in include[1]
#
n, i = self.find_include(include, source_dir, path_dict)
if n is None:
# Do not bother with 'usepackage' warnings, as they most
# likely refer to system-level files
if include[0] != 'usepackage':
SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
"No dependency generated for file: %s (included from: %s) -- file not found" % (i, node))
else:
sortkey = self.sort_key(n)
nodes.append((sortkey, n))
# recurse down
queue.extend( self.scan(n) )
return [pair[1] for pair in sorted(nodes)]
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 | -5,932,492,853,959,748,000 | 40.623077 | 187 | 0.585228 | false |
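As a standalone illustration of the include handling in `LaTeX.scan()` above (regular-expression match, option stripping, comma splitting), using only `re` and a made-up TeX fragment:

import re

# Same expressions as in the scanner above.
cre = re.compile(r'^[^%\n]*\\(include|includegraphics(?:\s*\[[^\]]+\])?|lstinputlisting(?:\[[^\]]+\])?|input|bibliography|addbibresource|addglobalbib|addsectionbib|usepackage)\s*{([^}]*)}', re.M)
noopt_cre = re.compile(r'\s*\[.*$')

tex = r"""
\includegraphics[width=0.7\linewidth]{picture}
\bibliography{phys,comp}
"""
split_includes = []
for inc_type, inc_names in cre.findall(tex):
    inc_type = noopt_cre.sub('', inc_type)   # drop trailing '[...]' options
    for name in inc_names.split(','):        # split comma-separated file lists
        split_includes.append((inc_type, name))
print(split_includes)
# [('includegraphics', 'picture'), ('bibliography', 'phys'), ('bibliography', 'comp')]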
luisza/dfva_client | src/client_fva/ui/validationinformationcertificate.py | 1 | 1999 | from PyQt5 import QtWidgets, QtGui
from PyQt5.QtWidgets import QTableWidgetItem
from .validationinformationcertificateui import Ui_Dialog
class ValidationInformationCertificate(QtWidgets.QDialog, Ui_Dialog):
def __init__(self, widget, main_app):
super().__init__(widget)
Ui_Dialog.__init__(self)
self.setupUi(self)
self.signer_count = 0
self.certinformation.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.certinformation.setRowCount(0)
# set column count
self.certinformation.setColumnCount(4)
self.certinformation.setHorizontalHeaderItem(0, QTableWidgetItem("Identificación"))
self.certinformation.setHorizontalHeaderItem(1, QTableWidgetItem("Nombre"))
self.certinformation.setHorizontalHeaderItem(2, QTableWidgetItem("Válido desde"))
self.certinformation.setHorizontalHeaderItem(3, QTableWidgetItem("Válido hasta"))
self.certinformation.resizeColumnsToContents()
def add_owner(self, data):
# ('status', 'status_text', 'was_successfully', 'identification', 'full_name', 'start_validity', 'end_validity' )
if data['was_successfully']:
self.certinformation.insertRow(self.certinformation.rowCount())
self.certinformation.setItem(self.signer_count, 0, QTableWidgetItem(data['identification']))
self.certinformation.setItem(self.signer_count, 1, QTableWidgetItem(data['full_name']))
self.certinformation.setItem(self.signer_count, 2, QTableWidgetItem(data['start_validity']))
self.certinformation.setItem(self.signer_count, 3, QTableWidgetItem(data['end_validity']))
self.signer_count += 1
self.certinformation.resizeColumnsToContents()
def set_status_icon(self, code):
if code:
self.statusicon.setStyleSheet("image: url(:/images/connected.png);")
else:
self.statusicon.setStyleSheet("image: url(:/images/error.png);") | gpl-3.0 | -7,060,771,720,240,273,000 | 48.925 | 121 | 0.701904 | false |
BadSingleton/pyside2 | tests/QtCore/qrect_test.py | 3 | 3364 | #!/usr/bin/python
'''Test cases for QRect'''
import unittest
from PySide2.QtCore import QPoint, QRect, QRectF
class RectConstructor(unittest.TestCase):
def testDefault(self):
#QRect()
obj = QRect()
self.assert_(obj.isNull())
def testConstructorQPoint(self):
topLeft = QPoint(3, 0)
bottomRight = QPoint(0, 3)
rect1 = QRect(topLeft, bottomRight)
rect2 = QRect(topLeft, bottomRight)
self.assertEqual(rect1, rect2)
class RectOperator(unittest.TestCase):
'''Test case for QRect operators'''
def testEqual(self):
'''QRect == QRect
Note: operator == must be working as it's the main check
for correctness'''
rect1 = QRect()
rect2 = QRect()
self.assertEqual(rect1, rect2)
rect1 = QRect(0, 4, 100, 300)
rect2 = QRect(0, 4, 100, 300)
self.assertEqual(rect1, rect2)
def testNullRectIntersection(self):
#QRect & QRect for null rects
rect1 = QRect()
rect2 = QRect()
rect3 = rect1 & rect2
self.assertEqual(rect3, rect1)
self.assertEqual(rect3, rect2)
def testNoIntersect(self):
'''QRect & QRect for non-intersecting QRects
Non-intersecting QRects return a 'null' QRect for operator &'''
rect1 = QRect(10, 10, 5, 5)
rect2 = QRect(20, 20, 5, 5)
rect3 = rect1 & rect2
self.assertEqual(rect3, QRect())
def testIntersectPartial(self):
#QRect & QRect for partial intersections
rect1 = QRect(10, 10, 10, 10)
rect2 = QRect(15, 15, 10, 10)
rect3 = rect1 & rect2
self.assertEqual(rect3, QRect(15, 15, 5, 5))
def testIntersetEnclosed(self):
#QRect & QRect for a qrect inside another
rect1 = QRect(10, 10, 20, 20)
rect2 = QRect(15, 15, 5, 5)
rect3 = rect1 & rect2
self.assertEqual(rect3, rect2)
def testNullRectIntersectBounding(self):
#QRect | QRect for null rects
rect1 = QRect()
rect2 = QRect()
rect3 = rect1 | rect2
self.assertEqual(rect3, rect1)
self.assertEqual(rect3, rect2)
def testNoIntersectBounding(self):
'''QRect | QRect for non-intersecting QRects
Non-intersecting QRects return a greater QRect for operator |'''
rect1 = QRect(10, 10, 5, 5)
rect2 = QRect(20, 20, 5, 5)
rect3 = rect1 | rect2
self.assertEqual(rect3, QRect(10, 10, 15, 15))
def testBoundingPartialIntersection(self):
#QRect | QRect for partial intersections
rect1 = QRect(10, 10, 10, 10)
rect2 = QRect(15, 15, 10, 10)
rect3 = rect1 | rect2
self.assertEqual(rect3, QRect(10, 10, 15, 15))
def testBoundingEnclosed(self):
#QRect | QRect for a qrect inside another
rect1 = QRect(10, 10, 20, 20)
rect2 = QRect(15, 15, 5, 5)
rect3 = rect1 | rect2
self.assertEqual(rect3, rect1)
def testGetCoordsAndRect(self):
rect1 = QRect(1, 2, 3, 4)
self.assertEqual(rect1.getRect(), (1, 2, 3, 4))
self.assertEqual(rect1.getCoords(), (1, 2, 3, 5))
rect1 = QRectF(1, 2, 3, 4)
self.assertEqual(rect1.getRect(), (1, 2, 3, 4))
self.assertEqual(rect1.getCoords(), (1, 2, 4, 6))
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 | -578,422,160,519,176,600 | 29.035714 | 72 | 0.588288 | false |
georgelegrand/first_gog | col.py | 1 | 4850 | import numpy as np
import random
'''
PLAINTEXT: double transposition
WRITE-IN ROUTE: left to right
READ-OUT ROUTE: top to bottom
COLUMNS: ( 3, 1, 4, 2) //currently random
ROWS: ( 3, 2, 4, 1, 5) //currently random
'''
def setToCh(smt): #converts a string of digits holding a permutation into a list of ints
smt_ch = []
for n in smt:
smt_ch.append(int(n))
#print(type(smt_ch), smt_ch)
return smt_ch
def d_print(x,y):
print("Расшифрованный текст: ", x)
return 1
def strToTable(msg, row_dim, col_dim): #writes the message into a table, left to right
msg_table = []
for i in range(0, row_dim):
msg_table.append([])
for j in range(0, col_dim):
msg_table[i].append(msg[col_dim*i +j])
#print(msg_table)
return msg_table
def changeCols(msg_table, col_ch, row_dim): #permutes the columns
new_msg_table = []
for i in range(0, row_dim):
new_msg_table.append([])
for j in col_ch:
new_msg_table[i].append(msg_table[i][j])
#print("Таблица после перестановки столбцов: ", new_msg_table)
return new_msg_table
def changeRows(msg_table, row_set): #permutes the rows
new_msg_table = []
for i in range(0, len(row_set)):
a = int(row_set[i])
new_msg_table.append(msg_table[a])
#print("Таблица после перестановки строк: ", new_msg_table)
return new_msg_table
def printCryptLR(msg_table, col_dim, row_dim): #reads out left to right
print_msg = []
for i in range(0, len(msg_table)):
for j in range(0, len(msg_table[i])):
if msg_table[i][j] != "+":
print_msg.append(msg_table[i][j])
print_msg = "".join(print_msg)
print("Зашифрованный текст: ", print_msg)
def printCrypt(msg_table, col_dim, row_dim): #reads out top to bottom
print_msg = []
for i in range(0, col_dim):
for j in range(0, row_dim):
#if msg_table[j][i] != "+":
print_msg.append(msg_table[j][i])
print_msg = "".join(print_msg)
print("Зашифрованный текст: ", print_msg)
def genCrypt(msg): #encryption
#col_dim = int(input("Enter the number of table columns: "))
col_dim = random.randint(2,len(msg)-1) #pick the table dimensions based on a random number of columns
#print("col_dim: ",col_dim)
if len(msg) % col_dim == 0: #compute the number of rows matching that many columns
row_dim = int(len(msg) / col_dim)
else:
row_dim = int(len(msg) // col_dim + 1)
for add in range(col_dim - (len(msg) % col_dim)):
msg = msg + " "
#print(msg)
#col_set = str(input("Введите порядок столбцов от 0 до " + str(col_dim-1) +" включительно (без пробелов): "))
#col_ch = setToCh(col_set)
col_temp = list(range(0, col_dim)) #generate a random column permutation
random.shuffle(col_temp)
col_dict = dict(zip(list(range(0, col_dim)),col_temp))
#print(col_dict)
#row_set = str(input("Введите порядок строк от 0 до " + str(row_dim-1) +" включительно (без пробелов): "))
#row_ch = setToCh(row_set)
row_temp = list(range(0, row_dim)) #generate a random row permutation
random.shuffle(row_temp)
row_dict = dict(zip(list(range(0, row_dim)),row_temp))
msg_table = strToTable(msg,row_dim,col_dim)
msg_table = changeCols(msg_table, col_temp, row_dim) #swap the columns
msg_table = changeRows(msg_table, row_temp) #swap the rows
printCrypt(msg_table, col_dim, row_dim)
return msg_table, col_temp, row_temp, col_dim, row_dim
def decryptTable(msg, msg_table, col_temp, row_temp, col_dim, row_dim):
d_msg_table = changeRows(msg_table, row_temp) #swap the rows
d_msg_table = changeCols(msg_table, col_temp, row_dim) #swap the columns
d_print(msg, d_msg_table)
return d_msg_table
print("\n")
print("Праздник шифрования начинается!!!")
print("\n")
msg = input("Введите текст для шифрования: ")
res = genCrypt(msg)
#d_msg = input("Введите текст для расшифрования: ")
decryptTable(msg, res[0],res[1],res[2], res[3], res[4])
#printCrypt(msg_table, col_dim, row_dim)
#printCrypt(res[0], res[1], res[2]) | mit | -6,127,168,930,026,081,000 | 29.992 | 110 | 0.65399 | false |
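Note that `decryptTable` above re-applies the same row and column permutations rather than their inverses (and its second call starts again from `msg_table`), so in general it does not recover the plaintext. A small standalone sketch of the inverse-permutation step it would need (illustrative names, independent of the script above):

def apply_cols(table, perm):
    # new column i takes the old column perm[i] (same convention as changeCols above)
    return [[row[j] for j in perm] for row in table]

def invert(perm):
    inv = [0] * len(perm)
    for new_pos, old_pos in enumerate(perm):
        inv[old_pos] = new_pos
    return inv

table = [list("ABCD"), list("EFGH")]
perm = [2, 0, 3, 1]
scrambled = apply_cols(table, perm)
restored = apply_cols(scrambled, invert(perm))
assert restored == table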
mattjmorrison/django-media-masher | src/settings.py | 1 | 1255 | from os import path
DEBUG = True
TEMPLATE_DEBUG = DEBUG
PROJECT_DIR = path.abspath(path.dirname(__file__))
ADMINS = (
('Matthew J. Morrison', 'mattj.morrison@gmail.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '.database',
}
}
USE_I18N = False
USE_L10N = True
MEDIA_ROOT = path.join(PROJECT_DIR, 'media')
STATIC_ROOT = MEDIA_ROOT
MEDIA_URL = '/static/'
SECRET_KEY = '-2cmgs7l$5grqwd!x&6241^ah&xx34ki48fwn#ef5s_lm(1@0a4w&v'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
ROOT_URLCONF = 'src.urls'
TEMPLATE_DIRS = ()
INSTALLED_APPS = (
'south',
'debug_toolbar',
'masher',
'sample_app',
'django_nose',
)
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': lambda request: DEBUG,
'INTERCEPT_REDIRECTS': False,
}
MASHER_COMPRESS = True
# Test settings
SOUTH_TESTS_MIGRATE = False
NOSE_ARGS = (
'--with-coverage',
'--with-xunit',
'--with-xcover',
'--cover-package=src'
)
#TEST_RUNNER = 'xmlrunner.extra.djangotestrunner.XMLTestRunner'
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
TEST_OUTPUT_VERBOSE = False
TEST_OUTPUT_DESCRIPTIONS = False
| mit | 5,681,423,153,971,184,000 | 19.241935 | 69 | 0.666135 | false |
Basvanstein/OWCK | build/lib.linux-x86_64-2.7/OWCK/utils.py | 1 | 2259 | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 12 16:19:28 2015
@author: wangronin
"""
import time, os
import numpy as np
from numpy import pi, log, atleast_2d, size, mod
from pyDOE import lhs
from ghalton import Halton
from sobol import i4_sobol
## SMSE measurement
# test_y is the target, pred_y the predicted target, both 1D arrays of same length
def SMSE(test_y,pred_y):
se = []
target_variance = np.var(test_y)
for i in range(len(test_y)):
temp = (pred_y[i] - test_y[i])**2
se.append(temp)
mse = np.mean(se)
smse = mse / target_variance
return smse
## MSLL = mean standardized log loss
## logprob = 0.5*log(2*pi.*varsigmaVec) + sserror - 0.5*log(2*pi*varyTrain)...
## - ((yTestVec - meanyTrain).^2)./(2*varyTrain);
def MSLL(train_y,test_y,pred_y,variances):
sll = []
mean_y = np.mean(train_y)
var_y = np.var(train_y)
for i in range(len(variances)):
if variances[i] == 0:
variances[i] += 0.0000001 #hack
sll_trivial = 0.5*log(2 * pi * var_y) + ((test_y[i] - mean_y)**2 / (2* var_y))
sllv = ( 0.5*log(2 * pi * variances[i]) + \
((test_y[i] - pred_y[i])**2 / (2* variances[i])) ) - sll_trivial
sll.append(sllv)
sll = np.array(sll)
msll = np.mean(sll)
return msll
# Obtain the initial design locations
def get_design_sites(dim, n_sample, x_lb, x_ub, sampling_method='lhs'):
x_lb = atleast_2d(x_lb)
x_ub = atleast_2d(x_ub)
x_lb = x_lb.T if size(x_lb, 0) != 1 else x_lb
x_ub = x_ub.T if size(x_ub, 0) != 1 else x_ub
if sampling_method == 'lhs':
# Latin Hyper Cube Sampling: Get evenly distributed sampling in R^dim
samples = lhs(dim, samples=n_sample) * (x_ub - x_lb) + x_lb
elif sampling_method == 'uniform':
samples = np.random.rand(n_sample, dim) * (x_ub - x_lb) + x_lb
elif sampling_method == 'sobol':
seed = mod(int(time.time()) + os.getpid(), int(1e6))
samples = np.zeros((n_sample, dim))
for i in range(n_sample):
samples[i, :], seed = i4_sobol(dim, seed)
samples = samples * (x_ub - x_lb) + x_lb
elif sampling_method == 'halton':
sequencer = Halton(dim)
samples = sequencer.get(n_sample) * (x_ub - x_lb) + x_lb
return samples
| gpl-2.0 | 761,808,325,784,908,200 | 29.945205 | 82 | 0.590969 | false |
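A toy usage sketch of the metrics above; the import path is an assumption based on this file's location, and the numbers are made up:

import numpy as np
from OWCK.utils import SMSE, MSLL   # assumed import path for the module above

train_y = np.array([0.0, 1.0, 2.0, 3.0])
test_y = np.array([1.5, 2.5])
pred_y = np.array([1.4, 2.7])
variances = np.array([0.1, 0.2])    # predictive variances returned by the model

print(SMSE(test_y, pred_y))                      # mean squared error scaled by Var(test_y)
print(MSLL(train_y, test_y, pred_y, variances))  # negative values beat the trivial predictor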
ppiotr/Invenio | modules/bibexport/lib/bibexport_method_sitemap.py | 5 | 17343 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibExport plugin implementing 'sitemap' exporting method.
The main function is run_export_method(jobname) defined at the end.
This is what BibExport daemon calls for all the export jobs that use
this exporting method.
"""
from datetime import datetime
from urllib import quote
from ConfigParser import ConfigParser
import os
import gzip
import time
from invenio.search_engine import get_collection_reclist
from invenio.dbquery import run_sql
from invenio.config import CFG_SITE_URL, CFG_WEBDIR, CFG_ETCDIR, \
CFG_SITE_RECORD, CFG_SITE_LANGS
from invenio.intbitset import intbitset
from invenio.websearch_webcoll import Collection
from invenio.bibtask import write_message, task_update_progress, task_sleep_now_if_required
from invenio.textutils import encode_for_xml
from invenio.urlutils import get_canonical_and_alternates_urls
DEFAULT_TIMEZONE = '+01:00'
DEFAULT_PRIORITY_HOME = 1
DEFAULT_CHANGEFREQ_HOME = 'hourly'
DEFAULT_PRIORITY_RECORDS = 0.8
DEFAULT_CHANGEFREQ_RECORDS = 'weekly'
DEFAULT_PRIORITY_COMMENTS = 0.4
DEFAULT_CHANGEFREQ_COMMENTS = 'weekly'
DEFAULT_PRIORITY_REVIEWS = 0.6
DEFAULT_CHANGEFREQ_REVIEWS = 'weekly'
DEFAULT_PRIORITY_FULLTEXTS = 0.9
DEFAULT_CHANGEFREQ_FULLTEXTS = 'weekly'
DEFAULT_PRIORITY_COLLECTIONS = 0.3
DEFAULT_CHANGEFREQ_COLLECTIONS = 'hourly'
MAX_RECORDS = 50000
MAX_SIZE = 10000000
def get_all_public_records(collections):
""" Get all records which exist (i.e. not suppressed ones) and are in
accessible collection.
returns list of (recid, last_modification) tuples
"""
recids = intbitset()
for collection in collections:
recids += get_collection_reclist(collection)
query = 'SELECT id, modification_date FROM bibrec'
res = run_sql(query)
return [(recid, lastmod) for (recid, lastmod) in res if recid in recids]
def get_all_public_collections(base_collections):
""" Return a list of (collection.name, last_modification) tuples for all
collections and subcollections of base_collections
"""
def get_collection_last_modification(collection):
""" last modification = modification date fo latest added record """
last_mod = None
query_last_mod = "SELECT modification_date FROM bibrec WHERE id=%s"
try:
latest_recid = collection.reclist.tolist()[-1]
except IndexError:
# this collection is empty
return last_mod
res = run_sql(query_last_mod, (latest_recid,))
if res and res[0][0]:
last_mod = res[0][0]
return last_mod
output = []
for coll_name in base_collections:
mother_collection = Collection(coll_name)
if not mother_collection.restricted_p():
last_mod = get_collection_last_modification(mother_collection)
output.append((coll_name, last_mod))
for descendant in mother_collection.get_descendants(type='r'):
if not descendant.restricted_p():
last_mod = get_collection_last_modification(descendant)
output.append((descendant.name, last_mod))
for descendant in mother_collection.get_descendants(type='v'):
if not descendant.restricted_p():
last_mod = get_collection_last_modification(descendant)
output.append((descendant.name, last_mod))
return output
def filter_fulltexts(recids, fulltext_type=None):
""" returns list of records having a fulltext of type fulltext_type.
If fulltext_type is empty, return all records having a fulltext"""
recids = dict(recids)
if fulltext_type:
query = """SELECT id_bibrec, max(modification_date)
FROM bibrec_bibdoc
LEFT JOIN bibdoc ON bibrec_bibdoc.id_bibdoc=bibdoc.id
WHERE type=%s
GROUP BY id_bibrec"""
res = run_sql(query, (fulltext_type,))
else:
query = """SELECT id_bibrec, max(modification_date)
FROM bibrec_bibdoc
LEFT JOIN bibdoc ON bibrec_bibdoc.id_bibdoc=bibdoc.id
GROUP BY id_bibrec"""
res = run_sql(query)
return [(recid, lastmod) for (recid, lastmod) in res if recid in recids]
def filter_comments(recids):
""" Retrieve recids having a comment. return (recid, last_review_date)"""
recids = dict(recids)
query = """SELECT id_bibrec, max(date_creation)
FROM cmtRECORDCOMMENT
WHERE star_score=0
GROUP BY id_bibrec"""
res = run_sql(query)
return [(recid, lastmod) for (recid, lastmod) in res if recid in recids]
def filter_reviews(recids):
""" Retrieve recids having a review. return (recid, last_review_date)"""
recids = dict(recids)
query = """SELECT id_bibrec, max(date_creation)
FROM cmtRECORDCOMMENT
WHERE star_score>0
GROUP BY id_bibrec"""
res = run_sql(query)
return [(recid, lastmod) for (recid, lastmod) in res if recid in recids]
SITEMAP_HEADER = """\
<?xml version="1.0" encoding="UTF-8"?>
<urlset
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xhtml="http://www.w3.org/1999/xhtml"
xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9
http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd"
xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">"""
SITEMAP_FOOTER = '\n</urlset>\n'
class SitemapWriter(object):
""" Writer for sitemaps"""
def __init__(self, sitemap_id):
""" Constructor.
name: path to the sitemap file to be created
"""
self.header = SITEMAP_HEADER
self.footer = SITEMAP_FOOTER
self.sitemap_id = sitemap_id
self.name = os.path.join(CFG_WEBDIR, 'sitemap-%02d.xml.gz' % sitemap_id)
self.filedescriptor = gzip.open(self.name + '.part', 'w')
self.num_urls = 0
self.file_size = 0
self.filedescriptor.write(self.header)
self.file_size += len(self.footer)
def add_url(self, url, lastmod=datetime(1900, 1, 1), changefreq="", priority="", alternate=False):
""" create a new url node. Returns the number of url nodes in sitemap"""
self.num_urls += 1
canonical_url, alternate_urls = get_canonical_and_alternates_urls(url, drop_ln=not alternate)
url_node = u"""
<url>
<loc>%s</loc>%s
</url>"""
optional = ''
if lastmod:
optional += u"""
<lastmod>%s</lastmod>""" % lastmod.strftime('%Y-%m-%dT%H:%M:%S' + \
DEFAULT_TIMEZONE)
if changefreq:
optional += u"""
<changefreq>%s</changefreq>""" % changefreq
if priority:
optional += u"""
<priority>%s</priority>""" % priority
if alternate:
for ln, alternate_url in alternate_urls.iteritems():
ln = ln.replace('_', '-') ## zh_CN -> zh-CN
optional += u"""
<xhtml:link rel="alternate" hreflang="%s" href="%s" />""" % (ln, encode_for_xml(alternate_url, quote=True))
url_node %= (encode_for_xml(canonical_url), optional)
self.file_size += len(url_node)
self.filedescriptor.write(url_node)
return self.num_urls
def get_size(self):
""" File size. Should not be > 10MB """
return self.file_size + len(self.footer)
def get_number_of_urls(self):
""" Number of urls in the sitemap. Should not be > 50'000"""
return self.num_urls
def get_name(self):
""" Returns the filename """
return self.name
def get_sitemap_url(self):
""" Returns the sitemap URL"""
return CFG_SITE_URL + '/' + os.path.basename(self.name)
def __del__(self):
""" Writes the whole sitemap """
self.filedescriptor.write(self.footer)
self.filedescriptor.close()
os.rename(self.name + '.part', self.name)
SITEMAP_INDEX_HEADER = \
'<?xml version="1.0" encoding="UTF-8"?>\n' \
'<sitemapindex\n' \
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
' xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9\n' \
' http://www.sitemaps.org/schemas/sitemap/0.9/siteindex.xsd"\n' \
' xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">'
SITEMAP_INDEX_FOOTER = '\n</sitemapindex>\n'
class SitemapIndexWriter(object):
"""class for writing Sitemap Index files."""
def __init__(self, name):
""" Constructor.
name: path to the sitemap index file to be created
"""
self.header = SITEMAP_INDEX_HEADER
self.footer = SITEMAP_INDEX_FOOTER
self.name = name
self.filedescriptor = gzip.open(self.name + '.part', 'w')
self.num_urls = 0
self.file_size = 0
self.filedescriptor.write(self.header)
self.file_size += len(self.footer)
def add_url(self, url):
""" create a new url node. Returns the number of url nodes in sitemap"""
self.num_urls += 1
url_node = u"""
<sitemap>
<loc>%s</loc>%s
</sitemap>"""
optional = u"""
<lastmod>%s</lastmod>""" % time.strftime('%Y-%m-%dT%H:%M:%S' +\
DEFAULT_TIMEZONE)
url_node %= (url, optional)
self.file_size += len(url_node)
self.filedescriptor.write(url_node)
return self.num_urls
def __del__(self):
""" Writes the whole sitemap """
self.filedescriptor.write(self.footer)
self.filedescriptor.close()
os.rename(self.name + '.part', self.name)
def generate_sitemaps(sitemap_index_writer, collection_names, fulltext_filter=''):
"""
Generate sitemaps themselves. Return list of generated sitemaps files
"""
sitemap_id = 1
writer = SitemapWriter(sitemap_id)
sitemap_index_writer.add_url(writer.get_sitemap_url())
nb_urls = 0
for lang in CFG_SITE_LANGS:
writer.add_url(CFG_SITE_URL + '/?ln=%s' % lang,
lastmod=datetime.today(),
changefreq=DEFAULT_CHANGEFREQ_HOME,
priority=DEFAULT_PRIORITY_HOME)
nb_urls += 1
write_message("... Getting all public records...")
recids = get_all_public_records(collection_names)
write_message("... Generating urls for %s records..." % len(recids))
task_sleep_now_if_required(can_stop_too=True)
for i, (recid, lastmod) in enumerate(recids):
if nb_urls % 100 == 0 and (writer.get_size() >= MAX_SIZE or nb_urls >= MAX_RECORDS):
sitemap_id += 1
writer = SitemapWriter(sitemap_id)
sitemap_index_writer.add_url(writer.get_sitemap_url())
nb_urls = writer.add_url(CFG_SITE_URL + '/%s/%s' % (CFG_SITE_RECORD, recid),
lastmod = lastmod,
changefreq = DEFAULT_CHANGEFREQ_RECORDS,
priority = DEFAULT_PRIORITY_RECORDS)
if i % 100 == 0:
task_update_progress("Sitemap for recid %s/%s" % (i + 1, len(recids)))
task_sleep_now_if_required(can_stop_too=True)
write_message("... Generating urls for collections...")
collections = get_all_public_collections(collection_names)
for i, (collection, lastmod) in enumerate(collections):
for lang in CFG_SITE_LANGS:
if nb_urls % 100 == 0 and (writer.get_size() >= MAX_SIZE or nb_urls >= MAX_RECORDS):
sitemap_id += 1
writer = SitemapWriter(sitemap_id)
sitemap_index_writer.add_url(writer.get_sitemap_url())
nb_urls = writer.add_url('%s/collection/%s?ln=%s' % (CFG_SITE_URL, quote(collection), lang),
lastmod = lastmod,
changefreq = DEFAULT_CHANGEFREQ_COLLECTIONS,
priority = DEFAULT_PRIORITY_COLLECTIONS,
alternate=True)
if i % 100 == 0:
task_update_progress("Sitemap for collection %s/%s" % (i + 1, len(collections)))
task_sleep_now_if_required(can_stop_too=True)
write_message("... Generating urls for fulltexts...")
recids = filter_fulltexts(recids, fulltext_filter)
for i, (recid, lastmod) in enumerate(recids):
if nb_urls % 100 == 0 and (writer.get_size() >= MAX_SIZE or nb_urls >= MAX_RECORDS):
sitemap_id += 1
writer = SitemapWriter(sitemap_id)
sitemap_index_writer.add_url(writer.get_sitemap_url())
nb_urls = writer.add_url(CFG_SITE_URL + '/%s/%s/files' % (CFG_SITE_RECORD, recid),
lastmod = lastmod,
changefreq = DEFAULT_CHANGEFREQ_FULLTEXTS,
priority = DEFAULT_PRIORITY_FULLTEXTS)
if i % 100 == 0:
task_update_progress("Sitemap for files page %s/%s" % (i, len(recids)))
task_sleep_now_if_required(can_stop_too=True)
write_message("... Generating urls for comments...")
recids = filter_comments(recids)
for i, (recid, lastmod) in enumerate(recids):
if nb_urls % 100 == 0 and (writer.get_size() >= MAX_SIZE or nb_urls >= MAX_RECORDS):
sitemap_id += 1
writer = SitemapWriter(sitemap_id)
sitemap_index_writer.add_url(writer.get_sitemap_url())
nb_urls = writer.add_url(CFG_SITE_URL + '/%s/%s/comments' % (CFG_SITE_RECORD, recid),
lastmod = lastmod,
changefreq = DEFAULT_CHANGEFREQ_COMMENTS,
priority = DEFAULT_PRIORITY_COMMENTS)
if i % 100 == 0:
task_update_progress("Sitemap for comments page %s/%s" % (i, len(recids)))
task_sleep_now_if_required(can_stop_too=True)
write_message("... Generating urls for reviews")
recids = filter_reviews(recids)
for i, (recid, lastmod) in enumerate(recids):
if nb_urls % 100 == 0 and (writer.get_size() >= MAX_SIZE or nb_urls >= MAX_RECORDS):
sitemap_id += 1
write_message("")
writer = SitemapWriter(sitemap_id)
sitemap_index_writer.add_url(writer.get_sitemap_url())
nb_urls = writer.add_url(CFG_SITE_URL + '/%s/%s/reviews' % (CFG_SITE_RECORD, recid),
lastmod = lastmod,
changefreq = DEFAULT_CHANGEFREQ_REVIEWS,
priority = DEFAULT_PRIORITY_REVIEWS)
if i % 100 == 0:
task_update_progress("Sitemap for reviews page %s/%s" % (i, len(recids)))
task_sleep_now_if_required(can_stop_too=True)
def generate_sitemaps_index(collection_list, fulltext_filter=None):
"""main function. Generates the sitemap index and the sitemaps
collection_list: list of collection names to add in sitemap
fulltext_filter: if provided the parser will intergrate only give fulltext
types
"""
write_message("Generating all sitemaps...")
sitemap_index_writer = SitemapIndexWriter(CFG_WEBDIR + '/sitemap-index.xml.gz')
generate_sitemaps(sitemap_index_writer, collection_list, fulltext_filter)
def run_export_method(jobname):
"""Main function, reading params and running the task."""
write_message("bibexport_sitemap: job %s started." % jobname)
collections = get_config_parameter(jobname=jobname, parameter_name="collection", is_parameter_collection = True)
fulltext_type = get_config_parameter(jobname=jobname, parameter_name="fulltext_status")
generate_sitemaps_index(collections, fulltext_type)
write_message("bibexport_sitemap: job %s finished." % jobname)
def get_config_parameter(jobname, parameter_name, is_parameter_collection = False):
"""Detect export method of JOBNAME. Basically, parse JOBNAME.cfg
and return export_method. Return None if problem found."""
jobconfig = ConfigParser()
jobconffile = CFG_ETCDIR + os.sep + 'bibexport' + os.sep + jobname + '.cfg'
if not os.path.exists(jobconffile):
write_message("ERROR: cannot find config file %s." % jobconffile)
return None
jobconfig.read(jobconffile)
if is_parameter_collection:
all_items = jobconfig.items(section='export_job')
parameters = []
for item_name, item_value in all_items:
if item_name.startswith(parameter_name):
parameters.append(item_value)
return parameters
else:
parameter = jobconfig.get('export_job', parameter_name)
return parameter
| gpl-2.0 | -7,477,199,330,989,505,000 | 40.097156 | 116 | 0.617598 | false |
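For reference, a minimal sketch of the `[export_job]` configuration that `get_config_parameter()` above parses; the option names mirror the code, while the values are illustrative and not taken from a real Invenio installation:

from ConfigParser import ConfigParser
from StringIO import StringIO

sample = """\
[export_job]
export_method = sitemap
collection_1 = Articles
collection_2 = Preprints
fulltext_status = Main
"""
jobconfig = ConfigParser()
jobconfig.readfp(StringIO(sample))
collections = [value for name, value in jobconfig.items('export_job')
               if name.startswith('collection')]
print(collections)                                     # ['Articles', 'Preprints']
print(jobconfig.get('export_job', 'fulltext_status'))  # Main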
gangadhar-kadam/sapphire_app | selling/doctype/lead/test_lead.py | 2 | 1061 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
test_records = [
[{"doctype":"Lead", "lead_name": "_Test Lead", "status":"Open",
"email_id":"test_lead@example.com", "territory": "_Test Territory"}],
[{"doctype":"Lead", "lead_name": "_Test Lead 1", "status":"Open",
"email_id":"test_lead1@example.com"}],
[{"doctype":"Lead", "lead_name": "_Test Lead 2", "status":"Contacted",
"email_id":"test_lead2@example.com"}],
[{"doctype":"Lead", "lead_name": "_Test Lead 3", "status":"Converted",
"email_id":"test_lead3@example.com"}],
]
import webnotes
import unittest
class TestLead(unittest.TestCase):
def test_make_customer(self):
from selling.doctype.lead.lead import make_customer
customer = make_customer("_T-Lead-00001")
self.assertEquals(customer[0]["doctype"], "Customer")
self.assertEquals(customer[0]["lead_name"], "_T-Lead-00001")
customer[0].customer_group = "_Test Customer Group"
webnotes.bean(customer).insert()
| agpl-3.0 | 4,780,978,341,459,978,000 | 34.4 | 72 | 0.673893 | false |
Natetempid/nearfield | GUI_Layout_1/GUI_Layout_1/usbswitch.py | 1 | 2285 | import serial
import threading
import Queue as q
import datetime
import numpy as np
import sys
import time
#reload(sys)
#sys.setdefaultencoding('utf8')
class usbswitch():
def __init__(self, name):
self.ctrl = serial.Serial()
portname = ""
for k in range(0,10):
if str(k) in name:
print portname
portname = "COM%d" % k
self.ctrl.port = portname
self.ctrl.baudrate = 115200
self.ctrl.timeout = 2
self.ctrl.open()
self.error = None
self.relays = []
self.__initrelays()
def __initrelays(self):
for k in range(1,9):
relay_tmp = relay(self,k)
relay_tmp.turnOff()
self.relays.append(relay_tmp)
def turnOffAllRelays(self):
self.ctrl.write( chr(254) + chr(129) + chr(1) )
for k in range(0,8):
self.relays[k].status = 0
def close(self):
self.ctrl.close()
class relay():
def __init__(self, master, number):
self.master = master
if number < 1 or number > 8:
self.number = None
return None
else:
self.number = number #number is for relay 1 - 8
self.onID = self.set_onID() #this is an integer that is sent to relay to turn it on
self.offID = self.set_offID() #this is an integer that is sent to relay to turn it off
self.statusID = self.set_statusID()
self.status = 0
#self.getStatus()
def set_onID(self):
return 107 + self.number
def set_offID(self):
return 99 + self.number
def set_statusID(self):
return 115 + self.number
def turnOn(self):
self.master.ctrl.write( chr(254) + chr(self.onID) + chr(1) )
self.status = 1
def turnOff(self):
self.master.ctrl.write( chr(254) + chr(self.offID) + chr(1))
self.status = 0
def getStatus(self):
waste = self.master.ctrl.read(1024) #read everything in the buffer currently, and then write
self.master.ctrl.write( chr(254) + chr(self.statusID) + chr(1))
#print self.master.ctrl.read(1024)
input = self.master.ctrl.read(1024)
print input
self.status = ord(input)
| gpl-3.0 | 7,338,342,678,479,861,000 | 27.209877 | 100 | 0.563239 | false |
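A usage sketch for the classes above; the module name, COM port and relay index are assumptions, and it presumes a relay board is actually attached:

from usbswitch import usbswitch   # assumed module name, matching this file

board = usbswitch("COM3")         # the constructor extracts the port digit from the name
board.relays[0].turnOn()          # self.relays is a 0-based list of relays 1..8
board.relays[0].getStatus()       # queries the board and updates .status
board.turnOffAllRelays()
board.close()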
codrut3/tensorflow | tensorflow/contrib/receptive_field/python/util/receptive_field.py | 16 | 23376 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to compute receptive field of a fully-convolutional network.
Please refer to the following g3doc for detailed explanation on how this
computation is performed, and why it is important:
g3doc/photos/vision/features/delf/g3doc/rf_computation.md
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.receptive_field.python.util import graph_compute_order
from tensorflow.contrib.util import make_ndarray
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.framework import ops as framework_ops
import numpy as np
# White-listed layer operations, which do not affect the receptive field
# computation.
_UNCHANGED_RF_LAYER_OPS = [
'Add', 'BiasAdd', 'Ceil', 'ConcatV2', 'Const', 'Floor', 'Identity', 'Log',
'Mul', 'Pow', 'RealDiv', 'Relu', 'Round', 'Rsqrt', 'Softplus', 'Sub',
'VariableV2']
# Different ways in which padding modes may be spelled.
_VALID_PADDING = ["VALID", b"VALID"]
_SAME_PADDING = ["SAME", b"SAME"]
def _stride_size(node):
"""Computes stride size given a TF node.
Args:
node: Tensorflow node (NodeDef proto).
Returns:
stride_x: Stride size for horizontal direction (integer).
stride_y: Stride size for vertical direction (integer).
"""
strides_attr = node.attr["strides"]
logging.vlog(4, "strides_attr = %s", strides_attr)
stride_y = strides_attr.list.i[1]
stride_x = strides_attr.list.i[2]
return stride_x, stride_y
def _conv_kernel_size(node, name_to_order_node):
"""Computes kernel size given a TF convolution or pooling node.
Args:
node: Tensorflow node (NodeDef proto).
name_to_order_node: Map from name to {order, node}. Output of
graph_compute_order.get_compute_order().
Returns:
kernel_size_x: Kernel size for horizontal direction (integer).
kernel_size_y: Kernel size for vertical direction (integer).
Raises:
ValueError: If the weight layer node is invalid.
"""
weights_layer_read_name = node.input[1]
if not weights_layer_read_name.endswith("/read"):
raise ValueError(
"Weight layer's name input to conv layer does not end with '/read'")
weights_layer_param_name = weights_layer_read_name[:-5]
weights_node = name_to_order_node[weights_layer_param_name].node
if weights_node.op != "VariableV2":
raise ValueError("Weight layer is not of type VariableV2")
shape = weights_node.attr["shape"]
logging.vlog(4, "weight shape = %s", shape)
kernel_size_y = shape.shape.dim[0].size
kernel_size_x = shape.shape.dim[1].size
return kernel_size_x, kernel_size_y
def _padding_size_conv_pool(node, kernel_size, stride):
"""Computes padding size given a TF convolution or pooling node.
Args:
node: Tensorflow node (NodeDef proto).
kernel_size: Kernel size of node (integer).
stride: Stride size of node (integer).
Returns:
padding: Padding size (integer).
Raises:
ValueError: If padding is invalid.
"""
# In this case, we need to carefully consider the different TF padding modes.
# The padding depends on kernel size, and may depend on input size. If it
# depends on input size, we raise an exception.
padding_attr = node.attr["padding"]
logging.vlog(4, "padding_attr = %s", padding_attr)
if padding_attr.s in _VALID_PADDING:
padding = 0
elif padding_attr.s in _SAME_PADDING:
if kernel_size == 1:
padding = 0
elif stride == 1:
padding = int(math.floor((float(kernel_size) - 1) / 2))
elif stride == 2 and kernel_size % 2 == 0:
padding = int(math.floor((float(kernel_size) - 1) / 2))
else:
padding = None
logging.warning(
"Padding depends on input size, which means that the effective "
"padding may be different depending on the input image "
"dimensionality. In this case, alignment check will be skipped.")
else:
raise ValueError("Invalid padding operation %s" % padding_attr.s)
return padding
def _pool_kernel_size(node):
"""Computes kernel size given a TF pooling node.
Args:
node: Tensorflow node (NodeDef proto).
Returns:
kernel_size_x: Kernel size for horizontal direction (integer).
kernel_size_y: Kernel size for vertical direction (integer).
Raises:
ValueError: If pooling is invalid.
"""
ksize = node.attr["ksize"]
kernel_size_y = ksize.list.i[1]
kernel_size_x = ksize.list.i[2]
if ksize.list.i[0] != 1:
raise ValueError("pool ksize for first dim is not 1")
if ksize.list.i[3] != 1:
raise ValueError("pool ksize for last dim is not 1")
return kernel_size_x, kernel_size_y
def _padding_size_pad_layer(node, name_to_order_node):
"""Computes padding size given a TF padding node.
Args:
node: Tensorflow node (NodeDef proto).
name_to_order_node: Map from name to {order, node}. Output of
graph_compute_order.get_compute_order().
Returns:
padding_x: Padding size for horizontal direction (integer).
padding_y: Padding size for vertical direction (integer).
Raises:
ValueError: If padding layer is invalid.
"""
paddings_layer_name = node.input[1]
if not paddings_layer_name.endswith("/paddings"):
raise ValueError("Padding layer name does not end with '/paddings'")
paddings_node = name_to_order_node[paddings_layer_name].node
if paddings_node.op != "Const":
raise ValueError("Padding op is not Const")
value = paddings_node.attr["value"]
t = make_ndarray(value.tensor)
padding_y = t[1][0]
padding_x = t[2][0]
if t[0][0] != 0:
raise ValueError("padding is not zero for first tensor dim")
if t[3][0] != 0:
raise ValueError("padding is not zero for last tensor dim")
return padding_x, padding_y
def _get_layer_params(node, name_to_order_node):
"""Gets layer parameters relevant for RF computation.
Currently, only these nodes are supported:
- Conv2D
- DepthwiseConv2dNative
- Pad
- MaxPool
- AvgPool
- all nodes listed in _UNCHANGED_RF_LAYER_OPS
Args:
node: Tensorflow node (NodeDef proto).
name_to_order_node: Map from name to {order, node}. Output of
graph_compute_order.get_compute_order().
Returns:
kernel_size_x: Kernel size for horizontal direction (integer).
kernel_size_y: Kernel size for vertical direction (integer).
stride_x: Stride size for horizontal direction (integer).
stride_y: Stride size for vertical direction (integer).
padding_x: Padding size for horizontal direction (integer).
padding_y: Padding size for vertical direction (integer).
Raises:
ValueError: If layer op is unknown.
"""
logging.vlog(3, "node.op = %s", node.op)
logging.vlog(4, "node = %s", node)
if node.op == "Conv2D" or node.op == "DepthwiseConv2dNative":
stride_x, stride_y = _stride_size(node)
kernel_size_x, kernel_size_y = _conv_kernel_size(node, name_to_order_node)
# Compute the padding for this node separately for each direction.
padding_x = _padding_size_conv_pool(node, kernel_size_x, stride_x)
padding_y = _padding_size_conv_pool(node, kernel_size_y, stride_y)
elif node.op == "Pad":
# Kernel and stride are simply 1 in this case.
kernel_size_x = 1
kernel_size_y = 1
stride_x = 1
stride_y = 1
padding_x, padding_y = _padding_size_pad_layer(node, name_to_order_node)
elif node.op == "MaxPool" or node.op == "AvgPool":
stride_x, stride_y = _stride_size(node)
kernel_size_x, kernel_size_y = _pool_kernel_size(node)
# Compute the padding for this node separately for each direction.
padding_x = _padding_size_conv_pool(node, kernel_size_x, stride_x)
padding_y = _padding_size_conv_pool(node, kernel_size_y, stride_y)
elif node.op in _UNCHANGED_RF_LAYER_OPS:
# These nodes do not modify the RF parameters.
kernel_size_x = 1
kernel_size_y = 1
stride_x = 1
stride_y = 1
padding_x = 0
padding_y = 0
else:
raise ValueError("Unknown layer for operation '%s': %s" %
(node.name, node.op))
return kernel_size_x, kernel_size_y, stride_x, stride_y, padding_x, padding_y
def _reverse_sort_by_order(name_to_order_node):
"""Sorts map of name_to_order_node nodes in reverse order.
The output is such that the nodes in name_to_order_node are sorted in
descending order of the "order" field.
Args:
name_to_order_node: Map from name to {order, node}. Output of
graph_compute_order.get_compute_order().
Returns:
sorted_name_to_order_node: Sorted version of the input, in descending order.
"""
return sorted(name_to_order_node.items(), key=lambda x: -x[1].order)
def _get_rf_size_node_input(stride, kernel_size, rf_size_output):
"""Computes RF size at the input of a given layer.
Args:
stride: Stride of given layer (integer).
kernel_size: Kernel size of given layer (integer).
rf_size_output: RF size at output of given layer (integer).
Returns:
rf_size_input: RF size at input of given layer (integer).
"""
return stride * rf_size_output + kernel_size - stride
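# Worked example with illustrative numbers: a layer with kernel_size = 3 and
# stride = 2 whose output receptive field is 5 has an input receptive field of
# 2 * 5 + 3 - 2 = 11.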
def _get_effective_stride_node_input(stride, effective_stride_output):
"""Computes effective stride at the input of a given layer.
Args:
stride: Stride of given layer (integer).
effective_stride_output: Effective stride at output of given layer
(integer).
Returns:
effective_stride_input: Effective stride at input of given layer
(integer).
"""
return stride * effective_stride_output
def _get_effective_padding_node_input(stride, padding,
effective_padding_output):
"""Computes effective padding at the input of a given layer.
Args:
stride: Stride of given layer (integer).
padding: Padding of given layer (integer).
effective_padding_output: Effective padding at output of given layer
(integer).
Returns:
effective_padding_input: Effective padding at input of given layer
(integer).
"""
return stride * effective_padding_output + padding
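# Worked example with illustrative numbers: with stride = 2, padding = 1 and an
# effective output padding of 3, the effective input padding is 2 * 3 + 1 = 7.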
class ReceptiveField:
"""
Receptive field of a convolutional neural network.
Args:
size: Receptive field size.
stride: Effective stride.
padding: Effective padding.
"""
def __init__(self, size, stride, padding):
self.size = np.asarray(size)
self.stride = np.asarray(stride)
self.padding = np.asarray(padding)
def compute_input_center_coordinates(self, y, axis=None):
"""
Computes the center of the receptive field that generated a feature.
Args:
y: An array of feature coordinates with shape `(..., d)`, where `d` is the
number of dimensions of the coordinates.
axis: The dimensions for which to compute the input center coordinates.
If `None` (the default), compute the input center coordinates for all
dimensions.
Returns:
x: Center of the receptive field that generated the features, at the input
of the network.
Raises:
ValueError: If the number of dimensions of the feature coordinates does
not match the number of elements in `axis`.
"""
# Use all dimensions.
if axis is None:
axis = range(self.size.size)
# Ensure axis is a list because tuples have different indexing behavior.
axis = list(axis)
y = np.asarray(y)
if y.shape[-1] != len(axis):
raise ValueError("Dimensionality of the feature coordinates `y` (%d) "
"does not match dimensionality of `axis` (%d)" %
(y.shape[-1], len(axis)))
return - self.padding[axis] + y * self.stride[axis] + \
(self.size[axis] - 1) / 2
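  # Illustrative example (not part of the original code): for a network with
  # receptive field size 11, effective stride 4 and effective padding 5 in both
  # directions, the feature at (0, 0) maps to input center
  # -5 + 0 * 4 + (11 - 1) / 2 = 0, i.e. the top-left input pixel.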
def compute_feature_coordinates(self, x, axis=None):
"""
Computes the position of a feature given the center of a receptive field.
Args:
x: An array of input center coordinates with shape `(..., d)`, where `d`
is the number of dimensions of the coordinates.
axis: The dimensions for which to compute the feature coordinates.
If `None` (the default), compute the feature coordinates for all
dimensions.
Returns:
y: Coordinates of the features.
Raises:
ValueError: If the number of dimensions of the input center coordinates
does not match the number of elements in `axis`.
"""
# Use all dimensions.
if axis is None:
axis = range(self.size.size)
# Ensure axis is a list because tuples have different indexing behavior.
axis = list(axis)
x = np.asarray(x)
if x.shape[-1] != len(axis):
raise ValueError("Dimensionality of the input center coordinates `x` "
"(%d) does not match dimensionality of `axis` (%d)" %
(x.shape[-1], len(axis)))
return (x + self.padding[axis] + (1 - self.size[axis]) / 2) / \
self.stride[axis]
def __iter__(self):
return iter(np.concatenate([self.size, self.stride, self.padding]))
def compute_receptive_field_from_graph_def(graph_def, input_node, output_node,
stop_propagation=None):
"""Computes receptive field (RF) parameters from a Graph or GraphDef object.
The algorithm stops the calculation of the receptive field whenever it
encounters an operation in the list `stop_propagation`. Stopping the
calculation early can be useful to calculate the receptive field of a
subgraph such as a single branch of the
[inception network](https://arxiv.org/abs/1512.00567).
Args:
graph_def: Graph or GraphDef object.
input_node: Name of the input node or Tensor object from graph.
output_node: Name of the output node or Tensor object from graph.
stop_propagation: List of operation or scope names for which to stop the
propagation of the receptive field.
Returns:
rf_size_x: Receptive field size of network in the horizontal direction, with
respect to specified input and output.
rf_size_y: Receptive field size of network in the vertical direction, with
respect to specified input and output.
effective_stride_x: Effective stride of network in the horizontal direction,
with respect to specified input and output.
effective_stride_y: Effective stride of network in the vertical direction,
with respect to specified input and output.
effective_padding_x: Effective padding of network in the horizontal
direction, with respect to specified input and output.
effective_padding_y: Effective padding of network in the vertical
direction, with respect to specified input and output.
Raises:
ValueError: If network is not aligned or if either input or output nodes
cannot be found. For network criterion alignment, see
photos/vision/features/delf/g3doc/rf_computation.md
"""
# Convert a graph to graph_def if necessary.
if isinstance(graph_def, framework_ops.Graph):
graph_def = graph_def.as_graph_def()
# Convert tensors to names.
if isinstance(input_node, framework_ops.Tensor):
input_node = input_node.op.name
if isinstance(output_node, framework_ops.Tensor):
output_node = output_node.op.name
stop_propagation = stop_propagation or []
# Computes order of computation for a given graph.
name_to_order_node = graph_compute_order.get_compute_order(
graph_def=graph_def)
# Sort in reverse topological order.
order = _reverse_sort_by_order(name_to_order_node)
# Dictionaries to keep track of receptive field, effective stride and
# effective padding of different nodes.
rf_sizes_x = {}
rf_sizes_y = {}
effective_strides_x = {}
effective_strides_y = {}
effective_paddings_x = {}
effective_paddings_y = {}
# Initialize dicts for output_node.
rf_sizes_x[output_node] = 1
rf_sizes_y[output_node] = 1
effective_strides_x[output_node] = 1
effective_strides_y[output_node] = 1
effective_paddings_x[output_node] = 0
effective_paddings_y[output_node] = 0
# Flag to denote if we found output node yet. If we have not, we skip nodes
# until the output node is found.
found_output_node = False
# Flag to denote if padding is undefined. This happens when SAME padding mode
# is used in conjunction with stride and kernel sizes which make it such that
# the padding to be applied would depend on the input size. In this case,
# alignment checks are skipped, and the effective padding is None.
undefined_padding = False
for _, (o, node) in order:
if node:
logging.vlog(3, "%10d %-100s %-20s" % (o, node.name[:90], node.op))
else:
continue
# When we find input node, we can stop.
if node.name == input_node:
break
# Loop until we find the output node. All nodes before finding the output
# one are irrelevant, so they can be skipped.
if not found_output_node:
if node.name == output_node:
found_output_node = True
if found_output_node:
if node.name not in rf_sizes_x:
assert node.name not in rf_sizes_y, ("Node %s is in rf_sizes_y, but "
"not in rf_sizes_x" % node.name)
# In this case, node is not relevant since it's not part of the
# computation we're interested in.
logging.vlog(3, "Irrelevant node %s, skipping it...", node.name)
continue
# Get params for this layer.
kernel_size_x, kernel_size_y, stride_x, stride_y, padding_x, padding_y = (
_get_layer_params(node, name_to_order_node))
logging.vlog(3, "kernel_size_x = %s, kernel_size_y = %s, "
"stride_x = %s, stride_y = %s, "
"padding_x = %s, padding_y = %s" %
(kernel_size_x, kernel_size_y, stride_x, stride_y, padding_x,
padding_y))
if padding_x is None or padding_y is None:
undefined_padding = True
# Get parameters at input of this layer which may or may not be propagated
# to the input layers.
rf_size_input_x = _get_rf_size_node_input(stride_x, kernel_size_x,
rf_sizes_x[node.name])
rf_size_input_y = _get_rf_size_node_input(stride_y, kernel_size_y,
rf_sizes_y[node.name])
effective_stride_input_x = _get_effective_stride_node_input(
stride_x, effective_strides_x[node.name])
effective_stride_input_y = _get_effective_stride_node_input(
stride_y, effective_strides_y[node.name])
if not undefined_padding:
effective_padding_input_x = _get_effective_padding_node_input(
stride_x, padding_x, effective_paddings_x[node.name])
effective_padding_input_y = _get_effective_padding_node_input(
stride_y, padding_y, effective_paddings_y[node.name])
else:
effective_padding_input_x = None
effective_padding_input_y = None
# Loop over this node's inputs and potentially propagate information down.
for inp_name in node.input:
# Stop the propagation of the receptive field.
if any(inp_name.startswith(stop) for stop in stop_propagation):
logging.vlog(3, "Skipping explicitly ignored node %s.", node.name)
continue
logging.vlog(4, "inp_name = %s", inp_name)
inp_node = name_to_order_node[inp_name].node
logging.vlog(4, "inp_node = \n%s", inp_node)
if inp_node.name in rf_sizes_x:
assert inp_node.name in rf_sizes_y, (
"Node %s is in rf_sizes_x, but "
"not in rf_sizes_y" % inp_node.name)
# This node was already discovered through a previous path, so we need
# to make sure that graph is aligned. This alignment check is skipped
# if the padding is not defined, since in this case alignment cannot
# be checked.
if not undefined_padding:
if effective_strides_x[inp_node.name] != effective_stride_input_x:
raise ValueError(
"Graph is not aligned since effective stride from different "
"paths is different in horizontal direction")
if effective_strides_y[inp_node.name] != effective_stride_input_y:
raise ValueError(
"Graph is not aligned since effective stride from different "
"paths is different in vertical direction")
if (rf_sizes_x[inp_node.name] - 1
) / 2 - effective_paddings_x[inp_node.name] != (
rf_size_input_x - 1) / 2 - effective_padding_input_x:
raise ValueError(
"Graph is not aligned since center shift from different "
"paths is different in horizontal direction")
if (rf_sizes_y[inp_node.name] - 1
) / 2 - effective_paddings_y[inp_node.name] != (
rf_size_input_y - 1) / 2 - effective_padding_input_y:
raise ValueError(
"Graph is not aligned since center shift from different "
"paths is different in vertical direction")
# Keep track of path with largest RF, for both directions.
if rf_sizes_x[inp_node.name] < rf_size_input_x:
rf_sizes_x[inp_node.name] = rf_size_input_x
effective_strides_x[inp_node.name] = effective_stride_input_x
effective_paddings_x[inp_node.name] = effective_padding_input_x
if rf_sizes_y[inp_node.name] < rf_size_input_y:
rf_sizes_y[inp_node.name] = rf_size_input_y
effective_strides_y[inp_node.name] = effective_stride_input_y
effective_paddings_y[inp_node.name] = effective_padding_input_y
else:
assert inp_node.name not in rf_sizes_y, (
"Node %s is in rf_sizes_y, but "
"not in rf_sizes_x" % inp_node.name)
# In this case, it is the first time we encounter this node. So we
# propagate the RF parameters.
rf_sizes_x[inp_node.name] = rf_size_input_x
rf_sizes_y[inp_node.name] = rf_size_input_y
effective_strides_x[inp_node.name] = effective_stride_input_x
effective_strides_y[inp_node.name] = effective_stride_input_y
effective_paddings_x[inp_node.name] = effective_padding_input_x
effective_paddings_y[inp_node.name] = effective_padding_input_y
if not found_output_node:
raise ValueError("Output node was not found")
if input_node not in rf_sizes_x:
raise ValueError("Input node was not found")
return ReceptiveField(
(rf_sizes_x[input_node], rf_sizes_y[input_node]),
(effective_strides_x[input_node], effective_strides_y[input_node]),
(effective_paddings_x[input_node], effective_paddings_y[input_node]))
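# Usage sketch (illustrative, not part of the original module): assuming
# TensorFlow is imported as `tf` and a hypothetical `my_model_fn` builds the
# network under the scope 'my_model', the RF parameters could be obtained as:
#
#   g = tf.Graph()
#   with g.as_default():
#     images = tf.placeholder(tf.float32, [1, None, None, 3], name='input_image')
#     my_model_fn(images)
#   rf_x, rf_y, stride_x, stride_y, pad_x, pad_y = (
#       compute_receptive_field_from_graph_def(
#           g.as_graph_def(), 'input_image', 'my_model/logits'))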
| apache-2.0 | -2,473,689,287,112,896,500 | 38.287395 | 80 | 0.660806 | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyKDE4/kdeui/KViewStateMaintainerBase.py | 1 | 1046 | # encoding: utf-8
# module PyKDE4.kdeui
# from /usr/lib/python3/dist-packages/PyKDE4/kdeui.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyKDE4.kdecore as __PyKDE4_kdecore
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
import PyQt4.QtSvg as __PyQt4_QtSvg
class KViewStateMaintainerBase(__PyQt4_QtCore.QObject):
# no doc
def configGroup(self, *args, **kwargs): # real signature unknown
pass
def restoreState(self, *args, **kwargs): # real signature unknown
pass
def saveState(self, *args, **kwargs): # real signature unknown
pass
def selectionModel(self, *args, **kwargs): # real signature unknown
pass
def setSelectionModel(self, *args, **kwargs): # real signature unknown
pass
def setView(self, *args, **kwargs): # real signature unknown
pass
def view(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
| gpl-2.0 | 8,347,826,808,553,256,000 | 25.15 | 82 | 0.66826 | false |
okantekeli/aws-lambda | ImageFunctions/CreateImages.py | 1 | 2560 | import collections
import datetime
from datetime import timedelta
import boto3
def lambda_handler(event, context):
"""Main Handler for execute lambda function
Args:
event : Lambda Event
context : Lambda Event Context
"""
ec2 = boto3.client('ec2')
    # This query searches for instances that have the AutoBackup tag.
    # You can change/customize it to fit your needs.
reservations = ec2.describe_instances(
Filters=[
{'Name': 'tag-key', 'Values': ['AutoBackup']},
]
).get(
'Reservations', []
)
instances = sum(
[
[i for i in r['Instances']]
for r in reservations
], [])
print "Found %d instances that need backing up" % len(instances)
for instance in instances:
for tag in instance['Tags']:
if tag['Key'] == 'Name':
create_snapshot(tag['Value'], instance['InstanceId'])
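# Example (illustrative, hypothetical instance id): an instance is picked up by
# the filter above when it carries a tag with the key 'AutoBackup', e.g.
#   aws ec2 create-tags --resources i-0123456789abcdef0 --tags Key=AutoBackup,Value=true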
def create_snapshot(instancename, instanceid):
"""Pushes the given SNS Topic
Args:
instancename (string) : The backed up instances name
instanceid (string) : EC2 Instance ID
"""
ec2 = boto3.client('ec2')
create_time = datetime.datetime.now()
    # This timedelta (days) defines the image retention period.
    # You can change it to fit your needs, e.g. days=1, days=3, days=5.
valid_time = datetime.datetime.now() + timedelta(days=3)
snapshotname = instancename + '_' + create_time.strftime('%Y-%m-%d')
ami_data = ec2.create_image(
InstanceId=instanceid,
Name=snapshotname,
Description="Lambda created AMI of instance " + instanceid,
NoReboot=True,
DryRun=False
)
amiid = ami_data['ImageId']
ec2.create_tags(
Resources=[amiid],
DryRun=False,
Tags=[
{
'Key' : 'OriginalInstance',
'Value' : instancename
},
{
'Key' : 'ValidUntil',
'Value' : valid_time.strftime('%Y-%m-%d')
}]
)
#publish SNS topic.
sns_notifier(instancename, snapshotname)
def sns_notifier(instancename, snapshotname):
"""Push the messages the given SNS Topic
Args:
instancename (string) : The backed up instance name
snapshotname (string) : Snapshotname
"""
sns = boto3.client('sns')
sns.publish(
TargetArn='YOURSNSTOPICARN',
        Message='Auto Backup Completed For : ' + instancename + " named : " + snapshotname,
MessageStructure='text'
)
| gpl-3.0 | 5,648,038,826,633,173,000 | 25.122449 | 92 | 0.585938 | false |
mikemhenry/arcade | examples/shapes.py | 1 | 2831 | """
This simple animation example shows how to use classes to animate
multiple objects on the screen at the same time.
Because this redraws the shapes from scratch each frame, it is slow
and inefficient, but we'll show how to make it faster in the chapter on
performance.
"""
import arcade
import random
# Set up the constants
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
RECT_WIDTH = 50
RECT_HEIGHT = 50
class Shape():
def __init__(self, x, y, width, height, angle, delta_x, delta_y,
delta_angle, color):
self.x = x
self.y = y
self.width = width
self.height = height
self.angle = angle
self.delta_x = delta_x
self.delta_y = delta_y
self.delta_angle = delta_angle
self.color = color
def move(self):
self.x += self.delta_x
self.y += self.delta_y
self.angle += self.delta_angle
class Ellipse(Shape):
def draw(self):
arcade.draw_ellipse_filled(self.x, self.y, self.width, self.height,
self.color, self.angle)
class Rectangle(Shape):
def draw(self):
arcade.draw_rectangle_filled(self.x, self.y, self.width, self.height,
self.color, self.angle)
class MyApplication(arcade.Window):
""" Main application class. """
def setup(self):
""" Set up the game and initialize the variables. """
self.shape_list = []
for i in range(100):
x = random.randrange(0, SCREEN_WIDTH)
y = random.randrange(0, SCREEN_HEIGHT)
width = random.randrange(10, 30)
height = random.randrange(10, 30)
angle = random.randrange(0, 360)
d_x = random.randrange(-3, 4)
d_y = random.randrange(-3, 4)
d_angle = random.randrange(-3, 4)
red = random.randrange(256)
green = random.randrange(256)
blue = random.randrange(256)
alpha = random.randrange(256)
shape_type = random.randrange(2)
if shape_type == 0:
shape = Rectangle(x, y, width, height, angle, d_x, d_y,
d_angle, (red, green, blue, alpha))
else:
shape = Ellipse(x, y, width, height, angle, d_x, d_y,
d_angle, (red, green, blue, alpha))
self.shape_list.append(shape)
def animate(self, dt):
""" Move everything """
for shape in self.shape_list:
shape.move()
def on_draw(self):
"""
Render the screen.
"""
arcade.start_render()
for shape in self.shape_list:
shape.draw()
window = MyApplication(SCREEN_WIDTH, SCREEN_HEIGHT, title="Shapes!")
window.setup()
arcade.run()
| mit | 9,209,995,178,170,576,000 | 25.707547 | 77 | 0.553515 | false |
eclee25/flu-SDI-exploratory-age | scripts/OR_allweeks.py | 1 | 3108 | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 9/2/13
###Function: draw OR by week for all weeks
###Import data:
###Command Line: python
##############################################
### notes ###
### packages/modules ###
import csv
import numpy as np
import matplotlib.pyplot as plt
import sys
## local modules ##
import ORgenerator as od
### data structures ###
# ilidict[(week, age marker)] = ILI
# wkdict[week] = seasonnum
ilidict, wkdict = {}, {} # unnecessary
# ORdict[week] = OR
# ARdict[week] = attack rate per 10000
ORdict, ARdict = {}, {}
### parameters ###
USchild = 20348657 + 20677194 + 22040343 #US child popn from 2010 Census
USadult = 21585999 + 21101849 + 19962099 + 20179642 + 20890964 + 22708591 + 22298125 + 19664805 #US adult popn from 2010 Census
seasons = range(1,11) #seasons for which ORs will be generated
### plotting settings ###
colorvec = ['grey', 'black', 'red', 'orange', 'gold', 'green', 'blue', 'cyan', 'darkviolet', 'hotpink']
labelvec = ['00-01', '01-02', '02-03', '03-04', '04-05', '05-06', '06-07', '07-08', '08-09', '09-10']
xlabels = range(40,54)
xlabels.extend(range(1,40))
### functions ###
### import data ###
datain=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks.csv','r')
data=csv.reader(datain, delimiter=',')
### program ###
# OR by week chart
# ilidict[(week, age marker)] = ILI
# wkdict[week] = seasonnum
# weeks = unique list of weeks for dataset
ilidict, wkdict, weeks = od.import_dwk(data, 0, 1, 2, 3)
ORdict, ARdict = od.ORgen_wk(ilidict, weeks)
for s in seasons:
# wkdummy will represent list of weeks for chart in season to use as key for OR dict
wkdummy = [key for key in sorted(weeks) if wkdict[key] == int(s)]
wkdummy = set(wkdummy)
if s == 1:
chartORs = [ORdict[wk] for wk in sorted(wkdummy)]
chartwks = xrange(13, 13 + len(sorted(wkdummy)))
print "season number and num weeks", s, len(wkdummy)
plt.plot(chartwks, chartORs, marker = 'o', color = colorvec[s-1], label = labelvec[s-1], linewidth = 2)
elif len(wkdummy) == 53:
# wkdummy needs to be sorted bc dict values don't have order
chartORs = [ORdict[wk] for wk in sorted(wkdummy)]
chartwks = xrange(len(sorted(wkdummy)))
print "season number and num weeks", s, len(wkdummy)
plt.plot(chartwks, chartORs, marker = 'o', color = colorvec[s-1], label = labelvec[s-1], linewidth = 2)
else:
chartORs = [ORdict[wk] for wk in sorted(wkdummy)]
avg53 = (chartORs[12] + chartORs[13])/2
chartORs.insert(13, avg53)
chartwks = xrange(len(sorted(wkdummy)) + 1)
print "season number and num weeks", s, len(wkdummy)
plt.plot(chartwks, chartORs, marker = 'o', color = colorvec[s-1], label = labelvec[s-1], linewidth = 2)
plt.plot([33, 33], [0, 10], color = 'k', linewidth = 1)
plt.xlim([0, 52])
plt.ylim([0, 10])
plt.xlabel('Week Number', fontsize=24) # 12/1/13 increase size
plt.ylabel('OR, child:adult', fontsize=24)
# plt.ylabel('OR, US pop normalized', fontsize=24)
plt.legend(loc = 'upper left')
plt.xticks(xrange(53), xlabels)
plt.show()
| mit | 8,016,307,729,006,739,000 | 31.715789 | 127 | 0.649936 | false |
constKutsy/GeoWiki | .compit/lib/python3.6/site-packages/tornado/test/httputil_test.py | 14 | 17520 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from tornado.httputil import url_concat, parse_multipart_form_data, HTTPHeaders, format_timestamp, HTTPServerRequest, parse_request_start_line, parse_cookie
from tornado.escape import utf8, native_str
from tornado.log import gen_log
from tornado.testing import ExpectLog
from tornado.test.util import unittest
import copy
import datetime
import logging
import pickle
import time
class TestUrlConcat(unittest.TestCase):
def test_url_concat_no_query_params(self):
url = url_concat(
"https://localhost/path",
[('y', 'y'), ('z', 'z')],
)
self.assertEqual(url, "https://localhost/path?y=y&z=z")
def test_url_concat_encode_args(self):
url = url_concat(
"https://localhost/path",
[('y', '/y'), ('z', 'z')],
)
self.assertEqual(url, "https://localhost/path?y=%2Fy&z=z")
def test_url_concat_trailing_q(self):
url = url_concat(
"https://localhost/path?",
[('y', 'y'), ('z', 'z')],
)
self.assertEqual(url, "https://localhost/path?y=y&z=z")
def test_url_concat_q_with_no_trailing_amp(self):
url = url_concat(
"https://localhost/path?x",
[('y', 'y'), ('z', 'z')],
)
self.assertEqual(url, "https://localhost/path?x=&y=y&z=z")
def test_url_concat_trailing_amp(self):
url = url_concat(
"https://localhost/path?x&",
[('y', 'y'), ('z', 'z')],
)
self.assertEqual(url, "https://localhost/path?x=&y=y&z=z")
def test_url_concat_mult_params(self):
url = url_concat(
"https://localhost/path?a=1&b=2",
[('y', 'y'), ('z', 'z')],
)
self.assertEqual(url, "https://localhost/path?a=1&b=2&y=y&z=z")
def test_url_concat_no_params(self):
url = url_concat(
"https://localhost/path?r=1&t=2",
[],
)
self.assertEqual(url, "https://localhost/path?r=1&t=2")
def test_url_concat_none_params(self):
url = url_concat(
"https://localhost/path?r=1&t=2",
None,
)
self.assertEqual(url, "https://localhost/path?r=1&t=2")
def test_url_concat_with_frag(self):
url = url_concat(
"https://localhost/path#tab",
[('y', 'y')],
)
self.assertEqual(url, "https://localhost/path?y=y#tab")
def test_url_concat_multi_same_params(self):
url = url_concat(
"https://localhost/path",
[('y', 'y1'), ('y', 'y2')],
)
self.assertEqual(url, "https://localhost/path?y=y1&y=y2")
def test_url_concat_multi_same_query_params(self):
url = url_concat(
"https://localhost/path?r=1&r=2",
[('y', 'y')],
)
self.assertEqual(url, "https://localhost/path?r=1&r=2&y=y")
def test_url_concat_dict_params(self):
url = url_concat(
"https://localhost/path",
dict(y='y'),
)
self.assertEqual(url, "https://localhost/path?y=y")
class MultipartFormDataTest(unittest.TestCase):
def test_file_upload(self):
data = b"""\
--1234
Content-Disposition: form-data; name="files"; filename="ab.txt"
Foo
--1234--""".replace(b"\n", b"\r\n")
args = {}
files = {}
parse_multipart_form_data(b"1234", data, args, files)
file = files["files"][0]
self.assertEqual(file["filename"], "ab.txt")
self.assertEqual(file["body"], b"Foo")
def test_unquoted_names(self):
# quotes are optional unless special characters are present
data = b"""\
--1234
Content-Disposition: form-data; name=files; filename=ab.txt
Foo
--1234--""".replace(b"\n", b"\r\n")
args = {}
files = {}
parse_multipart_form_data(b"1234", data, args, files)
file = files["files"][0]
self.assertEqual(file["filename"], "ab.txt")
self.assertEqual(file["body"], b"Foo")
def test_special_filenames(self):
filenames = ['a;b.txt',
'a"b.txt',
'a";b.txt',
'a;"b.txt',
'a";";.txt',
'a\\"b.txt',
'a\\b.txt',
]
for filename in filenames:
logging.debug("trying filename %r", filename)
data = """\
--1234
Content-Disposition: form-data; name="files"; filename="%s"
Foo
--1234--""" % filename.replace('\\', '\\\\').replace('"', '\\"')
data = utf8(data.replace("\n", "\r\n"))
args = {}
files = {}
parse_multipart_form_data(b"1234", data, args, files)
file = files["files"][0]
self.assertEqual(file["filename"], filename)
self.assertEqual(file["body"], b"Foo")
def test_boundary_starts_and_ends_with_quotes(self):
data = b'''\
--1234
Content-Disposition: form-data; name="files"; filename="ab.txt"
Foo
--1234--'''.replace(b"\n", b"\r\n")
args = {}
files = {}
parse_multipart_form_data(b'"1234"', data, args, files)
file = files["files"][0]
self.assertEqual(file["filename"], "ab.txt")
self.assertEqual(file["body"], b"Foo")
def test_missing_headers(self):
data = b'''\
--1234
Foo
--1234--'''.replace(b"\n", b"\r\n")
args = {}
files = {}
with ExpectLog(gen_log, "multipart/form-data missing headers"):
parse_multipart_form_data(b"1234", data, args, files)
self.assertEqual(files, {})
def test_invalid_content_disposition(self):
data = b'''\
--1234
Content-Disposition: invalid; name="files"; filename="ab.txt"
Foo
--1234--'''.replace(b"\n", b"\r\n")
args = {}
files = {}
with ExpectLog(gen_log, "Invalid multipart/form-data"):
parse_multipart_form_data(b"1234", data, args, files)
self.assertEqual(files, {})
def test_line_does_not_end_with_correct_line_break(self):
data = b'''\
--1234
Content-Disposition: form-data; name="files"; filename="ab.txt"
Foo--1234--'''.replace(b"\n", b"\r\n")
args = {}
files = {}
with ExpectLog(gen_log, "Invalid multipart/form-data"):
parse_multipart_form_data(b"1234", data, args, files)
self.assertEqual(files, {})
def test_content_disposition_header_without_name_parameter(self):
data = b"""\
--1234
Content-Disposition: form-data; filename="ab.txt"
Foo
--1234--""".replace(b"\n", b"\r\n")
args = {}
files = {}
with ExpectLog(gen_log, "multipart/form-data value missing name"):
parse_multipart_form_data(b"1234", data, args, files)
self.assertEqual(files, {})
def test_data_after_final_boundary(self):
# The spec requires that data after the final boundary be ignored.
# http://www.w3.org/Protocols/rfc1341/7_2_Multipart.html
# In practice, some libraries include an extra CRLF after the boundary.
data = b"""\
--1234
Content-Disposition: form-data; name="files"; filename="ab.txt"
Foo
--1234--
""".replace(b"\n", b"\r\n")
args = {}
files = {}
parse_multipart_form_data(b"1234", data, args, files)
file = files["files"][0]
self.assertEqual(file["filename"], "ab.txt")
self.assertEqual(file["body"], b"Foo")
class HTTPHeadersTest(unittest.TestCase):
def test_multi_line(self):
# Lines beginning with whitespace are appended to the previous line
# with any leading whitespace replaced by a single space.
# Note that while multi-line headers are a part of the HTTP spec,
# their use is strongly discouraged.
data = """\
Foo: bar
baz
Asdf: qwer
\tzxcv
Foo: even
more
lines
""".replace("\n", "\r\n")
headers = HTTPHeaders.parse(data)
self.assertEqual(headers["asdf"], "qwer zxcv")
self.assertEqual(headers.get_list("asdf"), ["qwer zxcv"])
self.assertEqual(headers["Foo"], "bar baz,even more lines")
self.assertEqual(headers.get_list("foo"), ["bar baz", "even more lines"])
self.assertEqual(sorted(list(headers.get_all())),
[("Asdf", "qwer zxcv"),
("Foo", "bar baz"),
("Foo", "even more lines")])
def test_unicode_newlines(self):
# Ensure that only \r\n is recognized as a header separator, and not
# the other newline-like unicode characters.
# Characters that are likely to be problematic can be found in
# http://unicode.org/standard/reports/tr13/tr13-5.html
# and cpython's unicodeobject.c (which defines the implementation
# of unicode_type.splitlines(), and uses a different list than TR13).
newlines = [
u'\u001b', # VERTICAL TAB
u'\u001c', # FILE SEPARATOR
u'\u001d', # GROUP SEPARATOR
u'\u001e', # RECORD SEPARATOR
u'\u0085', # NEXT LINE
u'\u2028', # LINE SEPARATOR
u'\u2029', # PARAGRAPH SEPARATOR
]
for newline in newlines:
# Try the utf8 and latin1 representations of each newline
for encoding in ['utf8', 'latin1']:
try:
try:
encoded = newline.encode(encoding)
except UnicodeEncodeError:
# Some chars cannot be represented in latin1
continue
data = b'Cookie: foo=' + encoded + b'bar'
# parse() wants a native_str, so decode through latin1
# in the same way the real parser does.
headers = HTTPHeaders.parse(
native_str(data.decode('latin1')))
expected = [('Cookie', 'foo=' +
native_str(encoded.decode('latin1')) + 'bar')]
self.assertEqual(
expected, list(headers.get_all()))
except Exception:
gen_log.warning("failed while trying %r in %s",
newline, encoding)
raise
def test_optional_cr(self):
# Both CRLF and LF should be accepted as separators. CR should not be
# part of the data when followed by LF, but it is a normal char
# otherwise (or should bare CR be an error?)
headers = HTTPHeaders.parse(
'CRLF: crlf\r\nLF: lf\nCR: cr\rMore: more\r\n')
self.assertEqual(sorted(headers.get_all()),
[('Cr', 'cr\rMore: more'),
('Crlf', 'crlf'),
('Lf', 'lf'),
])
def test_copy(self):
all_pairs = [('A', '1'), ('A', '2'), ('B', 'c')]
h1 = HTTPHeaders()
for k, v in all_pairs:
h1.add(k, v)
h2 = h1.copy()
h3 = copy.copy(h1)
h4 = copy.deepcopy(h1)
for headers in [h1, h2, h3, h4]:
# All the copies are identical, no matter how they were
# constructed.
self.assertEqual(list(sorted(headers.get_all())), all_pairs)
for headers in [h2, h3, h4]:
# Neither the dict or its member lists are reused.
self.assertIsNot(headers, h1)
self.assertIsNot(headers.get_list('A'), h1.get_list('A'))
def test_pickle_roundtrip(self):
headers = HTTPHeaders()
headers.add('Set-Cookie', 'a=b')
headers.add('Set-Cookie', 'c=d')
headers.add('Content-Type', 'text/html')
pickled = pickle.dumps(headers)
unpickled = pickle.loads(pickled)
self.assertEqual(sorted(headers.get_all()), sorted(unpickled.get_all()))
self.assertEqual(sorted(headers.items()), sorted(unpickled.items()))
def test_setdefault(self):
headers = HTTPHeaders()
headers['foo'] = 'bar'
# If a value is present, setdefault returns it without changes.
self.assertEqual(headers.setdefault('foo', 'baz'), 'bar')
self.assertEqual(headers['foo'], 'bar')
# If a value is not present, setdefault sets it for future use.
self.assertEqual(headers.setdefault('quux', 'xyzzy'), 'xyzzy')
self.assertEqual(headers['quux'], 'xyzzy')
self.assertEqual(sorted(headers.get_all()), [('Foo', 'bar'), ('Quux', 'xyzzy')])
def test_string(self):
headers = HTTPHeaders()
headers.add("Foo", "1")
headers.add("Foo", "2")
headers.add("Foo", "3")
headers2 = HTTPHeaders.parse(str(headers))
self.assertEquals(headers, headers2)
class FormatTimestampTest(unittest.TestCase):
# Make sure that all the input types are supported.
TIMESTAMP = 1359312200.503611
EXPECTED = 'Sun, 27 Jan 2013 18:43:20 GMT'
def check(self, value):
self.assertEqual(format_timestamp(value), self.EXPECTED)
def test_unix_time_float(self):
self.check(self.TIMESTAMP)
def test_unix_time_int(self):
self.check(int(self.TIMESTAMP))
def test_struct_time(self):
self.check(time.gmtime(self.TIMESTAMP))
def test_time_tuple(self):
tup = tuple(time.gmtime(self.TIMESTAMP))
self.assertEqual(9, len(tup))
self.check(tup)
def test_datetime(self):
self.check(datetime.datetime.utcfromtimestamp(self.TIMESTAMP))
# HTTPServerRequest is mainly tested incidentally to the server itself,
# but this tests the parts of the class that can be tested in isolation.
class HTTPServerRequestTest(unittest.TestCase):
def test_default_constructor(self):
# All parameters are formally optional, but uri is required
# (and has been for some time). This test ensures that no
# more required parameters slip in.
HTTPServerRequest(uri='/')
def test_body_is_a_byte_string(self):
        request = HTTPServerRequest(uri='/')
        self.assertIsInstance(request.body, bytes)
class ParseRequestStartLineTest(unittest.TestCase):
METHOD = "GET"
PATH = "/foo"
VERSION = "HTTP/1.1"
def test_parse_request_start_line(self):
start_line = " ".join([self.METHOD, self.PATH, self.VERSION])
parsed_start_line = parse_request_start_line(start_line)
self.assertEqual(parsed_start_line.method, self.METHOD)
self.assertEqual(parsed_start_line.path, self.PATH)
self.assertEqual(parsed_start_line.version, self.VERSION)
class ParseCookieTest(unittest.TestCase):
# These tests copied from Django:
# https://github.com/django/django/pull/6277/commits/da810901ada1cae9fc1f018f879f11a7fb467b28
def test_python_cookies(self):
"""
Test cases copied from Python's Lib/test/test_http_cookies.py
"""
self.assertEqual(parse_cookie('chips=ahoy; vienna=finger'), {'chips': 'ahoy', 'vienna': 'finger'})
# Here parse_cookie() differs from Python's cookie parsing in that it
# treats all semicolons as delimiters, even within quotes.
self.assertEqual(
parse_cookie('keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"'),
{'keebler': '"E=mc2', 'L': '\\"Loves\\"', 'fudge': '\\012', '': '"'}
)
# Illegal cookies that have an '=' char in an unquoted value.
self.assertEqual(parse_cookie('keebler=E=mc2'), {'keebler': 'E=mc2'})
# Cookies with ':' character in their name.
self.assertEqual(parse_cookie('key:term=value:term'), {'key:term': 'value:term'})
# Cookies with '[' and ']'.
self.assertEqual(parse_cookie('a=b; c=[; d=r; f=h'), {'a': 'b', 'c': '[', 'd': 'r', 'f': 'h'})
def test_cookie_edgecases(self):
# Cookies that RFC6265 allows.
self.assertEqual(parse_cookie('a=b; Domain=example.com'), {'a': 'b', 'Domain': 'example.com'})
# parse_cookie() has historically kept only the last cookie with the
# same name.
self.assertEqual(parse_cookie('a=b; h=i; a=c'), {'a': 'c', 'h': 'i'})
def test_invalid_cookies(self):
"""
Cookie strings that go against RFC6265 but browsers will send if set
via document.cookie.
"""
# Chunks without an equals sign appear as unnamed values per
# https://bugzilla.mozilla.org/show_bug.cgi?id=169091
self.assertIn('django_language', parse_cookie('abc=def; unnamed; django_language=en').keys())
        # Even a double quote may be an unnamed value.
self.assertEqual(parse_cookie('a=b; "; c=d'), {'a': 'b', '': '"', 'c': 'd'})
# Spaces in names and values, and an equals sign in values.
self.assertEqual(parse_cookie('a b c=d e = f; gh=i'), {'a b c': 'd e = f', 'gh': 'i'})
# More characters the spec forbids.
self.assertEqual(parse_cookie('a b,c<>@:/[]?{}=d " =e,f g'), {'a b,c<>@:/[]?{}': 'd " =e,f g'})
# Unicode characters. The spec only allows ASCII.
self.assertEqual(parse_cookie('saint=André Bessette'), {'saint': native_str('André Bessette')})
# Browsers don't send extra whitespace or semicolons in Cookie headers,
# but parse_cookie() should parse whitespace the same way
# document.cookie parses whitespace.
self.assertEqual(parse_cookie(' = b ; ; = ; c = ; '), {'': 'b', 'c': ''})
| unlicense | -3,576,038,395,327,762,000 | 36.592275 | 156 | 0.565304 | false |
w1ndy/qtile | libqtile/layout/verticaltile.py | 6 | 10393 | # Copyright (c) 2014, Florian Scherf <fscherf@gmx.net>. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .base import Layout
class VerticalTile(Layout):
"""
VerticalTile implements a tiling layout that works nice on vertically
mounted monitors.
The available height gets divided by the number of panes, if no pane
is maximized. If one pane has been maximized, the available height gets
split in master- and secondary area. The maximized pane (master pane)
gets the full height of the master area and the other panes
(secondary panes) share the remaining space.
    The master area (75% by default) can grow and shrink via keybindings.
::
----------------- ----------------- ---
| | | | |
| 1 | <-- Panes | | |
| | | | | |
|---------------| | | | |
| | | | | |
| 2 | <-----+ | 1 | | Master Area
| | | | | |
|---------------| | | | |
| | | | | |
| 3 | <-----+ | | |
| | | | | |
|---------------| | |---------------| ---
| | | | 2 | |
| 4 | <-----+ |---------------| | Secondary Area
| | | 3 | |
----------------- ----------------- ---
Normal behavior. No One maximized pane in the master area
maximized pane. No and two secondary panes in the
specific areas. secondary area.
::
----------------------------------- In some cases VerticalTile can be
    |                                 |  useful on horizontally mounted
    |                1                |  monitors too.
    |                                 |  For example, if you want to have a
    |---------------------------------|  web browser and a shell below it.
| |
| 2 |
| |
-----------------------------------
Suggested keybindings:
::
Key([modkey], 'j', lazy.layout.down()),
Key([modkey], 'k', lazy.layout.up()),
Key([modkey], 'Tab', lazy.layout.next()),
        Key([modkey, 'shift'], 'Tab', lazy.layout.previous()),
Key([modkey, 'shift'], 'j', lazy.layout.shuffle_down()),
Key([modkey, 'shift'], 'k', lazy.layout.shuffle_up()),
Key([modkey], 'm', lazy.layout.maximize()),
Key([modkey], 'n', lazy.layout.normalize()),
"""
defaults = [
('border_focus', '#FF0000', 'Border color for the focused window.'),
        ('border_normal', '#FFFFFF', 'Border color for un-focused windows.'),
('border_width', 1, 'Border width.'),
('margin', 0, 'Border margin.'),
('name', 'VerticalTile', 'Name of this layout.'),
]
windows = []
focused = None
maximized = None
ratio = 0.75
steps = 0.05
def __init__(self, **config):
Layout.__init__(self, **config)
self.add_defaults(self.defaults)
def add(self, window):
if self.windows and self.focused:
index = self.windows.index(self.focused)
self.windows.insert(index + 1, window)
else:
self.windows.append(window)
self.focus(window)
def remove(self, window):
if window not in self.windows:
return
index = self.windows.index(window)
self.windows.remove(window)
if not self.windows:
self.focused = None
self.maximized = None
return
if self.maximized is window:
self.maximized = None
if index == len(self.windows):
index -= 1
self.focus(self.windows[index])
return self.focused
def clone(self, group):
c = Layout.clone(self, group)
c.windows = []
c.focused = None
return c
def configure(self, window, screen):
if self.windows and window in self.windows:
n = len(self.windows)
index = self.windows.index(window)
# border
if n > 1:
border_width = self.border_width
else:
border_width = 0
if window is self.focused:
border_color = self.group.qtile.colorPixel(self.border_focus)
else:
border_color = self.group.qtile.colorPixel(self.border_normal)
# width
if n > 1:
width = screen.width - self.border_width * 2
else:
width = screen.width
# height
if n > 1:
main_area_height = int(screen.height * self.ratio)
sec_area_height = screen.height - main_area_height
main_pane_height = main_area_height - border_width * 2
sec_pane_height = sec_area_height / (n - 1) - border_width * 2
normal_pane_height = (screen.height / n) - (border_width * 2)
if self.maximized:
if window is self.maximized:
height = main_pane_height
else:
height = sec_pane_height
else:
height = normal_pane_height
else:
height = screen.height
# y
y = screen.y
if n > 1:
if self.maximized:
y += (index * sec_pane_height) + (border_width * 2 * index)
else:
y += (index * normal_pane_height) +\
(border_width * 2 * index)
if self.maximized and window is not self.maximized:
if index > self.windows.index(self.maximized):
y = y - sec_pane_height + main_pane_height
window.place(screen.x, y, width, height, border_width,
border_color, margin=self.margin)
window.unhide()
else:
window.hide()
def blur(self):
self.focused = None
def focus(self, window):
self.focused = window
def focus_first(self):
try:
self.focus(self.windows[0])
except IndexError:
self.blur()
def focus_last(self):
try:
self.focus(self.windows[-1])
except IndexError:
self.blur()
def focus_next(self):
try:
index = self.windows.index(self.focused)
self.focus(self.windows[index + 1])
except IndexError:
self.focus_first()
def focus_previous(self):
try:
index = self.windows.index(self.focused)
self.focus(self.windows[index - 1])
except IndexError:
self.focus_last()
def grow(self):
if self.ratio + self.steps < 1:
self.ratio += self.steps
self.group.layoutAll()
def shrink(self):
if self.ratio - self.steps > 0:
self.ratio -= self.steps
self.group.layoutAll()
def cmd_next(self):
self.focus_next()
self.group.focus(self.focused, False)
def cmd_previous(self):
self.focus_previous()
self.group.focus(self.focused, False)
def cmd_down(self):
self.focus_next()
self.group.focus(self.focused, False)
def cmd_up(self):
self.focus_previous()
self.group.focus(self.focused, False)
def cmd_shuffle_up(self):
index = self.windows.index(self.focused)
try:
self.windows[index], self.windows[index - 1] =\
self.windows[index - 1], self.windows[index]
except IndexError:
self.windows[index], self.windows[-1] =\
self.windows[-1], self.windows[index]
self.group.layoutAll()
def cmd_shuffle_down(self):
index = self.windows.index(self.focused)
try:
self.windows[index], self.windows[index + 1] =\
self.windows[index + 1], self.windows[index]
except IndexError:
self.windows[index], self.windows[0] =\
self.windows[0], self.windows[index]
self.group.layoutAll()
def cmd_maximize(self):
if self.windows:
self.maximized = self.focused
self.group.layoutAll()
def cmd_normalize(self):
self.maximized = None
self.group.layoutAll()
def cmd_grow(self):
if not self.maximized:
return
if self.focused is self.maximized:
self.grow()
else:
self.shrink()
def cmd_shrink(self):
if not self.maximized:
return
if self.focused is self.maximized:
self.shrink()
else:
self.grow()
| mit | -3,183,962,367,164,014,000 | 32.743506 | 79 | 0.488983 | false |
phsmit/iwclul2016-scripts | 01_dataprep/trn_to_phn.py | 1 | 1177 | #!/usr/bin/env python3
import os
import sys
def main(langdat_dir, trn_file, phn_dir):
phone_map = {v[0]: v[1].strip() for v in (l.split(None, 1) for l in open('{}/phones'.format(langdat_dir), encoding='utf-8'))}
for line in open(trn_file):
parts = line.split()
sentence = parts[:-1]
sid = parts[-1][1:-1]
phn = open(os.path.join(phn_dir,sid+".phn"), "w", encoding="iso8859-15")
print("0 0 __", file=phn)
phones = '_'
for word in sentence:
for c in word:
phones += phone_map[c]
phones += '_'
for j in range(1, len(phones)-1):
if phones[j] == '_':
print("0 0 _", file=phn)
continue
lci = j -1
while lci > 0 and phones[lci] == '_':
lci -= 1
rci = j +1
while rci < len(phones) - 1 and phones[rci] == '_':
rci += 1
print("0 0 {}-{}+{}".format(phones[lci], phones[j], phones[rci]), file=phn)
print("0 0 __", file=phn)
phn.close()
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2], sys.argv[3]) | bsd-3-clause | -4,783,353,102,058,689,000 | 24.608696 | 129 | 0.460493 | false |
PetrDlouhy/django | tests/queries/models.py | 36 | 16195 | """
Various complex queries that have been problematic in the past.
"""
from __future__ import unicode_literals
import threading
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
class DumbCategory(models.Model):
pass
class ProxyCategory(DumbCategory):
class Meta:
proxy = True
@python_2_unicode_compatible
class NamedCategory(DumbCategory):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Tag(models.Model):
name = models.CharField(max_length=10)
parent = models.ForeignKey('self', blank=True, null=True,
related_name='children')
category = models.ForeignKey(NamedCategory, null=True, default=None)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Note(models.Model):
note = models.CharField(max_length=100)
misc = models.CharField(max_length=10)
class Meta:
ordering = ['note']
def __str__(self):
return self.note
def __init__(self, *args, **kwargs):
super(Note, self).__init__(*args, **kwargs)
# Regression for #13227 -- having an attribute that
# is unpickleable doesn't stop you from cloning queries
# that use objects of that type as an argument.
self.lock = threading.Lock()
@python_2_unicode_compatible
class Annotation(models.Model):
name = models.CharField(max_length=10)
tag = models.ForeignKey(Tag)
notes = models.ManyToManyField(Note)
def __str__(self):
return self.name
@python_2_unicode_compatible
class ExtraInfo(models.Model):
info = models.CharField(max_length=100)
note = models.ForeignKey(Note)
value = models.IntegerField(null=True)
class Meta:
ordering = ['info']
def __str__(self):
return self.info
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=10)
num = models.IntegerField(unique=True)
extra = models.ForeignKey(ExtraInfo)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Item(models.Model):
name = models.CharField(max_length=10)
created = models.DateTimeField()
modified = models.DateTimeField(blank=True, null=True)
tags = models.ManyToManyField(Tag, blank=True)
creator = models.ForeignKey(Author)
note = models.ForeignKey(Note)
class Meta:
ordering = ['-note', 'name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Report(models.Model):
name = models.CharField(max_length=10)
creator = models.ForeignKey(Author, to_field='num', null=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Ranking(models.Model):
rank = models.IntegerField()
author = models.ForeignKey(Author)
class Meta:
# A complex ordering specification. Should stress the system a bit.
ordering = ('author__extra__note', 'author__name', 'rank')
def __str__(self):
return '%d: %s' % (self.rank, self.author.name)
@python_2_unicode_compatible
class Cover(models.Model):
title = models.CharField(max_length=50)
item = models.ForeignKey(Item)
class Meta:
ordering = ['item']
def __str__(self):
return self.title
@python_2_unicode_compatible
class Number(models.Model):
num = models.IntegerField()
def __str__(self):
return six.text_type(self.num)
# Symmetrical m2m field with a normal field using the reverse accessor name
# ("valid").
class Valid(models.Model):
valid = models.CharField(max_length=10)
parent = models.ManyToManyField('self')
class Meta:
ordering = ['valid']
# Some funky cross-linked models for testing a couple of infinite recursion
# cases.
class X(models.Model):
y = models.ForeignKey('Y')
class Y(models.Model):
x1 = models.ForeignKey(X, related_name='y1')
# Some models with a cycle in the default ordering. This would be bad if we
# didn't catch the infinite loop.
class LoopX(models.Model):
y = models.ForeignKey('LoopY')
class Meta:
ordering = ['y']
class LoopY(models.Model):
x = models.ForeignKey(LoopX)
class Meta:
ordering = ['x']
class LoopZ(models.Model):
z = models.ForeignKey('self')
class Meta:
ordering = ['z']
# A model and custom default manager combination.
class CustomManager(models.Manager):
def get_queryset(self):
qs = super(CustomManager, self).get_queryset()
return qs.filter(public=True, tag__name='t1')
@python_2_unicode_compatible
class ManagedModel(models.Model):
data = models.CharField(max_length=10)
tag = models.ForeignKey(Tag)
public = models.BooleanField(default=True)
objects = CustomManager()
normal_manager = models.Manager()
def __str__(self):
return self.data
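# For example (illustrative): ManagedModel.objects.all() only returns public
# objects whose tag is named 't1', while ManagedModel.normal_manager.all()
# returns every row.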
# An inter-related setup with multiple paths from Child to Detail.
class Detail(models.Model):
data = models.CharField(max_length=10)
class MemberManager(models.Manager):
def get_queryset(self):
return super(MemberManager, self).get_queryset().select_related("details")
class Member(models.Model):
name = models.CharField(max_length=10)
details = models.OneToOneField(Detail, primary_key=True)
objects = MemberManager()
class Child(models.Model):
person = models.OneToOneField(Member, primary_key=True)
parent = models.ForeignKey(Member, related_name="children")
# Custom primary keys interfered with ordering in the past.
class CustomPk(models.Model):
name = models.CharField(max_length=10, primary_key=True)
extra = models.CharField(max_length=10)
class Meta:
ordering = ['name', 'extra']
class Related(models.Model):
custom = models.ForeignKey(CustomPk)
class CustomPkTag(models.Model):
id = models.CharField(max_length=20, primary_key=True)
custom_pk = models.ManyToManyField(CustomPk)
tag = models.CharField(max_length=20)
# An inter-related setup with a model subclass that has a nullable
# path to another model, and a return path from that model.
@python_2_unicode_compatible
class Celebrity(models.Model):
name = models.CharField("Name", max_length=20)
greatest_fan = models.ForeignKey("Fan", null=True, unique=True)
def __str__(self):
return self.name
class TvChef(Celebrity):
pass
class Fan(models.Model):
fan_of = models.ForeignKey(Celebrity)
# Multiple foreign keys
@python_2_unicode_compatible
class LeafA(models.Model):
data = models.CharField(max_length=10)
def __str__(self):
return self.data
class LeafB(models.Model):
data = models.CharField(max_length=10)
class Join(models.Model):
a = models.ForeignKey(LeafA)
b = models.ForeignKey(LeafB)
@python_2_unicode_compatible
class ReservedName(models.Model):
name = models.CharField(max_length=20)
order = models.IntegerField()
def __str__(self):
return self.name
# A simpler shared-foreign-key setup that can expose some problems.
@python_2_unicode_compatible
class SharedConnection(models.Model):
data = models.CharField(max_length=10)
def __str__(self):
return self.data
class PointerA(models.Model):
connection = models.ForeignKey(SharedConnection)
class PointerB(models.Model):
connection = models.ForeignKey(SharedConnection)
# Multi-layer ordering
@python_2_unicode_compatible
class SingleObject(models.Model):
name = models.CharField(max_length=10)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class RelatedObject(models.Model):
single = models.ForeignKey(SingleObject, null=True)
f = models.IntegerField(null=True)
class Meta:
ordering = ['single']
@python_2_unicode_compatible
class Plaything(models.Model):
name = models.CharField(max_length=10)
others = models.ForeignKey(RelatedObject, null=True)
class Meta:
ordering = ['others']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Article(models.Model):
name = models.CharField(max_length=20)
created = models.DateTimeField()
def __str__(self):
return self.name
@python_2_unicode_compatible
class Food(models.Model):
name = models.CharField(max_length=20, unique=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Eaten(models.Model):
food = models.ForeignKey(Food, to_field="name", null=True)
meal = models.CharField(max_length=20)
def __str__(self):
return "%s at %s" % (self.food, self.meal)
@python_2_unicode_compatible
class Node(models.Model):
num = models.IntegerField(unique=True)
parent = models.ForeignKey("self", to_field="num", null=True)
def __str__(self):
return "%s" % self.num
# Bug #12252
@python_2_unicode_compatible
class ObjectA(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
def __iter__(self):
# Ticket #23721
assert False, 'type checking should happen without calling model __iter__'
class ProxyObjectA(ObjectA):
class Meta:
proxy = True
class ChildObjectA(ObjectA):
pass
@python_2_unicode_compatible
class ObjectB(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA)
num = models.PositiveSmallIntegerField()
def __str__(self):
return self.name
class ProxyObjectB(ObjectB):
class Meta:
proxy = True
@python_2_unicode_compatible
class ObjectC(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA, null=True)
objectb = models.ForeignKey(ObjectB, null=True)
childobjecta = models.ForeignKey(ChildObjectA, null=True, related_name='ca_pk')
def __str__(self):
return self.name
@python_2_unicode_compatible
class SimpleCategory(models.Model):
name = models.CharField(max_length=15)
def __str__(self):
return self.name
@python_2_unicode_compatible
class SpecialCategory(SimpleCategory):
special_name = models.CharField(max_length=15)
def __str__(self):
return self.name + " " + self.special_name
@python_2_unicode_compatible
class CategoryItem(models.Model):
category = models.ForeignKey(SimpleCategory)
def __str__(self):
return "category item: " + str(self.category)
@python_2_unicode_compatible
class OneToOneCategory(models.Model):
new_name = models.CharField(max_length=15)
category = models.OneToOneField(SimpleCategory)
def __str__(self):
return "one2one " + self.new_name
class CategoryRelationship(models.Model):
first = models.ForeignKey(SimpleCategory, related_name='first_rel')
second = models.ForeignKey(SimpleCategory, related_name='second_rel')
class NullableName(models.Model):
name = models.CharField(max_length=20, null=True)
class Meta:
ordering = ['id']
class ModelD(models.Model):
name = models.TextField()
class ModelC(models.Model):
name = models.TextField()
class ModelB(models.Model):
name = models.TextField()
c = models.ForeignKey(ModelC)
class ModelA(models.Model):
name = models.TextField()
b = models.ForeignKey(ModelB, null=True)
d = models.ForeignKey(ModelD)
@python_2_unicode_compatible
class Job(models.Model):
name = models.CharField(max_length=20, unique=True)
def __str__(self):
return self.name
class JobResponsibilities(models.Model):
job = models.ForeignKey(Job, to_field='name')
responsibility = models.ForeignKey('Responsibility', to_field='description')
@python_2_unicode_compatible
class Responsibility(models.Model):
description = models.CharField(max_length=20, unique=True)
jobs = models.ManyToManyField(Job, through=JobResponsibilities,
related_name='responsibilities')
def __str__(self):
return self.description
# Models for disjunction join promotion low level testing.
class FK1(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class FK2(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class FK3(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class BaseA(models.Model):
a = models.ForeignKey(FK1, null=True)
b = models.ForeignKey(FK2, null=True)
c = models.ForeignKey(FK3, null=True)
@python_2_unicode_compatible
class Identifier(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Program(models.Model):
identifier = models.OneToOneField(Identifier)
class Channel(models.Model):
programs = models.ManyToManyField(Program)
identifier = models.OneToOneField(Identifier)
class Book(models.Model):
title = models.TextField()
chapter = models.ForeignKey('Chapter')
class Chapter(models.Model):
title = models.TextField()
paragraph = models.ForeignKey('Paragraph')
class Paragraph(models.Model):
text = models.TextField()
page = models.ManyToManyField('Page')
class Page(models.Model):
text = models.TextField()
class MyObject(models.Model):
parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
data = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
# Models for #17600 regressions
@python_2_unicode_compatible
class Order(models.Model):
id = models.IntegerField(primary_key=True)
class Meta:
ordering = ('pk', )
def __str__(self):
return '%s' % self.pk
@python_2_unicode_compatible
class OrderItem(models.Model):
order = models.ForeignKey(Order, related_name='items')
status = models.IntegerField()
class Meta:
ordering = ('pk', )
def __str__(self):
return '%s' % self.pk
class BaseUser(models.Model):
pass
@python_2_unicode_compatible
class Task(models.Model):
title = models.CharField(max_length=10)
owner = models.ForeignKey(BaseUser, related_name='owner')
creator = models.ForeignKey(BaseUser, related_name='creator')
def __str__(self):
return self.title
@python_2_unicode_compatible
class Staff(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
@python_2_unicode_compatible
class StaffUser(BaseUser):
staff = models.OneToOneField(Staff, related_name='user')
def __str__(self):
return self.staff
class Ticket21203Parent(models.Model):
parentid = models.AutoField(primary_key=True)
parent_bool = models.BooleanField(default=True)
created = models.DateTimeField(auto_now=True)
class Ticket21203Child(models.Model):
childid = models.AutoField(primary_key=True)
parent = models.ForeignKey(Ticket21203Parent)
class Person(models.Model):
name = models.CharField(max_length=128)
@python_2_unicode_compatible
class Company(models.Model):
name = models.CharField(max_length=128)
employees = models.ManyToManyField(Person, related_name='employers', through='Employment')
def __str__(self):
return self.name
class Employment(models.Model):
employer = models.ForeignKey(Company)
employee = models.ForeignKey(Person)
title = models.CharField(max_length=128)
# Bug #22429
class School(models.Model):
pass
class Student(models.Model):
school = models.ForeignKey(School)
class Classroom(models.Model):
school = models.ForeignKey(School)
students = models.ManyToManyField(Student, related_name='classroom')
class Ticket23605A(models.Model):
pass
class Ticket23605B(models.Model):
modela_fk = models.ForeignKey(Ticket23605A)
modelc_fk = models.ForeignKey("Ticket23605C")
field_b0 = models.IntegerField(null=True)
field_b1 = models.BooleanField(default=False)
class Ticket23605C(models.Model):
field_c0 = models.FloatField()
| bsd-3-clause | -2,722,055,568,291,930,600 | 21.65035 | 94 | 0.682495 | false |
Carreau/readthedocs.org | readthedocs/core/utils.py | 8 | 3694 | import getpass
import logging
import os
from urlparse import urlparse
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
from django.template import Context
from builds.models import Build, Version
log = logging.getLogger(__name__)
SYNC_USER = getattr(settings, 'SYNC_USER', getpass.getuser())
def run_on_app_servers(command):
"""
    A helper to run a command on all of the configured app servers
"""
log.info("Running %s on app servers" % command)
ret_val = 0
if getattr(settings, "MULTIPLE_APP_SERVERS", None):
for server in settings.MULTIPLE_APP_SERVERS:
ret = os.system("ssh %s@%s %s" % (SYNC_USER, server, command))
if ret != 0:
ret_val = ret
return ret_val
else:
ret = os.system(command)
return ret
def make_latest(project):
"""
Useful for correcting versions with no latest, using the database.
>>> no_latest = Project.objects.exclude(versions__slug__in=['latest'])
>>> for project in no_latest:
>>> make_latest(project)
"""
branch = project.default_branch or project.vcs_repo().fallback_branch
version_data, created = Version.objects.get_or_create(
project=project,
slug='latest',
type='branch',
active=True,
verbose_name='latest',
identifier=branch,
)
def clean_url(url):
parsed = urlparse(url)
if parsed.scheme:
scheme, netloc = parsed.scheme, parsed.netloc
elif parsed.netloc:
scheme, netloc = "http", parsed.netloc
else:
scheme, netloc = "http", parsed.path
return netloc
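# A quick sketch of how clean_url() behaves (the hostnames below are made-up
# illustrations, not values used anywhere in this module):
#
#   clean_url('http://docs.example.org/en/latest/')  ->  'docs.example.org'
#   clean_url('docs.example.org')                     ->  'docs.example.org'
#
# With no scheme, urlparse() puts the whole string into ``path``, so the last
# branch falls back to treating it as the host.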
def cname_to_slug(host):
from dns import resolver
answer = [ans for ans in resolver.query(host, 'CNAME')][0]
domain = answer.target.to_unicode()
slug = domain.split('.')[0]
return slug
def trigger_build(project, version=None, record=True, force=False, basic=False):
"""
An API to wrap the triggering of a build.
"""
# Avoid circular import
from projects.tasks import update_docs
if project.skip:
return None
if not version:
version = project.versions.get(slug='latest')
if record:
build = Build.objects.create(
project=project,
version=version,
type='html',
state='triggered',
success=True,
)
update_docs.delay(pk=project.pk, version_pk=version.pk, record=record, force=force, basic=basic, build_pk=build.pk)
else:
build = None
update_docs.delay(pk=project.pk, version_pk=version.pk, record=record, force=force, basic=basic)
return build
def send_email(recipient, subject, template, template_html, context=None,
request=None):
'''
Send multipart email
recipient
Email recipient address
subject
Email subject header
template
Plain text template to send
template_html
HTML template to send as new message part
context
A dictionary to pass into the template calls
request
Request object for determining absolute URL
'''
if request:
scheme = 'https' if request.is_secure() else 'http'
context['uri'] = '{scheme}://{host}'.format(scheme=scheme,
host=request.get_host())
ctx = Context(context)
msg = EmailMultiAlternatives(
subject,
get_template(template).render(ctx),
settings.DEFAULT_FROM_EMAIL,
[recipient]
)
msg.attach_alternative(get_template(template_html).render(ctx), 'text/html')
msg.send()
| mit | 6,760,016,510,612,581,000 | 25.768116 | 123 | 0.622902 | false |
lancezlin/pyjs | pyjswidgets/pyjamas/ui/FlexCellFormatter.py | 8 | 1365 | # Copyright 2006 James Tauber and contributors
# Copyright (C) 2009 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas.ui.CellFormatter import CellFormatter
class FlexCellFormatter(CellFormatter):
def __init__(self, outer, **kwargs):
CellFormatter.__init__(self, outer, **kwargs)
def getColSpan(self, row, column):
return DOM.getIntAttribute(self.getElement(row, column), "colSpan")
def getRowSpan(self, row, column):
return DOM.getIntAttribute(self.getElement(row, column), "rowSpan")
def setColSpan(self, row, column, colSpan):
DOM.setIntAttribute(self.ensureElement(row, column), "colSpan", colSpan)
def setRowSpan(self, row, column, rowSpan):
DOM.setIntAttribute(self.ensureElement(row, column), "rowSpan", rowSpan)
| apache-2.0 | -631,005,595,587,995,000 | 38 | 80 | 0.733333 | false |
junghans/espressopp | contrib/mpi4py/mpi4py-1.3/test/test_doc.py | 3 | 1559 | import types
from mpi4py import MPI
import mpiunittest as unittest
ModuleType = type(MPI)
ClassType = type(MPI.Comm)
FunctionType = type(MPI.Init)
MethodDescrType = type(MPI.Comm.Get_rank)
GetSetDescrType = type(MPI.Comm.rank)
def getdocstr(mc, docstrings, namespace=None):
name = getattr(mc, '__name__', None)
if name is None: return
if name in ('__builtin__', 'builtins'): return
if name.startswith('_'): return
if namespace: name = '%s.%s' % (namespace, name)
if type(mc) in (ModuleType, ClassType):
doc = getattr(mc, '__doc__', None)
docstrings[name] = doc
for k, v in vars(mc).items():
getdocstr(v, docstrings, name)
elif type(mc) in (FunctionType, MethodDescrType, GetSetDescrType):
doc = getattr(mc, '__doc__', None)
docstrings[name] = doc
class TestDoc(unittest.TestCase):
def testDoc(self):
missing = False
docs = { }
getdocstr(MPI, docs)
for k in docs:
if not k.startswith('_'):
doc = docs[k]
if doc is None:
print ("'%s': missing docstring" % k)
missing = True
else:
doc = doc.strip()
if not doc:
print ("'%s': empty docstring" % k)
missing = True
if 'mpi4py.MPI' in doc:
print ("'%s': bad format docstring" % k)
self.assertFalse(missing)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -7,050,226,136,262,280,000 | 30.816327 | 70 | 0.530468 | false |
pydata/xarray | doc/gallery/plot_cartopy_facetgrid.py | 4 | 1285 | """
==================================
Multiple plots and map projections
==================================
Control the map projection parameters on multiple axes
This example illustrates how to plot multiple maps and control their extent
and aspect ratio.
For more details see `this discussion`_ on github.
.. _this discussion: https://github.com/pydata/xarray/issues/1397#issuecomment-299190567
"""
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import xarray as xr
# Load the data
ds = xr.tutorial.load_dataset("air_temperature")
air = ds.air.isel(time=[0, 724]) - 273.15
# This is the map projection we want to plot *onto*
map_proj = ccrs.LambertConformal(central_longitude=-95, central_latitude=45)
p = air.plot(
transform=ccrs.PlateCarree(), # the data's projection
col="time",
col_wrap=1, # multiplot settings
aspect=ds.dims["lon"] / ds.dims["lat"], # for a sensible figsize
subplot_kws={"projection": map_proj}, # the plot's projection
)
# We have to set the map's options on all four axes
for ax in p.axes.flat:
ax.coastlines()
ax.set_extent([-160, -30, 5, 75])
    # Without this aspect attribute, the maps will look chaotic and the
# "extent" attribute above will be ignored
ax.set_aspect("equal")
plt.show()
| apache-2.0 | 1,738,798,561,084,066,600 | 27.555556 | 88 | 0.676265 | false |
leezu/mxnet | example/extensions/lib_custom_op/test_gemm.py | 6 | 2831 | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=arguments-differ
# This test checks dynamic loading of a custom library into MXNet
# and verifies end-to-end compute of a simple 2D GEMM custom op
import mxnet as mx
import os
#load library
if (os.name=='posix'):
path = os.path.abspath('libgemm_lib.so')
mx.library.load(path)
elif (os.name=='nt'):
path = os.path.abspath('libgemm_lib.dll')
mx.library.load(path)
a = mx.nd.array([[1,2,3],[4,5,6]])
b = mx.nd.array([[7],[8],[9]])
print("--------start ndarray compute---------")
print(mx.nd.my_gemm(a,b))
print("--------")
print(mx.nd.state_gemm(a,b,test_kw=100))
print("--------start symbolic compute--------")
s = mx.sym.Variable('s')
t = mx.sym.Variable('t')
c = mx.sym.my_gemm(s,t)
d = mx.sym.state_gemm(s,t,test_kw=200)
e = mx.sym.linalg.gemm2(s,t)
out_grad = mx.nd.ones((2,1))
# stateless
block = mx.gluon.nn.SymbolBlock(c,[s,t])
with mx.autograd.record():
a_ = mx.nd.array([[1,2,3],[4,5,6]])
b_ = mx.nd.array([[7],[8],[9]])
a_.attach_grad()
b_.attach_grad()
    # forward
out = block(a_,b_)
print(out)
print('+++++')
# backward
out.backward(out_grad)
print(a_.grad)
print(b_.grad)
print("-------")
# stateful
block2 = mx.gluon.nn.SymbolBlock(d,[s,t])
block2.hybridize(static_alloc=True, static_shape=True)
out2 = block2(a,b)
out2 = block2(a,b)
print(out2)
with mx.autograd.record():
a_ = mx.nd.array([[1,2,3],[4,5,6]])
b_ = mx.nd.array([[7],[8],[9]])
a_.attach_grad()
b_.attach_grad()
# forward
out2 = block2(a_,b_)
print('+++++')
# backward
out2.backward(out_grad)
print(a_.grad)
print(b_.grad)
print("-------")
# baseline
block3 = mx.gluon.nn.SymbolBlock(e,[s,t])
with mx.autograd.record():
a_ = mx.nd.array([[1,2,3],[4,5,6]])
b_ = mx.nd.array([[7],[8],[9]])
a_.attach_grad()
b_.attach_grad()
# forward
out3 = block3(a_,b_)
print(out3)
print('+++++')
# backward
out3.backward(out_grad)
print(a_.grad)
print(b_.grad)
| apache-2.0 | -63,400,042,165,830,720 | 25.961905 | 63 | 0.630519 | false |
Shedino/SherpaHighLevel | catkin_ws/src/px-ros-pkg/mavlink/share/pyshared/pymavlink/examples/rotmat.py | 29 | 8769 | #!/usr/bin/env python
#
# vector3 and rotation matrix classes
# This follows the conventions in the ArduPilot code,
# and is essentially a python version of the AP_Math library
#
# Andrew Tridgell, March 2012
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2.1 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
'''rotation matrix class
'''
from math import sin, cos, sqrt, asin, atan2, pi, radians, acos
class Vector3:
'''a vector'''
def __init__(self, x=None, y=None, z=None):
if x != None and y != None and z != None:
self.x = float(x)
self.y = float(y)
self.z = float(z)
elif x != None and len(x) == 3:
self.x = float(x[0])
self.y = float(x[1])
self.z = float(x[2])
elif x != None:
raise ValueError('bad initialiser')
else:
self.x = float(0)
self.y = float(0)
self.z = float(0)
def __repr__(self):
return 'Vector3(%.2f, %.2f, %.2f)' % (self.x,
self.y,
self.z)
def __add__(self, v):
return Vector3(self.x + v.x,
self.y + v.y,
self.z + v.z)
__radd__ = __add__
def __sub__(self, v):
return Vector3(self.x - v.x,
self.y - v.y,
self.z - v.z)
def __neg__(self):
return Vector3(-self.x, -self.y, -self.z)
def __rsub__(self, v):
return Vector3(v.x - self.x,
v.y - self.y,
v.z - self.z)
def __mul__(self, v):
if isinstance(v, Vector3):
'''dot product'''
return self.x*v.x + self.y*v.y + self.z*v.z
return Vector3(self.x * v,
self.y * v,
self.z * v)
__rmul__ = __mul__
def __div__(self, v):
return Vector3(self.x / v,
self.y / v,
self.z / v)
def __mod__(self, v):
'''cross product'''
return Vector3(self.y*v.z - self.z*v.y,
self.z*v.x - self.x*v.z,
self.x*v.y - self.y*v.x)
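    # A small worked example of the operator overloads (values are purely
    # illustrative):
    #
    #   a = Vector3(1, 0, 0); b = Vector3(0, 1, 0)
    #   a * b   ->  0.0                  (dot product)
    #   a % b   ->  Vector3(0, 0, 1)     (cross product via the '%' overload)
    #   a + b   ->  Vector3(1, 1, 0)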
def __copy__(self):
return Vector3(self.x, self.y, self.z)
copy = __copy__
def length(self):
return sqrt(self.x**2 + self.y**2 + self.z**2)
def zero(self):
self.x = self.y = self.z = 0
def angle(self, v):
'''return the angle between this vector and another vector'''
        return acos((self * v) / (self.length() * v.length()))
def normalized(self):
return self / self.length()
def normalize(self):
v = self.normalized()
self.x = v.x
self.y = v.y
self.z = v.z
class Matrix3:
'''a 3x3 matrix, intended as a rotation matrix'''
def __init__(self, a=None, b=None, c=None):
if a is not None and b is not None and c is not None:
self.a = a.copy()
self.b = b.copy()
self.c = c.copy()
else:
self.identity()
def __repr__(self):
return 'Matrix3((%.2f, %.2f, %.2f), (%.2f, %.2f, %.2f), (%.2f, %.2f, %.2f))' % (
self.a.x, self.a.y, self.a.z,
self.b.x, self.b.y, self.b.z,
self.c.x, self.c.y, self.c.z)
def identity(self):
self.a = Vector3(1,0,0)
self.b = Vector3(0,1,0)
self.c = Vector3(0,0,1)
def transposed(self):
return Matrix3(Vector3(self.a.x, self.b.x, self.c.x),
Vector3(self.a.y, self.b.y, self.c.y),
Vector3(self.a.z, self.b.z, self.c.z))
def from_euler(self, roll, pitch, yaw):
'''fill the matrix from Euler angles in radians'''
cp = cos(pitch)
sp = sin(pitch)
sr = sin(roll)
cr = cos(roll)
sy = sin(yaw)
cy = cos(yaw)
self.a.x = cp * cy
self.a.y = (sr * sp * cy) - (cr * sy)
self.a.z = (cr * sp * cy) + (sr * sy)
self.b.x = cp * sy
self.b.y = (sr * sp * sy) + (cr * cy)
self.b.z = (cr * sp * sy) - (sr * cy)
self.c.x = -sp
self.c.y = sr * cp
self.c.z = cr * cp
def to_euler(self):
'''find Euler angles for the matrix'''
if self.c.x >= 1.0:
pitch = pi
elif self.c.x <= -1.0:
pitch = -pi
else:
pitch = -asin(self.c.x)
roll = atan2(self.c.y, self.c.z)
yaw = atan2(self.b.x, self.a.x)
return (roll, pitch, yaw)
def __add__(self, m):
return Matrix3(self.a + m.a, self.b + m.b, self.c + m.c)
__radd__ = __add__
def __sub__(self, m):
return Matrix3(self.a - m.a, self.b - m.b, self.c - m.c)
def __rsub__(self, m):
return Matrix3(m.a - self.a, m.b - self.b, m.c - self.c)
def __mul__(self, other):
if isinstance(other, Vector3):
v = other
return Vector3(self.a.x * v.x + self.a.y * v.y + self.a.z * v.z,
self.b.x * v.x + self.b.y * v.y + self.b.z * v.z,
self.c.x * v.x + self.c.y * v.y + self.c.z * v.z)
elif isinstance(other, Matrix3):
m = other
return Matrix3(Vector3(self.a.x * m.a.x + self.a.y * m.b.x + self.a.z * m.c.x,
self.a.x * m.a.y + self.a.y * m.b.y + self.a.z * m.c.y,
self.a.x * m.a.z + self.a.y * m.b.z + self.a.z * m.c.z),
Vector3(self.b.x * m.a.x + self.b.y * m.b.x + self.b.z * m.c.x,
self.b.x * m.a.y + self.b.y * m.b.y + self.b.z * m.c.y,
self.b.x * m.a.z + self.b.y * m.b.z + self.b.z * m.c.z),
Vector3(self.c.x * m.a.x + self.c.y * m.b.x + self.c.z * m.c.x,
self.c.x * m.a.y + self.c.y * m.b.y + self.c.z * m.c.y,
self.c.x * m.a.z + self.c.y * m.b.z + self.c.z * m.c.z))
v = other
return Matrix3(self.a * v, self.b * v, self.c * v)
def __div__(self, v):
return Matrix3(self.a / v, self.b / v, self.c / v)
def __neg__(self):
return Matrix3(-self.a, -self.b, -self.c)
def __copy__(self):
return Matrix3(self.a, self.b, self.c)
copy = __copy__
def rotate(self, g):
'''rotate the matrix by a given amount on 3 axes'''
temp_matrix = Matrix3()
a = self.a
b = self.b
c = self.c
temp_matrix.a.x = a.y * g.z - a.z * g.y
temp_matrix.a.y = a.z * g.x - a.x * g.z
temp_matrix.a.z = a.x * g.y - a.y * g.x
temp_matrix.b.x = b.y * g.z - b.z * g.y
temp_matrix.b.y = b.z * g.x - b.x * g.z
temp_matrix.b.z = b.x * g.y - b.y * g.x
temp_matrix.c.x = c.y * g.z - c.z * g.y
temp_matrix.c.y = c.z * g.x - c.x * g.z
temp_matrix.c.z = c.x * g.y - c.y * g.x
self.a += temp_matrix.a
self.b += temp_matrix.b
self.c += temp_matrix.c
def normalize(self):
'''re-normalise a rotation matrix'''
error = self.a * self.b
t0 = self.a - (self.b * (0.5 * error))
t1 = self.b - (self.a * (0.5 * error))
t2 = t0 % t1
self.a = t0 * (1.0 / t0.length())
self.b = t1 * (1.0 / t1.length())
self.c = t2 * (1.0 / t2.length())
def trace(self):
'''the trace of the matrix'''
return self.a.x + self.b.y + self.c.z
def test_euler():
'''check that from_euler() and to_euler() are consistent'''
m = Matrix3()
from math import radians, degrees
for r in range(-179, 179, 3):
for p in range(-89, 89, 3):
for y in range(-179, 179, 3):
m.from_euler(radians(r), radians(p), radians(y))
(r2, p2, y2) = m.to_euler()
v1 = Vector3(r,p,y)
v2 = Vector3(degrees(r2),degrees(p2),degrees(y2))
diff = v1 - v2
if diff.length() > 1.0e-12:
print('EULER ERROR:', v1, v2, diff.length())
if __name__ == "__main__":
import doctest
doctest.testmod()
test_euler()
| bsd-3-clause | 2,206,745,543,868,549,400 | 31.598513 | 91 | 0.475653 | false |
openstack/tooz | tooz/drivers/redis.py | 1 | 30171 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from distutils import version
import functools
import logging
import string
import threading
from oslo_utils import encodeutils
from oslo_utils import strutils
import redis
from redis import exceptions
from redis import sentinel
import tooz
from tooz import coordination
from tooz import locking
from tooz import utils
LOG = logging.getLogger(__name__)
def _handle_failures(func=None, n_tries=15):
"""Translates common redis exceptions into tooz exceptions.
This also enables retrying on certain exceptions.
:param func: the function to act on
:param n_tries: the number of retries
"""
if func is None:
return functools.partial(
_handle_failures,
n_tries=n_tries
)
@functools.wraps(func)
def wrapper(*args, **kwargs):
ntries = n_tries
while ntries > 1:
try:
return func(*args, **kwargs)
except exceptions.ConnectionError as e:
# retry ntries times and then raise a connection error
ntries -= 1
if ntries >= 1:
utils.raise_with_cause(coordination.ToozConnectionError,
encodeutils.exception_to_unicode(e),
cause=e)
except (exceptions.TimeoutError) as e:
utils.raise_with_cause(coordination.ToozConnectionError,
encodeutils.exception_to_unicode(e),
cause=e)
except exceptions.RedisError as e:
utils.raise_with_cause(tooz.ToozError,
encodeutils.exception_to_unicode(e),
cause=e)
return func(*args, **kwargs)
return wrapper
class RedisLock(locking.Lock):
def __init__(self, coord, client, name, timeout):
name = "%s_%s_lock" % (coord.namespace, str(name))
super(RedisLock, self).__init__(name)
# NOTE(jd) Make sure we don't release and heartbeat at the same time by
        # using an exclusive access lock (LP#1557593)
self._exclusive_access = threading.Lock()
self._lock = client.lock(name,
timeout=timeout,
thread_local=False)
self._coord = coord
self._client = client
@_handle_failures
def is_still_owner(self):
lock_tok = self._lock.local.token
if not lock_tok:
return False
owner_tok = self._client.get(self.name)
return owner_tok == lock_tok
@_handle_failures
def break_(self):
return bool(self._client.delete(self.name))
@_handle_failures
def acquire(self, blocking=True, shared=False):
if shared:
raise tooz.NotImplemented
blocking, timeout = utils.convert_blocking(blocking)
acquired = self._lock.acquire(
blocking=blocking, blocking_timeout=timeout)
if acquired:
with self._exclusive_access:
self._coord._acquired_locks.add(self)
return acquired
@_handle_failures
def release(self):
with self._exclusive_access:
try:
self._lock.release()
except exceptions.LockError as e:
LOG.error("Unable to release lock '%r': %s", self, e)
return False
finally:
self._coord._acquired_locks.discard(self)
return True
@_handle_failures
def heartbeat(self):
with self._exclusive_access:
if self.acquired:
self._lock.reacquire()
return True
return False
@property
def acquired(self):
return self in self._coord._acquired_locks
class RedisDriver(coordination.CoordinationDriverCachedRunWatchers,
coordination.CoordinationDriverWithExecutor):
"""Redis provides a few nice benefits that act as a poormans zookeeper.
It **is** fully functional and implements all of the coordination
driver API(s). It stores data into `redis`_ using the provided `redis`_
API(s) using `msgpack`_ encoded values as needed.
    - Durability (when set up with `AOF`_ mode).
- Consistent, note that this is still restricted to only
one redis server, without the recently released redis (alpha)
clustering > 1 server will not be consistent when partitions
or failures occur (even redis clustering docs state it is
not a fully AP or CP solution, which means even with it there
will still be *potential* inconsistencies).
    - Master/slave failover (when set up with redis `sentinel`_), giving
some notion of HA (values *can* be lost when a failover transition
occurs).
The Redis driver connection URI should look like::
redis://[:PASSWORD@]HOST:PORT[?OPTION=VALUE[&OPTION2=VALUE2[&...]]]
For a list of options recognized by this driver, see the documentation
for the member CLIENT_ARGS, and to determine the expected types of those
options see CLIENT_BOOL_ARGS, CLIENT_INT_ARGS, and CLIENT_LIST_ARGS.
To use a `sentinel`_ the connection URI must point to the sentinel server.
At connection time the sentinel will be asked for the current IP and port
of the master and then connect there. The connection URI for sentinel
should be written as follows::
redis://<sentinel host>:<sentinel port>?sentinel=<master name>
Additional sentinel hosts are listed with multiple ``sentinel_fallback``
parameters as follows::
redis://<sentinel host>:<sentinel port>?sentinel=<master name>&
sentinel_fallback=<other sentinel host>:<sentinel port>&
sentinel_fallback=<other sentinel host>:<sentinel port>&
sentinel_fallback=<other sentinel host>:<sentinel port>
Further resources/links:
- http://redis.io/
- http://redis.io/topics/sentinel
- http://redis.io/topics/cluster-spec
    Note that this client will itself retry on transaction failure (when the
keys being watched have changed underneath the current transaction).
Currently the number of attempts that are tried is infinite (this might
be addressed in https://github.com/andymccurdy/redis-py/issues/566 when
that gets worked on). See http://redis.io/topics/transactions for more
information on this topic.
General recommendations/usage considerations:
- When used for locks, run in AOF mode and think carefully about how
your redis deployment handles losing a server (the clustering support
      is supposed to help tolerate the loss of servers, but it is also of unknown
      reliability and is relatively new, so use at your own risk).
.. _redis: http://redis.io/
.. _msgpack: http://msgpack.org/
.. _sentinel: http://redis.io/topics/sentinel
.. _AOF: http://redis.io/topics/persistence
"""
CHARACTERISTICS = (
coordination.Characteristics.DISTRIBUTED_ACROSS_THREADS,
coordination.Characteristics.DISTRIBUTED_ACROSS_PROCESSES,
coordination.Characteristics.DISTRIBUTED_ACROSS_HOSTS,
coordination.Characteristics.CAUSAL,
)
"""
Tuple of :py:class:`~tooz.coordination.Characteristics` introspectable
    enum member(s) that can be used to interrogate how this driver works.
"""
MIN_VERSION = version.LooseVersion("2.6.0")
"""
The min redis version that this driver requires to operate with...
"""
GROUP_EXISTS = b'__created__'
"""
    Redis deletes dictionaries (hashes) that have no keys in them, which
    means the group key would disappear; without this marker key being
    saved we could not tell the difference between a group not existing
    and a group being empty.
"""
#: Value used (with group exists key) to keep a group from disappearing.
GROUP_EXISTS_VALUE = b'1'
#: Default namespace for keys when none is provided.
DEFAULT_NAMESPACE = b'_tooz'
NAMESPACE_SEP = b':'
"""
Separator that is used to combine a key with the namespace (to get
the **actual** key that will be used).
"""
DEFAULT_ENCODING = 'utf8'
"""
    This is for python 3.x, which behaves differently when handed back
    binary types versus unicode types (redis appears to use binary
    internally); to keep a single consistent approach, make everything
    binary (with this default encoding if one is not given and a unicode
    string is provided).
"""
CLIENT_ARGS = frozenset([
'db',
'encoding',
'retry_on_timeout',
'socket_keepalive',
'socket_timeout',
'ssl',
'ssl_certfile',
'ssl_keyfile',
'sentinel',
'sentinel_fallback',
])
"""
Keys that we allow to proxy from the coordinator configuration into the
redis client (used to configure the redis client internals so that
it works as you expect/want it to).
See: http://redis-py.readthedocs.org/en/latest/#redis.Redis
See: https://github.com/andymccurdy/redis-py/blob/2.10.3/redis/client.py
"""
#: Client arguments that are expected/allowed to be lists.
CLIENT_LIST_ARGS = frozenset([
'sentinel_fallback',
])
#: Client arguments that are expected to be boolean convertible.
CLIENT_BOOL_ARGS = frozenset([
'retry_on_timeout',
'ssl',
])
#: Client arguments that are expected to be int convertible.
CLIENT_INT_ARGS = frozenset([
'db',
'socket_keepalive',
'socket_timeout',
])
#: Default socket timeout to use when none is provided.
CLIENT_DEFAULT_SOCKET_TO = 30
#: String used to keep a key/member alive (until it next expires).
STILL_ALIVE = b"Not dead!"
SCRIPTS = {
'create_group': """
-- Extract *all* the variables (so we can easily know what they are)...
local namespaced_group_key = KEYS[1]
local all_groups_key = KEYS[2]
local no_namespaced_group_key = ARGV[1]
if redis.call("exists", namespaced_group_key) == 1 then
return 0
end
redis.call("sadd", all_groups_key, no_namespaced_group_key)
redis.call("hset", namespaced_group_key,
"${group_existence_key}", "${group_existence_value}")
return 1
""",
'delete_group': """
-- Extract *all* the variables (so we can easily know what they are)...
local namespaced_group_key = KEYS[1]
local all_groups_key = KEYS[2]
local no_namespaced_group_key = ARGV[1]
if redis.call("exists", namespaced_group_key) == 0 then
return -1
end
if redis.call("sismember", all_groups_key, no_namespaced_group_key) == 0 then
return -2
end
if redis.call("hlen", namespaced_group_key) > 1 then
return -3
end
-- First remove from the set (then delete the group); if the set removal
-- fails, at least the group will still exist (and can be fixed manually)...
if redis.call("srem", all_groups_key, no_namespaced_group_key) == 0 then
return -4
end
redis.call("del", namespaced_group_key)
return 1
""",
'update_capabilities': """
-- Extract *all* the variables (so we can easily know what they are)...
local group_key = KEYS[1]
local member_id = ARGV[1]
local caps = ARGV[2]
if redis.call("exists", group_key) == 0 then
return -1
end
if redis.call("hexists", group_key, member_id) == 0 then
return -2
end
redis.call("hset", group_key, member_id, caps)
return 1
""",
}
"""`Lua`_ **template** scripts that will be used by various methods (they
are turned into real scripts and loaded on call into the :func:`.start`
method).
.. _Lua: http://www.lua.org
"""
EXCLUDE_OPTIONS = CLIENT_LIST_ARGS
def __init__(self, member_id, parsed_url, options):
super(RedisDriver, self).__init__(member_id, parsed_url, options)
self._parsed_url = parsed_url
self._encoding = self._options.get('encoding', self.DEFAULT_ENCODING)
timeout = self._options.get('timeout', self.CLIENT_DEFAULT_SOCKET_TO)
self.timeout = int(timeout)
self.membership_timeout = float(self._options.get(
'membership_timeout', timeout))
lock_timeout = self._options.get('lock_timeout', self.timeout)
self.lock_timeout = int(lock_timeout)
namespace = self._options.get('namespace', self.DEFAULT_NAMESPACE)
self._namespace = utils.to_binary(namespace, encoding=self._encoding)
self._group_prefix = self._namespace + b"_group"
self._beat_prefix = self._namespace + b"_beats"
self._groups = self._namespace + b"_groups"
self._client = None
self._acquired_locks = set()
self._started = False
self._server_info = {}
self._scripts = {}
def _check_fetch_redis_version(self, geq_version, not_existent=True):
if isinstance(geq_version, str):
desired_version = version.LooseVersion(geq_version)
elif isinstance(geq_version, version.LooseVersion):
desired_version = geq_version
else:
raise TypeError("Version check expects a string/version type")
try:
redis_version = version.LooseVersion(
self._server_info['redis_version'])
except KeyError:
return (not_existent, None)
else:
if redis_version < desired_version:
return (False, redis_version)
else:
return (True, redis_version)
@property
def namespace(self):
return self._namespace
@property
def running(self):
return self._started
def get_lock(self, name):
return RedisLock(self, self._client, name, self.lock_timeout)
_dumps = staticmethod(utils.dumps)
_loads = staticmethod(utils.loads)
@classmethod
def _make_client(cls, parsed_url, options, default_socket_timeout):
kwargs = {}
if parsed_url.hostname:
kwargs['host'] = parsed_url.hostname
if parsed_url.port:
kwargs['port'] = parsed_url.port
else:
if not parsed_url.path:
raise ValueError("Expected socket path in parsed urls path")
kwargs['unix_socket_path'] = parsed_url.path
if parsed_url.password:
kwargs['password'] = parsed_url.password
for a in cls.CLIENT_ARGS:
if a not in options:
continue
if a in cls.CLIENT_BOOL_ARGS:
v = strutils.bool_from_string(options[a])
elif a in cls.CLIENT_LIST_ARGS:
v = options[a]
elif a in cls.CLIENT_INT_ARGS:
v = int(options[a])
else:
v = options[a]
kwargs[a] = v
if 'socket_timeout' not in kwargs:
kwargs['socket_timeout'] = default_socket_timeout
# Ask the sentinel for the current master if there is a
# sentinel arg.
if 'sentinel' in kwargs:
sentinel_hosts = [
tuple(fallback.split(':'))
for fallback in kwargs.get('sentinel_fallback', [])
]
sentinel_hosts.insert(0, (kwargs['host'], kwargs['port']))
sentinel_server = sentinel.Sentinel(
sentinel_hosts,
socket_timeout=kwargs['socket_timeout'])
sentinel_name = kwargs['sentinel']
del kwargs['sentinel']
if 'sentinel_fallback' in kwargs:
del kwargs['sentinel_fallback']
master_client = sentinel_server.master_for(sentinel_name, **kwargs)
# The master_client is a redis.StrictRedis using a
# Sentinel managed connection pool.
return master_client
return redis.StrictRedis(**kwargs)
@_handle_failures
def _start(self):
super(RedisDriver, self)._start()
try:
self._client = self._make_client(self._parsed_url, self._options,
self.timeout)
except exceptions.RedisError as e:
utils.raise_with_cause(coordination.ToozConnectionError,
encodeutils.exception_to_unicode(e),
cause=e)
else:
            # Ensure that the server is alive and not dead; this does not
            # guarantee the server will always stay alive, but does ensure
            # that it is alive at least once...
self._server_info = self._client.info()
# Validate we have a good enough redis version we are connected
# to so that the basic set of features we support will actually
# work (instead of blowing up).
new_enough, redis_version = self._check_fetch_redis_version(
self.MIN_VERSION)
if not new_enough:
raise tooz.NotImplemented("Redis version greater than or"
" equal to '%s' is required"
" to use this driver; '%s' is"
" being used which is not new"
" enough" % (self.MIN_VERSION,
redis_version))
tpl_params = {
'group_existence_value': self.GROUP_EXISTS_VALUE,
'group_existence_key': self.GROUP_EXISTS,
}
# For py3.x ensure these are unicode since the string template
# replacement will expect unicode (and we don't want b'' as a
# prefix which will happen in py3.x if this is not done).
for (k, v) in tpl_params.copy().items():
if isinstance(v, bytes):
v = v.decode('ascii')
tpl_params[k] = v
prepared_scripts = {}
for name, raw_script_tpl in self.SCRIPTS.items():
script_tpl = string.Template(raw_script_tpl)
script = script_tpl.substitute(**tpl_params)
prepared_scripts[name] = self._client.register_script(script)
self._scripts = prepared_scripts
self.heartbeat()
self._started = True
def _encode_beat_id(self, member_id):
member_id = utils.to_binary(member_id, encoding=self._encoding)
return self.NAMESPACE_SEP.join([self._beat_prefix, member_id])
def _encode_member_id(self, member_id):
member_id = utils.to_binary(member_id, encoding=self._encoding)
if member_id == self.GROUP_EXISTS:
raise ValueError("Not allowed to use private keys as a member id")
return member_id
def _decode_member_id(self, member_id):
return utils.to_binary(member_id, encoding=self._encoding)
def _encode_group_leader(self, group_id):
group_id = utils.to_binary(group_id, encoding=self._encoding)
return b"leader_of_" + group_id
def _encode_group_id(self, group_id, apply_namespace=True):
group_id = utils.to_binary(group_id, encoding=self._encoding)
if not apply_namespace:
return group_id
return self.NAMESPACE_SEP.join([self._group_prefix, group_id])
def _decode_group_id(self, group_id):
return utils.to_binary(group_id, encoding=self._encoding)
@_handle_failures
def heartbeat(self):
beat_id = self._encode_beat_id(self._member_id)
expiry_ms = max(0, int(self.membership_timeout * 1000.0))
self._client.psetex(beat_id, time_ms=expiry_ms,
value=self.STILL_ALIVE)
for lock in self._acquired_locks.copy():
try:
lock.heartbeat()
except tooz.ToozError:
LOG.warning("Unable to heartbeat lock '%s'", lock,
exc_info=True)
return min(self.lock_timeout, self.membership_timeout)
@_handle_failures
def _stop(self):
while self._acquired_locks:
lock = self._acquired_locks.pop()
try:
lock.release()
except tooz.ToozError:
LOG.warning("Unable to release lock '%s'", lock, exc_info=True)
super(RedisDriver, self)._stop()
if self._client is not None:
# Make sure we no longer exist...
beat_id = self._encode_beat_id(self._member_id)
try:
# NOTE(harlowja): this will delete nothing if the key doesn't
# exist in the first place, which is fine/expected/desired...
self._client.delete(beat_id)
except tooz.ToozError:
LOG.warning("Unable to delete heartbeat key '%s'", beat_id,
exc_info=True)
self._client = None
self._server_info = {}
self._scripts.clear()
self._started = False
def _submit(self, cb, *args, **kwargs):
if not self._started:
raise tooz.ToozError("Redis driver has not been started")
return self._executor.submit(cb, *args, **kwargs)
def _get_script(self, script_key):
try:
return self._scripts[script_key]
except KeyError:
raise tooz.ToozError("Redis driver has not been started")
def create_group(self, group_id):
script = self._get_script('create_group')
def _create_group(script):
encoded_group = self._encode_group_id(group_id)
keys = [
encoded_group,
self._groups,
]
args = [
self._encode_group_id(group_id, apply_namespace=False),
]
result = script(keys=keys, args=args)
result = strutils.bool_from_string(result)
if not result:
raise coordination.GroupAlreadyExist(group_id)
return RedisFutureResult(self._submit(_create_group, script))
def update_capabilities(self, group_id, capabilities):
script = self._get_script('update_capabilities')
def _update_capabilities(script):
keys = [
self._encode_group_id(group_id),
]
args = [
self._encode_member_id(self._member_id),
self._dumps(capabilities),
]
result = int(script(keys=keys, args=args))
if result == -1:
raise coordination.GroupNotCreated(group_id)
if result == -2:
raise coordination.MemberNotJoined(group_id, self._member_id)
return RedisFutureResult(self._submit(_update_capabilities, script))
def leave_group(self, group_id):
encoded_group = self._encode_group_id(group_id)
encoded_member_id = self._encode_member_id(self._member_id)
def _leave_group(p):
if not p.exists(encoded_group):
raise coordination.GroupNotCreated(group_id)
p.multi()
p.hdel(encoded_group, encoded_member_id)
c = p.execute()[0]
if c == 0:
raise coordination.MemberNotJoined(group_id, self._member_id)
else:
self._joined_groups.discard(group_id)
return RedisFutureResult(self._submit(self._client.transaction,
_leave_group, encoded_group,
value_from_callable=True))
def get_members(self, group_id):
encoded_group = self._encode_group_id(group_id)
def _get_members(p):
if not p.exists(encoded_group):
raise coordination.GroupNotCreated(group_id)
potential_members = set()
for m in p.hkeys(encoded_group):
m = self._decode_member_id(m)
if m != self.GROUP_EXISTS:
potential_members.add(m)
if not potential_members:
return set()
# Ok now we need to see which members have passed away...
gone_members = set()
member_values = p.mget(map(self._encode_beat_id,
potential_members))
for (potential_member, value) in zip(potential_members,
member_values):
                # Always preserve self (just in case we haven't heartbeated
                # while this call was being made...); this does *not* prevent
                # another client from removing this member though...
if potential_member == self._member_id:
continue
if not value:
gone_members.add(potential_member)
# Trash all the members that no longer are with us... RIP...
if gone_members:
p.multi()
encoded_gone_members = list(self._encode_member_id(m)
for m in gone_members)
p.hdel(encoded_group, *encoded_gone_members)
p.execute()
return set(m for m in potential_members
if m not in gone_members)
return potential_members
return RedisFutureResult(self._submit(self._client.transaction,
_get_members, encoded_group,
value_from_callable=True))
def get_member_capabilities(self, group_id, member_id):
encoded_group = self._encode_group_id(group_id)
encoded_member_id = self._encode_member_id(member_id)
def _get_member_capabilities(p):
if not p.exists(encoded_group):
raise coordination.GroupNotCreated(group_id)
capabilities = p.hget(encoded_group, encoded_member_id)
if capabilities is None:
raise coordination.MemberNotJoined(group_id, member_id)
return self._loads(capabilities)
return RedisFutureResult(self._submit(self._client.transaction,
_get_member_capabilities,
encoded_group,
value_from_callable=True))
def join_group(self, group_id, capabilities=b""):
encoded_group = self._encode_group_id(group_id)
encoded_member_id = self._encode_member_id(self._member_id)
def _join_group(p):
if not p.exists(encoded_group):
raise coordination.GroupNotCreated(group_id)
p.multi()
p.hset(encoded_group, encoded_member_id,
self._dumps(capabilities))
c = p.execute()[0]
if c == 0:
# Field already exists...
raise coordination.MemberAlreadyExist(group_id,
self._member_id)
else:
self._joined_groups.add(group_id)
return RedisFutureResult(self._submit(self._client.transaction,
_join_group,
encoded_group,
value_from_callable=True))
def delete_group(self, group_id):
script = self._get_script('delete_group')
def _delete_group(script):
keys = [
self._encode_group_id(group_id),
self._groups,
]
args = [
self._encode_group_id(group_id, apply_namespace=False),
]
result = int(script(keys=keys, args=args))
if result in (-1, -2):
raise coordination.GroupNotCreated(group_id)
if result == -3:
raise coordination.GroupNotEmpty(group_id)
if result == -4:
raise tooz.ToozError("Unable to remove '%s' key"
" from set located at '%s'"
% (args[0], keys[-1]))
if result != 1:
raise tooz.ToozError("Internal error, unable"
" to complete group '%s' removal"
% (group_id))
return RedisFutureResult(self._submit(_delete_group, script))
def _destroy_group(self, group_id):
"""Should only be used in tests..."""
self._client.delete(self._encode_group_id(group_id))
def get_groups(self):
def _get_groups():
results = []
for g in self._client.smembers(self._groups):
results.append(self._decode_group_id(g))
return results
return RedisFutureResult(self._submit(_get_groups))
def _get_leader_lock(self, group_id):
name = self._encode_group_leader(group_id)
return self.get_lock(name)
def run_elect_coordinator(self):
for group_id, hooks in self._hooks_elected_leader.items():
leader_lock = self._get_leader_lock(group_id)
if leader_lock.acquire(blocking=False):
# We got the lock
hooks.run(coordination.LeaderElected(group_id,
self._member_id))
def run_watchers(self, timeout=None):
result = super(RedisDriver, self).run_watchers(timeout=timeout)
self.run_elect_coordinator()
return result
RedisFutureResult = functools.partial(coordination.CoordinatorResult)
| apache-2.0 | 6,796,789,495,924,240,000 | 37.581841 | 79 | 0.580259 | false |
srblum/server | scripts/generate_fasta.py | 5 | 3505 | """
Generate a random FASTA file
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import hashlib
import json
import math
import os
import random
import utils
class FastaGenerator(object):
"""
Generates a random FASTA file and metadata json.
"""
def __init__(self, args):
self.numBases = args.num_bases
self.outputPrefix = args.output_prefix
self.fastaFileName = "{}.fa".format(self.outputPrefix)
self.referenceId = os.path.split(args.output_prefix)[-1]
self.bases = ""
def writeFasta(self):
"""
Write the random fasta file
"""
utils.log("writing {} bases to {} ...".format(
self.numBases, self.fastaFileName))
with open(self.fastaFileName, 'w') as fastaFile:
firstLine = ">{} Generated by generate_fasta.py".format(
self.referenceId)
print(firstLine, file=fastaFile)
basesPerLine = 70
numLines = int(math.ceil(self.numBases / basesPerLine))
baseChoices = ['A', 'G', 'C', 'T']
basesRemaining = self.numBases
for i in range(numLines):
if basesRemaining < basesPerLine:
basesToWrite = basesRemaining
else:
basesToWrite = basesPerLine
bases = ''.join(
[random.choice(baseChoices) for _ in range(basesToWrite)])
line = "{}".format(bases)
self.bases += line
print(line, file=fastaFile)
basesRemaining -= basesToWrite
assert basesRemaining == 0
def writeMetadata(self):
"""
Write some metadata.
"""
metadata = {
"md5checksum": hashlib.md5(self.bases).hexdigest(),
"sourceUri": "http://example.com/random_url",
"ncbiTaxonId": random.randint(1, 10000),
"isDerived": False,
"sourceDivergence": None,
"sourceAccessions": [],
}
jsonFileName = "{}.json".format(self.outputPrefix)
utils.log("writing metadata to {} ...".format(jsonFileName))
with open(jsonFileName, "w") as jsonFile:
json.dump(metadata, jsonFile, indent=4)
def zipFasta(self):
"""
Compress the fasta file
"""
utils.log("zipping {} ...".format(self.fastaFileName))
cmd = "bgzip -f {}".format(self.fastaFileName)
utils.runCommand(cmd)
def indexFasta(self):
"""
Create index on the fasta file
"""
zipFileName = "{}.gz".format(self.fastaFileName)
utils.log("indexing {} ...".format(zipFileName))
cmd = "samtools faidx {}".format(zipFileName)
utils.runCommand(cmd)
def generate(self):
self.writeFasta()
self.writeMetadata()
self.zipFasta()
self.indexFasta()
def main():
parser = argparse.ArgumentParser(
description="Generate random FASTA files and metadata")
parser.add_argument(
"output_prefix", help="The prefix for generated files.")
basesDefault = 1000
parser.add_argument(
"--num-bases", "-n", default=basesDefault,
help="number of bases to include; default {}".format(basesDefault))
fastaGenerator = FastaGenerator(parser.parse_args())
fastaGenerator.generate()
if __name__ == '__main__':
main()
| apache-2.0 | 6,550,465,919,768,596,000 | 30.576577 | 78 | 0.573181 | false |
ioana-delaney/spark | python/pyspark/ml/param/__init__.py | 53 | 17172 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import array
import sys
if sys.version > '3':
basestring = str
xrange = range
unicode = str
from abc import ABCMeta
import copy
import numpy as np
from py4j.java_gateway import JavaObject
from pyspark.ml.linalg import DenseVector, Vector, Matrix
from pyspark.ml.util import Identifiable
__all__ = ['Param', 'Params', 'TypeConverters']
class Param(object):
"""
A param with self-contained documentation.
.. versionadded:: 1.3.0
"""
def __init__(self, parent, name, doc, typeConverter=None):
if not isinstance(parent, Identifiable):
raise TypeError("Parent must be an Identifiable but got type %s." % type(parent))
self.parent = parent.uid
self.name = str(name)
self.doc = str(doc)
self.typeConverter = TypeConverters.identity if typeConverter is None else typeConverter
def _copy_new_parent(self, parent):
"""Copy the current param to a new parent, must be a dummy param."""
if self.parent == "undefined":
param = copy.copy(self)
param.parent = parent.uid
return param
else:
raise ValueError("Cannot copy from non-dummy parent %s." % parent)
def __str__(self):
return str(self.parent) + "__" + self.name
def __repr__(self):
return "Param(parent=%r, name=%r, doc=%r)" % (self.parent, self.name, self.doc)
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
if isinstance(other, Param):
return self.parent == other.parent and self.name == other.name
else:
return False
class TypeConverters(object):
"""
.. note:: DeveloperApi
Factory methods for common type conversion functions for `Param.typeConverter`.
.. versionadded:: 2.0.0
"""
@staticmethod
def _is_numeric(value):
vtype = type(value)
return vtype in [int, float, np.float64, np.int64] or vtype.__name__ == 'long'
@staticmethod
def _is_integer(value):
return TypeConverters._is_numeric(value) and float(value).is_integer()
@staticmethod
def _can_convert_to_list(value):
vtype = type(value)
return vtype in [list, np.ndarray, tuple, xrange, array.array] or isinstance(value, Vector)
@staticmethod
def _can_convert_to_string(value):
vtype = type(value)
return isinstance(value, basestring) or vtype in [np.unicode_, np.string_, np.str_]
@staticmethod
def identity(value):
"""
Dummy converter that just returns value.
"""
return value
@staticmethod
def toList(value):
"""
Convert a value to a list, if possible.
"""
if type(value) == list:
return value
elif type(value) in [np.ndarray, tuple, xrange, array.array]:
return list(value)
elif isinstance(value, Vector):
return list(value.toArray())
else:
raise TypeError("Could not convert %s to list" % value)
@staticmethod
def toListFloat(value):
"""
Convert a value to list of floats, if possible.
"""
if TypeConverters._can_convert_to_list(value):
value = TypeConverters.toList(value)
if all(map(lambda v: TypeConverters._is_numeric(v), value)):
return [float(v) for v in value]
raise TypeError("Could not convert %s to list of floats" % value)
@staticmethod
def toListInt(value):
"""
Convert a value to list of ints, if possible.
"""
if TypeConverters._can_convert_to_list(value):
value = TypeConverters.toList(value)
if all(map(lambda v: TypeConverters._is_integer(v), value)):
return [int(v) for v in value]
raise TypeError("Could not convert %s to list of ints" % value)
@staticmethod
def toListString(value):
"""
Convert a value to list of strings, if possible.
"""
if TypeConverters._can_convert_to_list(value):
value = TypeConverters.toList(value)
if all(map(lambda v: TypeConverters._can_convert_to_string(v), value)):
return [TypeConverters.toString(v) for v in value]
raise TypeError("Could not convert %s to list of strings" % value)
@staticmethod
def toVector(value):
"""
Convert a value to a MLlib Vector, if possible.
"""
if isinstance(value, Vector):
return value
elif TypeConverters._can_convert_to_list(value):
value = TypeConverters.toList(value)
if all(map(lambda v: TypeConverters._is_numeric(v), value)):
return DenseVector(value)
raise TypeError("Could not convert %s to vector" % value)
@staticmethod
def toMatrix(value):
"""
Convert a value to a MLlib Matrix, if possible.
"""
if isinstance(value, Matrix):
return value
raise TypeError("Could not convert %s to matrix" % value)
@staticmethod
def toFloat(value):
"""
Convert a value to a float, if possible.
"""
if TypeConverters._is_numeric(value):
return float(value)
else:
raise TypeError("Could not convert %s to float" % value)
@staticmethod
def toInt(value):
"""
Convert a value to an int, if possible.
"""
if TypeConverters._is_integer(value):
return int(value)
else:
raise TypeError("Could not convert %s to int" % value)
@staticmethod
def toString(value):
"""
Convert a value to a string, if possible.
"""
if isinstance(value, basestring):
return value
elif type(value) in [np.string_, np.str_]:
return str(value)
elif type(value) == np.unicode_:
return unicode(value)
else:
raise TypeError("Could not convert %s to string type" % type(value))
@staticmethod
def toBoolean(value):
"""
Convert a value to a boolean, if possible.
"""
if type(value) == bool:
return value
else:
raise TypeError("Boolean Param requires value of type bool. Found %s." % type(value))
class Params(Identifiable):
"""
Components that take parameters. This also provides an internal
param map to store parameter values attached to the instance.
.. versionadded:: 1.3.0
"""
__metaclass__ = ABCMeta
def __init__(self):
super(Params, self).__init__()
#: internal param map for user-supplied values param map
self._paramMap = {}
#: internal param map for default values
self._defaultParamMap = {}
#: value returned by :py:func:`params`
self._params = None
# Copy the params from the class to the object
self._copy_params()
def _copy_params(self):
"""
Copy all params defined on the class to current object.
"""
cls = type(self)
src_name_attrs = [(x, getattr(cls, x)) for x in dir(cls)]
src_params = list(filter(lambda nameAttr: isinstance(nameAttr[1], Param), src_name_attrs))
for name, param in src_params:
setattr(self, name, param._copy_new_parent(self))
@property
def params(self):
"""
Returns all params ordered by name. The default implementation
uses :py:func:`dir` to get all attributes of type
:py:class:`Param`.
"""
if self._params is None:
self._params = list(filter(lambda attr: isinstance(attr, Param),
[getattr(self, x) for x in dir(self) if x != "params" and
not isinstance(getattr(type(self), x, None), property)]))
return self._params
def explainParam(self, param):
"""
Explains a single param and returns its name, doc, and optional
default value and user-supplied value in a string.
"""
param = self._resolveParam(param)
values = []
if self.isDefined(param):
if param in self._defaultParamMap:
values.append("default: %s" % self._defaultParamMap[param])
if param in self._paramMap:
values.append("current: %s" % self._paramMap[param])
else:
values.append("undefined")
valueStr = "(" + ", ".join(values) + ")"
return "%s: %s %s" % (param.name, param.doc, valueStr)
def explainParams(self):
"""
Returns the documentation of all params with their optionally
default values and user-supplied values.
"""
return "\n".join([self.explainParam(param) for param in self.params])
def getParam(self, paramName):
"""
Gets a param by its name.
"""
param = getattr(self, paramName)
if isinstance(param, Param):
return param
else:
raise ValueError("Cannot find param with name %s." % paramName)
def isSet(self, param):
"""
Checks whether a param is explicitly set by user.
"""
param = self._resolveParam(param)
return param in self._paramMap
def hasDefault(self, param):
"""
Checks whether a param has a default value.
"""
param = self._resolveParam(param)
return param in self._defaultParamMap
def isDefined(self, param):
"""
Checks whether a param is explicitly set by user or has
a default value.
"""
return self.isSet(param) or self.hasDefault(param)
def hasParam(self, paramName):
"""
Tests whether this instance contains a param with a given
(string) name.
"""
if isinstance(paramName, basestring):
p = getattr(self, paramName, None)
return isinstance(p, Param)
else:
raise TypeError("hasParam(): paramName must be a string")
def getOrDefault(self, param):
"""
Gets the value of a param in the user-supplied param map or its
default value. Raises an error if neither is set.
"""
param = self._resolveParam(param)
if param in self._paramMap:
return self._paramMap[param]
else:
return self._defaultParamMap[param]
def extractParamMap(self, extra=None):
"""
Extracts the embedded default param values and user-supplied
values, and then merges them with extra values from input into
a flat param map, where the latter value is used if there exist
conflicts, i.e., with ordering: default param values <
user-supplied values < extra.
:param extra: extra param values
:return: merged param map
"""
if extra is None:
extra = dict()
paramMap = self._defaultParamMap.copy()
paramMap.update(self._paramMap)
paramMap.update(extra)
return paramMap
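    # A small illustration of the merge order, assuming a hypothetical Params
    # instance ``m`` with a param ``p`` whose default value is 10:
    #
    #   m.extractParamMap()[p]          == 10    # default only
    #   m.set(p, 20)
    #   m.extractParamMap()[p]          == 20    # user-supplied beats default
    #   m.extractParamMap({p: 30})[p]   == 30    # extra beats both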
def copy(self, extra=None):
"""
Creates a copy of this instance with the same uid and some
extra params. The default implementation creates a
shallow copy using :py:func:`copy.copy`, and then copies the
embedded and extra parameters over and returns the copy.
Subclasses should override this method if the default approach
is not sufficient.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
"""
if extra is None:
extra = dict()
that = copy.copy(self)
that._paramMap = {}
that._defaultParamMap = {}
return self._copyValues(that, extra)
def set(self, param, value):
"""
Sets a parameter in the embedded param map.
"""
self._shouldOwn(param)
try:
value = param.typeConverter(value)
except ValueError as e:
raise ValueError('Invalid param value given for param "%s". %s' % (param.name, e))
self._paramMap[param] = value
def _shouldOwn(self, param):
"""
Validates that the input param belongs to this Params instance.
"""
if not (self.uid == param.parent and self.hasParam(param.name)):
raise ValueError("Param %r does not belong to %r." % (param, self))
def _resolveParam(self, param):
"""
Resolves a param and validates the ownership.
:param param: param name or the param instance, which must
belong to this Params instance
:return: resolved param instance
"""
if isinstance(param, Param):
self._shouldOwn(param)
return param
elif isinstance(param, basestring):
return self.getParam(param)
else:
raise ValueError("Cannot resolve %r as a param." % param)
@staticmethod
def _dummy():
"""
Returns a dummy Params instance used as a placeholder to
generate docs.
"""
dummy = Params()
dummy.uid = "undefined"
return dummy
def _set(self, **kwargs):
"""
Sets user-supplied params.
"""
for param, value in kwargs.items():
p = getattr(self, param)
if value is not None:
try:
value = p.typeConverter(value)
except TypeError as e:
raise TypeError('Invalid param value given for param "%s". %s' % (p.name, e))
self._paramMap[p] = value
return self
def _clear(self, param):
"""
Clears a param from the param map if it has been explicitly set.
"""
if self.isSet(param):
del self._paramMap[param]
def _setDefault(self, **kwargs):
"""
Sets default params.
"""
for param, value in kwargs.items():
p = getattr(self, param)
if value is not None and not isinstance(value, JavaObject):
try:
value = p.typeConverter(value)
except TypeError as e:
raise TypeError('Invalid default param value given for param "%s". %s'
% (p.name, e))
self._defaultParamMap[p] = value
return self
def _copyValues(self, to, extra=None):
"""
Copies param values from this instance to another instance for
params shared by them.
:param to: the target instance
:param extra: extra params to be copied
:return: the target instance with param values copied
"""
paramMap = self._paramMap.copy()
if extra is not None:
paramMap.update(extra)
for param in self.params:
# copy default params
if param in self._defaultParamMap and to.hasParam(param.name):
to._defaultParamMap[to.getParam(param.name)] = self._defaultParamMap[param]
# copy explicitly set params
if param in paramMap and to.hasParam(param.name):
to._set(**{param.name: paramMap[param]})
return to
def _resetUid(self, newUid):
"""
Changes the uid of this instance. This updates both
the stored uid and the parent uid of params and param maps.
This is used by persistence (loading).
:param newUid: new uid to use, which is converted to unicode
:return: same instance, but with the uid and Param.parent values
updated, including within param maps
"""
newUid = unicode(newUid)
self.uid = newUid
newDefaultParamMap = dict()
newParamMap = dict()
for param in self.params:
newParam = copy.copy(param)
newParam.parent = newUid
if param in self._defaultParamMap:
newDefaultParamMap[newParam] = self._defaultParamMap[param]
if param in self._paramMap:
newParamMap[newParam] = self._paramMap[param]
param.parent = newUid
self._defaultParamMap = newDefaultParamMap
self._paramMap = newParamMap
return self
| apache-2.0 | 1,622,447,795,900,820,200 | 32.604697 | 99 | 0.585896 | false |
jtorrents/networkx | doc/source/conf.py | 2 | 5695 | # -*- coding: utf-8 -*-
#
# Sphinx documentation build configuration file, created by
# sphinx-quickstart.py on Sat Mar 8 21:47:50 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os, re
# Check Sphinx version
import sphinx
if sphinx.__version__ < "1.0.1":
raise RuntimeError("Sphinx 1.0.1 or newer required")
# If your extensions are in another directory, add it here.
sys.path.append(os.path.abspath('../sphinxext'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.addons.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
# 'sphinx.ext.mathjax',
'numpydoc',
'sphinx.ext.coverage',
'sphinx.ext.autosummary','sphinx.ext.todo','sphinx.ext.doctest',
'sphinx.ext.intersphinx', 'customroles']
# generate autosummary pages
autosummary_generate=True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates','../rst_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'NetworkX'
copyright = '2013, NetworkX Developers'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
import networkx
version = networkx.__version__
# The full version, including dev info
release = networkx.__version__.replace('_','')
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = ['reference/pdf_reference']
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'friendly'
pygments_style = 'sphinx'
# A list of prefixes that are ignored when creating the module index. (new in Sphinx 0.6)
modindex_common_prefix=['networkx.']
doctest_global_setup="import networkx as nx"
# Options for HTML output
# -----------------------
html_theme = "sphinxdoc"
#html_theme_options = {
# "rightsidebar": "true",
# "relbarbgcolor: "black"
#}
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'networkx.css'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
#html_index = 'index.html'
html_index = 'contents.html'
# Custom sidebar templates, maps page names to templates.
#html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# templates.
#html_additional_pages = {'index': 'index.html','gallery':'gallery.html'}
html_additional_pages = {'gallery':'gallery.html'}
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = False
html_use_opensearch = 'http://networkx.lanl.gov'
# Output file base name for HTML help builder.
htmlhelp_basename = 'NetworkX'
pngmath_use_preview = True
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [('tutorial/index', 'networkx_tutorial.tex',
'NetworkX Tutorial',
'Aric Hagberg, Dan Schult, Pieter Swart', 'howto', 1),
('reference/pdf_reference', 'networkx_reference.tex',
'NetworkX Reference',
'Aric Hagberg, Dan Schult, Pieter Swart', 'manual', 1)]
#latex_appendices = ['installing']#,'legal'],'citing','credits','history']
#latex_appendices = ['credits']
# Intersphinx mapping
intersphinx_mapping = {'http://docs.python.org/': None,
'http://docs.scipy.org/doc/numpy/': None,
}
# For trac custom roles
trac_url = 'https://networkx.lanl.gov/trac/'
default_role = 'math'
#mathjax_path = 'http://mathjax.connectmv.com/MathJax.js'
| bsd-3-clause | -8,630,463,174,791,881,000 | 31.175141 | 88 | 0.676734 | false |
qedi-r/home-assistant | tests/components/sleepiq/test_init.py | 4 | 2776 | """The tests for the SleepIQ component."""
import unittest
from unittest.mock import MagicMock, patch
import requests_mock
from homeassistant import setup
import homeassistant.components.sleepiq as sleepiq
from tests.common import load_fixture, get_test_home_assistant
def mock_responses(mock, single=False):
"""Mock responses for SleepIQ."""
base_url = "https://prod-api.sleepiq.sleepnumber.com/rest/"
if single:
suffix = "-single"
else:
suffix = ""
mock.put(base_url + "login", text=load_fixture("sleepiq-login.json"))
mock.get(
base_url + "bed?_k=0987", text=load_fixture("sleepiq-bed{}.json".format(suffix))
)
mock.get(base_url + "sleeper?_k=0987", text=load_fixture("sleepiq-sleeper.json"))
mock.get(
base_url + "bed/familyStatus?_k=0987",
text=load_fixture("sleepiq-familystatus{}.json".format(suffix)),
)
class TestSleepIQ(unittest.TestCase):
"""Tests the SleepIQ component."""
def setUp(self):
"""Initialize values for this test case class."""
self.hass = get_test_home_assistant()
self.username = "foo"
self.password = "bar"
self.config = {
"sleepiq": {"username": self.username, "password": self.password}
}
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
@requests_mock.Mocker()
def test_setup(self, mock):
"""Test the setup."""
mock_responses(mock)
# We're mocking the load_platform discoveries or else the platforms
# will be setup during tear down when blocking till done, but the mocks
# are no longer active.
with patch("homeassistant.helpers.discovery.load_platform", MagicMock()):
assert sleepiq.setup(self.hass, self.config)
@requests_mock.Mocker()
def test_setup_login_failed(self, mock):
"""Test the setup if a bad username or password is given."""
mock.put(
"https://prod-api.sleepiq.sleepnumber.com/rest/login",
status_code=401,
json=load_fixture("sleepiq-login-failed.json"),
)
response = sleepiq.setup(self.hass, self.config)
assert not response
def test_setup_component_no_login(self):
"""Test the setup when no login is configured."""
conf = self.config.copy()
del conf["sleepiq"]["username"]
assert not setup.setup_component(self.hass, sleepiq.DOMAIN, conf)
def test_setup_component_no_password(self):
"""Test the setup when no password is configured."""
conf = self.config.copy()
del conf["sleepiq"]["password"]
assert not setup.setup_component(self.hass, sleepiq.DOMAIN, conf)
| apache-2.0 | -2,349,374,305,424,731,000 | 33.271605 | 88 | 0.636527 | false |
lemonsong/lemonsong.github.io | blog/pelican-plugins/representative_image/test_representative_image.py | 63 | 2040 | #!/bin/sh
import unittest
from jinja2.utils import generate_lorem_ipsum
# Generate content with image
TEST_CONTENT_IMAGE_URL = 'https://testimage.com/test.jpg'
TEST_CONTENT = str(generate_lorem_ipsum(n=3, html=True)) + '<img src="' + TEST_CONTENT_IMAGE_URL + '"/>'+ str(generate_lorem_ipsum(n=2,html=True))
TEST_SUMMARY_IMAGE_URL = 'https://testimage.com/summary.jpg'
TEST_SUMMARY_WITHOUTIMAGE = str(generate_lorem_ipsum(n=1, html=True))
TEST_SUMMARY_WITHIMAGE = TEST_SUMMARY_WITHOUTIMAGE + '<img src="' + TEST_SUMMARY_IMAGE_URL + '"/>'
TEST_CUSTOM_IMAGE_URL = 'https://testimage.com/custom.jpg'
from pelican.contents import Article
import representative_image
class TestRepresentativeImage(unittest.TestCase):
def setUp(self):
super(TestRepresentativeImage, self).setUp()
representative_image.register()
def test_extract_image_from_content(self):
args = {
'content': TEST_CONTENT,
'metadata': {
'summary': TEST_SUMMARY_WITHOUTIMAGE,
},
}
article = Article(**args)
self.assertEqual(article.featured_image, TEST_CONTENT_IMAGE_URL)
def test_extract_image_from_summary(self):
args = {
'content': TEST_CONTENT,
'metadata': {
'summary': TEST_SUMMARY_WITHIMAGE,
},
}
article = Article(**args)
self.assertEqual(article.featured_image, TEST_SUMMARY_IMAGE_URL)
self.assertEqual(article.summary, TEST_SUMMARY_WITHOUTIMAGE)
def test_extract_image_from_summary_with_custom_image(self):
args = {
'content': TEST_CONTENT,
'metadata': {
'summary': TEST_SUMMARY_WITHIMAGE,
'image': TEST_CUSTOM_IMAGE_URL,
},
}
article = Article(**args)
self.assertEqual(article.featured_image, TEST_CUSTOM_IMAGE_URL)
self.assertEqual(article.summary, TEST_SUMMARY_WITHOUTIMAGE)
if __name__ == '__main__':
unittest.main()
| mit | 5,224,455,482,497,638,000 | 29 | 146 | 0.621078 | false |
tjlaboss/openmc | tests/regression_tests/source_parameterized_dlopen/test.py | 7 | 2162 | from pathlib import Path
import os
import shutil
import subprocess
import textwrap
import openmc
import pytest
from tests.testing_harness import PyAPITestHarness
@pytest.fixture
def compile_source(request):
"""Compile the external source"""
# Get build directory and write CMakeLists.txt file
openmc_dir = Path(str(request.config.rootdir)) / 'build'
with open('CMakeLists.txt', 'w') as f:
f.write(textwrap.dedent("""
cmake_minimum_required(VERSION 3.3 FATAL_ERROR)
project(openmc_sources CXX)
add_library(source SHARED parameterized_source_sampling.cpp)
find_package(OpenMC REQUIRED HINTS {})
target_link_libraries(source OpenMC::libopenmc)
""".format(openmc_dir)))
# Create temporary build directory and change to there
local_builddir = Path('build')
local_builddir.mkdir(exist_ok=True)
os.chdir(str(local_builddir))
    # Run cmake/make to build the shared library
subprocess.run(['cmake', os.path.pardir], check=True)
subprocess.run(['make'], check=True)
os.chdir(os.path.pardir)
yield
# Remove local build directory when test is complete
shutil.rmtree('build')
os.remove('CMakeLists.txt')
@pytest.fixture
def model():
model = openmc.model.Model()
natural_lead = openmc.Material(name="natural_lead")
natural_lead.add_element('Pb', 1.0)
natural_lead.set_density('g/cm3', 11.34)
model.materials.append(natural_lead)
# geometry
surface_sph1 = openmc.Sphere(r=100, boundary_type='vacuum')
cell_1 = openmc.Cell(fill=natural_lead, region=-surface_sph1)
model.geometry = openmc.Geometry([cell_1])
# settings
model.settings.batches = 10
model.settings.inactive = 0
model.settings.particles = 1000
model.settings.run_mode = 'fixed source'
# custom source from shared library
source = openmc.Source()
source.library = 'build/libsource.so'
source.parameters = '1e3'
model.settings.source = source
return model
def test_dlopen_source(compile_source, model):
harness = PyAPITestHarness('statepoint.10.h5', model)
harness.main()
| mit | 4,885,972,692,589,280,000 | 27.826667 | 72 | 0.684089 | false |
cbitterfield/JobCard | example/code_blocks.py | 1 | 1238 | # Variables for standard use:
config = {}
jobcard = {}
noexec = True
command = {}
command_status = {}
command_name = "example"
CMD = ''
item_src = ''
# Standard Imports
import os
import job
import logging
import subprocess
logger = logging.getLogger(__name__)
# Code Block - Run a command and check results
#
command_name = 'MyCommand'
# Run Command
if noexec:
command[command_name] = subprocess.Popen("echo", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
logger.warning("Running Command - " + str(command_name))
command[command_name] = subprocess.Popen(CMD, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
logger.info( "COMMAND" + command_name + " for "+ item_src + " Started" )
# Check if Command executed
logger.info("Check if " + str(command_name) + " Completed")
stdoutdata, stderrdata = command[command_name].communicate()
command_status[command_name] = command[command_name].returncode
if command_status[command_name] == 0:
logger.info(str(command_name) + " Completed, returned Status: " + str(command_status[command_name]))
else:
logger.error(str(command_name) + "failed, with Status:"+ str(command_status[command_name]))
Error = True
| gpl-3.0 | 5,514,271,849,741,310,000 | 24.8125 | 116 | 0.693861 | false |
anilthanki/tgac-galaxytools | tools/Ensembl-REST/get_feature_info.py | 2 | 1637 | # A simple tool to connect to the Ensembl server and retrieve feature
# information using the Ensembl REST API.
from __future__ import print_function
import json
import optparse
from itertools import islice
import requests
from six.moves.urllib.parse import urljoin
parser = optparse.OptionParser()
parser.add_option('-i', '--input', help='List of Ensembl IDs')
parser.add_option('-e', '--expand', type='choice', choices=['0', '1'],
default='0',
help='Expands the search to include any connected features. e.g. If the object is a gene, its transcripts, translations and exons will be returned as well.')
parser.add_option('-f', '--format', type='choice',
choices=['full', 'condensed'], default='full',
help='Specify the formats to emit from this endpoint')
options, args = parser.parse_args()
if options.input is None:
raise Exception('-i option must be specified')
server = 'http://rest.ensembl.org'
ext = 'lookup/id'
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
params = dict((k, getattr(options, k)) for k in ['format', 'expand'])
first = True
print('{')
with open(options.input) as f:
while True:
ids = [line.strip() for line in islice(f, 50)]
if not ids:
break
if not first:
print(",")
data = {'ids': ids}
r = requests.post(urljoin(server, ext), params=params, headers=headers,
data=json.dumps(data))
if not r.ok:
r.raise_for_status()
print(r.text[1:-1])
first = False
print('}')
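# Illustrative invocation (the file name is hypothetical):
#   python get_feature_info.py -i ensembl_ids.txt -e 1 -f condensed > out.json
# where ensembl_ids.txt holds one Ensembl stable ID per line
# (e.g. ENSG00000157764); IDs are POSTed to /lookup/id in batches of 50.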
| mit | -4,122,030,656,891,136,500 | 29.314815 | 175 | 0.618815 | false |
chemelnucfin/tensorflow | tensorflow/contrib/grid_rnn/python/ops/grid_rnn_cell.py | 19 | 23308 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for constructing GridRNN cells"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import functools
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
from tensorflow.contrib import layers
from tensorflow.contrib import rnn
class GridRNNCell(rnn.RNNCell):
"""Grid recurrent cell.
This implementation is based on:
http://arxiv.org/pdf/1507.01526v3.pdf
  This is the generic implementation of GridRNN. Users can specify an
  arbitrary number of dimensions, set some of them to be priority
  (section 3.2), non-recurrent (section 3.3), and input/output dimensions
  (section 3.4).
Weight sharing can also be specified using the `tied` parameter.
Type of recurrent units can be specified via `cell_fn`.
"""
def __init__(self,
num_units,
num_dims=1,
input_dims=None,
output_dims=None,
priority_dims=None,
non_recurrent_dims=None,
tied=False,
cell_fn=None,
non_recurrent_fn=None,
state_is_tuple=True,
output_is_tuple=True):
"""Initialize the parameters of a Grid RNN cell
Args:
num_units: int, The number of units in all dimensions of this GridRNN cell
num_dims: int, Number of dimensions of this grid.
input_dims: int or list, List of dimensions which will receive input data.
output_dims: int or list, List of dimensions from which the output will be
recorded.
priority_dims: int or list, List of dimensions to be considered as
priority dimensions.
If None, no dimension is prioritized.
non_recurrent_dims: int or list, List of dimensions that are not
recurrent.
        The transfer function for non-recurrent dimensions is specified
        via `non_recurrent_fn`, which defaults to `tensorflow.nn.relu`.
tied: bool, Whether to share the weights among the dimensions of this
GridRNN cell.
If there are non-recurrent dimensions in the grid, weights are
shared between each group of recurrent and non-recurrent
dimensions.
cell_fn: function, a function which returns the recurrent cell object.
Has to be in the following signature:
```
def cell_func(num_units):
# ...
```
and returns an object of type `RNNCell`. If None, LSTMCell with
default parameters will be used.
Note that if you use a custom RNNCell (with `cell_fn`), it is your
responsibility to make sure the inner cell use `state_is_tuple=True`.
non_recurrent_fn: a tensorflow Op that will be the transfer function of
the non-recurrent dimensions
state_is_tuple: If True, accepted and returned states are tuples of the
states of the recurrent dimensions. If False, they are concatenated
along the column axis. The latter behavior will soon be deprecated.
Note that if you use a custom RNNCell (with `cell_fn`), it is your
responsibility to make sure the inner cell use `state_is_tuple=True`.
output_is_tuple: If True, the output is a tuple of the outputs of the
recurrent dimensions. If False, they are concatenated along the
        column axis. The latter behavior will soon be deprecated.
Raises:
TypeError: if cell_fn does not return an RNNCell instance.
"""
if not state_is_tuple:
logging.warning('%s: Using a concatenated state is slower and will '
'soon be deprecated. Use state_is_tuple=True.', self)
if not output_is_tuple:
logging.warning('%s: Using a concatenated output is slower and will '
'soon be deprecated. Use output_is_tuple=True.', self)
if num_dims < 1:
raise ValueError('dims must be >= 1: {}'.format(num_dims))
self._config = _parse_rnn_config(num_dims, input_dims, output_dims,
priority_dims, non_recurrent_dims,
non_recurrent_fn or nn.relu, tied,
num_units)
self._state_is_tuple = state_is_tuple
self._output_is_tuple = output_is_tuple
if cell_fn is None:
my_cell_fn = functools.partial(
rnn.LSTMCell, num_units=num_units, state_is_tuple=state_is_tuple)
else:
my_cell_fn = lambda: cell_fn(num_units)
if tied:
self._cells = [my_cell_fn()] * num_dims
else:
self._cells = [my_cell_fn() for _ in range(num_dims)]
if not isinstance(self._cells[0], rnn.RNNCell):
raise TypeError('cell_fn must return an RNNCell instance, saw: %s' %
type(self._cells[0]))
if self._output_is_tuple:
self._output_size = tuple(self._cells[0].output_size
for _ in self._config.outputs)
else:
self._output_size = self._cells[0].output_size * len(self._config.outputs)
if self._state_is_tuple:
self._state_size = tuple(self._cells[0].state_size
for _ in self._config.recurrents)
else:
self._state_size = self._cell_state_size() * len(self._config.recurrents)
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
def __call__(self, inputs, state, scope=None):
"""Run one step of GridRNN.
Args:
inputs: input Tensor, 2D, batch x input_size. Or None
state: state Tensor, 2D, batch x state_size. Note that state_size =
cell_state_size * recurrent_dims
scope: VariableScope for the created subgraph; defaults to "GridRNNCell".
Returns:
A tuple containing:
- A 2D, batch x output_size, Tensor representing the output of the cell
after reading "inputs" when previous state was "state".
- A 2D, batch x state_size, Tensor representing the new state of the cell
after reading "inputs" when previous state was "state".
"""
conf = self._config
dtype = inputs.dtype
c_prev, m_prev, cell_output_size = self._extract_states(state)
new_output = [None] * conf.num_dims
new_state = [None] * conf.num_dims
with vs.variable_scope(scope or type(self).__name__): # GridRNNCell
# project input, populate c_prev and m_prev
self._project_input(inputs, c_prev, m_prev, cell_output_size > 0)
# propagate along dimensions, first for non-priority dimensions
# then priority dimensions
_propagate(conf.non_priority, conf, self._cells, c_prev, m_prev,
new_output, new_state, True)
_propagate(conf.priority, conf, self._cells,
c_prev, m_prev, new_output, new_state, False)
# collect outputs and states
output_tensors = [new_output[i] for i in self._config.outputs]
if self._output_is_tuple:
output = tuple(output_tensors)
else:
if output_tensors:
output = array_ops.concat(output_tensors, 1)
else:
output = array_ops.zeros([0, 0], dtype)
if self._state_is_tuple:
states = tuple(new_state[i] for i in self._config.recurrents)
else:
# concat each state first, then flatten the whole thing
state_tensors = [
x for i in self._config.recurrents for x in new_state[i]
]
if state_tensors:
states = array_ops.concat(state_tensors, 1)
else:
states = array_ops.zeros([0, 0], dtype)
return output, states
def _extract_states(self, state):
"""Extract the cell and previous output tensors from the given state.
Args:
state: The RNN state.
Returns:
Tuple of the cell value, previous output, and cell_output_size.
Raises:
ValueError: If len(self._config.recurrents) != len(state).
"""
conf = self._config
# c_prev is `m` (cell value), and
# m_prev is `h` (previous output) in the paper.
# Keeping c and m here for consistency with the codebase
c_prev = [None] * conf.num_dims
m_prev = [None] * conf.num_dims
# for LSTM : state = memory cell + output, hence cell_output_size > 0
# for GRU/RNN: state = output (whose size is equal to _num_units),
# hence cell_output_size = 0
total_cell_state_size = self._cell_state_size()
cell_output_size = total_cell_state_size - conf.num_units
if self._state_is_tuple:
if len(conf.recurrents) != len(state):
raise ValueError('Expected state as a tuple of {} '
'element'.format(len(conf.recurrents)))
for recurrent_dim, recurrent_state in zip(conf.recurrents, state):
if cell_output_size > 0:
c_prev[recurrent_dim], m_prev[recurrent_dim] = recurrent_state
else:
m_prev[recurrent_dim] = recurrent_state
else:
for recurrent_dim, start_idx in zip(conf.recurrents,
range(0, self.state_size,
total_cell_state_size)):
if cell_output_size > 0:
c_prev[recurrent_dim] = array_ops.slice(state, [0, start_idx],
[-1, conf.num_units])
m_prev[recurrent_dim] = array_ops.slice(
state, [0, start_idx + conf.num_units], [-1, cell_output_size])
else:
m_prev[recurrent_dim] = array_ops.slice(state, [0, start_idx],
[-1, conf.num_units])
return c_prev, m_prev, cell_output_size
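  # For example, with an inner LSTM of num_units=3 and two recurrent
  # dimensions, each dimension stores (c, m) of width 3 + 3 = 6, so a
  # concatenated state (state_is_tuple=False) is 12 wide, laid out as
  # [c0, m0, c1, m1]. For GRU/RNN inner cells cell_output_size is 0 and
  # only m_prev is populated.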
def _project_input(self, inputs, c_prev, m_prev, with_c):
"""Fills in c_prev and m_prev with projected input, for input dimensions.
Args:
inputs: inputs tensor
c_prev: cell value
m_prev: previous output
with_c: boolean; whether to include project_c.
Raises:
ValueError: if len(self._config.input) != len(inputs)
"""
conf = self._config
if (inputs is not None and
tensor_shape.dimension_value(inputs.shape.with_rank(2)[1]) > 0 and
conf.inputs):
if isinstance(inputs, tuple):
if len(conf.inputs) != len(inputs):
raise ValueError('Expect inputs as a tuple of {} '
'tensors'.format(len(conf.inputs)))
input_splits = inputs
else:
input_splits = array_ops.split(
value=inputs, num_or_size_splits=len(conf.inputs), axis=1)
input_sz = tensor_shape.dimension_value(
input_splits[0].shape.with_rank(2)[1])
for i, j in enumerate(conf.inputs):
input_project_m = vs.get_variable(
'project_m_{}'.format(j), [input_sz, conf.num_units],
dtype=inputs.dtype)
m_prev[j] = math_ops.matmul(input_splits[i], input_project_m)
if with_c:
input_project_c = vs.get_variable(
'project_c_{}'.format(j), [input_sz, conf.num_units],
dtype=inputs.dtype)
c_prev[j] = math_ops.matmul(input_splits[i], input_project_c)
def _cell_state_size(self):
"""Total size of the state of the inner cell used in this grid.
Returns:
Total size of the state of the inner cell.
"""
state_sizes = self._cells[0].state_size
if isinstance(state_sizes, tuple):
return sum(state_sizes)
return state_sizes
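# Example usage sketch (assumes TF1-style graph mode; `batch_size` and
# `input_size` are caller-chosen, not defined here):
#   cell = GridRNNCell(num_units=8, num_dims=2, input_dims=0, output_dims=0)
#   inputs = tf.placeholder(tf.float32, [batch_size, input_size])
#   state = cell.zero_state(batch_size, tf.float32)
#   output, new_state = cell(inputs, state)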
"""Specialized cells, for convenience
"""
class Grid1BasicRNNCell(GridRNNCell):
"""1D BasicRNN cell"""
def __init__(self, num_units, state_is_tuple=True, output_is_tuple=True):
super(Grid1BasicRNNCell, self).__init__(
num_units=num_units,
num_dims=1,
input_dims=0,
output_dims=0,
priority_dims=0,
tied=False,
cell_fn=lambda n: rnn.BasicRNNCell(num_units=n),
state_is_tuple=state_is_tuple,
output_is_tuple=output_is_tuple)
class Grid2BasicRNNCell(GridRNNCell):
"""2D BasicRNN cell
This creates a 2D cell which receives input and gives output in the first
dimension.
The first dimension can optionally be non-recurrent if `non_recurrent_fn` is
specified.
"""
def __init__(self,
num_units,
tied=False,
non_recurrent_fn=None,
state_is_tuple=True,
output_is_tuple=True):
super(Grid2BasicRNNCell, self).__init__(
num_units=num_units,
num_dims=2,
input_dims=0,
output_dims=0,
priority_dims=0,
tied=tied,
non_recurrent_dims=None if non_recurrent_fn is None else 0,
cell_fn=lambda n: rnn.BasicRNNCell(num_units=n),
non_recurrent_fn=non_recurrent_fn,
state_is_tuple=state_is_tuple,
output_is_tuple=output_is_tuple)
class Grid1BasicLSTMCell(GridRNNCell):
"""1D BasicLSTM cell."""
def __init__(self,
num_units,
forget_bias=1,
state_is_tuple=True,
output_is_tuple=True):
def cell_fn(n):
return rnn.BasicLSTMCell(num_units=n, forget_bias=forget_bias)
super(Grid1BasicLSTMCell, self).__init__(
num_units=num_units,
num_dims=1,
input_dims=0,
output_dims=0,
priority_dims=0,
tied=False,
cell_fn=cell_fn,
state_is_tuple=state_is_tuple,
output_is_tuple=output_is_tuple)
class Grid2BasicLSTMCell(GridRNNCell):
"""2D BasicLSTM cell.
This creates a 2D cell which receives input and gives output in the first
dimension.
The first dimension can optionally be non-recurrent if `non_recurrent_fn` is
specified.
"""
def __init__(self,
num_units,
tied=False,
non_recurrent_fn=None,
forget_bias=1,
state_is_tuple=True,
output_is_tuple=True):
def cell_fn(n):
return rnn.BasicLSTMCell(num_units=n, forget_bias=forget_bias)
super(Grid2BasicLSTMCell, self).__init__(
num_units=num_units,
num_dims=2,
input_dims=0,
output_dims=0,
priority_dims=0,
tied=tied,
non_recurrent_dims=None if non_recurrent_fn is None else 0,
cell_fn=cell_fn,
non_recurrent_fn=non_recurrent_fn,
state_is_tuple=state_is_tuple,
output_is_tuple=output_is_tuple)
class Grid1LSTMCell(GridRNNCell):
"""1D LSTM cell.
This is different from Grid1BasicLSTMCell because it gives options to
specify the forget bias and enabling peepholes.
"""
def __init__(self,
num_units,
use_peepholes=False,
forget_bias=1.0,
state_is_tuple=True,
output_is_tuple=True):
def cell_fn(n):
return rnn.LSTMCell(
num_units=n, forget_bias=forget_bias, use_peepholes=use_peepholes)
super(Grid1LSTMCell, self).__init__(
num_units=num_units,
num_dims=1,
input_dims=0,
output_dims=0,
priority_dims=0,
cell_fn=cell_fn,
state_is_tuple=state_is_tuple,
output_is_tuple=output_is_tuple)
class Grid2LSTMCell(GridRNNCell):
"""2D LSTM cell.
This creates a 2D cell which receives input and gives output in the first
dimension.
The first dimension can optionally be non-recurrent if `non_recurrent_fn` is
specified.
"""
def __init__(self,
num_units,
tied=False,
non_recurrent_fn=None,
use_peepholes=False,
forget_bias=1.0,
state_is_tuple=True,
output_is_tuple=True):
def cell_fn(n):
return rnn.LSTMCell(
num_units=n, forget_bias=forget_bias, use_peepholes=use_peepholes)
super(Grid2LSTMCell, self).__init__(
num_units=num_units,
num_dims=2,
input_dims=0,
output_dims=0,
priority_dims=0,
tied=tied,
non_recurrent_dims=None if non_recurrent_fn is None else 0,
cell_fn=cell_fn,
non_recurrent_fn=non_recurrent_fn,
state_is_tuple=state_is_tuple,
output_is_tuple=output_is_tuple)
class Grid3LSTMCell(GridRNNCell):
"""3D BasicLSTM cell.
This creates a 2D cell which receives input and gives output in the first
dimension.
The first dimension can optionally be non-recurrent if `non_recurrent_fn` is
specified.
The second and third dimensions are LSTM.
"""
def __init__(self,
num_units,
tied=False,
non_recurrent_fn=None,
use_peepholes=False,
forget_bias=1.0,
state_is_tuple=True,
output_is_tuple=True):
def cell_fn(n):
return rnn.LSTMCell(
num_units=n, forget_bias=forget_bias, use_peepholes=use_peepholes)
super(Grid3LSTMCell, self).__init__(
num_units=num_units,
num_dims=3,
input_dims=0,
output_dims=0,
priority_dims=0,
tied=tied,
non_recurrent_dims=None if non_recurrent_fn is None else 0,
cell_fn=cell_fn,
non_recurrent_fn=non_recurrent_fn,
state_is_tuple=state_is_tuple,
output_is_tuple=output_is_tuple)
class Grid2GRUCell(GridRNNCell):
"""2D LSTM cell.
This creates a 2D cell which receives input and gives output in the first
dimension.
The first dimension can optionally be non-recurrent if `non_recurrent_fn` is
specified.
"""
def __init__(self,
num_units,
tied=False,
non_recurrent_fn=None,
state_is_tuple=True,
output_is_tuple=True):
super(Grid2GRUCell, self).__init__(
num_units=num_units,
num_dims=2,
input_dims=0,
output_dims=0,
priority_dims=0,
tied=tied,
non_recurrent_dims=None if non_recurrent_fn is None else 0,
cell_fn=lambda n: rnn.GRUCell(num_units=n),
non_recurrent_fn=non_recurrent_fn,
state_is_tuple=state_is_tuple,
output_is_tuple=output_is_tuple)
# Helpers
_GridRNNDimension = namedtuple('_GridRNNDimension', [
'idx', 'is_input', 'is_output', 'is_priority', 'non_recurrent_fn'
])
_GridRNNConfig = namedtuple('_GridRNNConfig',
['num_dims', 'dims', 'inputs', 'outputs',
'recurrents', 'priority', 'non_priority', 'tied',
'num_units'])
def _parse_rnn_config(num_dims, ls_input_dims, ls_output_dims, ls_priority_dims,
ls_non_recurrent_dims, non_recurrent_fn, tied, num_units):
def check_dim_list(ls):
if ls is None:
ls = []
if not isinstance(ls, (list, tuple)):
ls = [ls]
ls = sorted(set(ls))
if any(_ < 0 or _ >= num_dims for _ in ls):
raise ValueError('Invalid dims: {}. Must be in [0, {})'.format(ls,
num_dims))
return ls
input_dims = check_dim_list(ls_input_dims)
output_dims = check_dim_list(ls_output_dims)
priority_dims = check_dim_list(ls_priority_dims)
non_recurrent_dims = check_dim_list(ls_non_recurrent_dims)
rnn_dims = []
for i in range(num_dims):
rnn_dims.append(
_GridRNNDimension(
idx=i,
is_input=(i in input_dims),
is_output=(i in output_dims),
is_priority=(i in priority_dims),
non_recurrent_fn=non_recurrent_fn
if i in non_recurrent_dims else None))
return _GridRNNConfig(
num_dims=num_dims,
dims=rnn_dims,
inputs=input_dims,
outputs=output_dims,
recurrents=[x for x in range(num_dims) if x not in non_recurrent_dims],
priority=priority_dims,
non_priority=[x for x in range(num_dims) if x not in priority_dims],
tied=tied,
num_units=num_units)
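# For example, _parse_rnn_config(2, 0, 0, 0, None, nn.relu, False, 8) yields
# inputs=[0], outputs=[0], priority=[0], non_priority=[1] and
# recurrents=[0, 1], i.e. both dimensions are recurrent and dimension 0 both
# receives the input and emits the output.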
def _propagate(dim_indices, conf, cells, c_prev, m_prev, new_output, new_state,
first_call):
"""Propagates through all the cells in dim_indices dimensions.
"""
if len(dim_indices) == 0:
return
# Because of the way RNNCells are implemented, we take the last dimension
# (H_{N-1}) out and feed it as the state of the RNN cell
# (in `last_dim_output`).
# The input of the cell (H_0 to H_{N-2}) are concatenated into `cell_inputs`
if conf.num_dims > 1:
ls_cell_inputs = [None] * (conf.num_dims - 1)
for d in conf.dims[:-1]:
if new_output[d.idx] is None:
ls_cell_inputs[d.idx] = m_prev[d.idx]
else:
ls_cell_inputs[d.idx] = new_output[d.idx]
cell_inputs = array_ops.concat(ls_cell_inputs, 1)
else:
cell_inputs = array_ops.zeros([m_prev[0].get_shape().as_list()[0], 0],
m_prev[0].dtype)
last_dim_output = (new_output[-1]
if new_output[-1] is not None else m_prev[-1])
for i in dim_indices:
d = conf.dims[i]
if d.non_recurrent_fn:
if conf.num_dims > 1:
linear_args = array_ops.concat([cell_inputs, last_dim_output], 1)
else:
linear_args = last_dim_output
with vs.variable_scope('non_recurrent' if conf.tied else
'non_recurrent/cell_{}'.format(i)):
if conf.tied and not (first_call and i == dim_indices[0]):
vs.get_variable_scope().reuse_variables()
new_output[d.idx] = layers.fully_connected(
linear_args,
num_outputs=conf.num_units,
activation_fn=d.non_recurrent_fn,
weights_initializer=(vs.get_variable_scope().initializer or
layers.initializers.xavier_initializer),
weights_regularizer=vs.get_variable_scope().regularizer)
else:
if c_prev[i] is not None:
cell_state = (c_prev[i], last_dim_output)
else:
# for GRU/RNN, the state is just the previous output
cell_state = last_dim_output
with vs.variable_scope('recurrent' if conf.tied else
'recurrent/cell_{}'.format(i)):
if conf.tied and not (first_call and i == dim_indices[0]):
vs.get_variable_scope().reuse_variables()
cell = cells[i]
new_output[d.idx], new_state[d.idx] = cell(cell_inputs, cell_state)
| apache-2.0 | 5,037,187,476,360,267,000 | 33.892216 | 80 | 0.59988 | false |
bloyl/mne-python | mne/viz/topomap.py | 1 | 108149 | """Functions to plot M/EEG data e.g. topographies."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Robert Luke <mail@robertluke.net>
#
# License: Simplified BSD
import copy
import itertools
from functools import partial
from numbers import Integral
import warnings
import numpy as np
from ..baseline import rescale
from ..channels.channels import _get_ch_type
from ..channels.layout import (
_find_topomap_coords, find_layout, _pair_grad_sensors, _merge_ch_data)
from ..defaults import _EXTRAPOLATE_DEFAULT, _BORDER_DEFAULT
from ..io.pick import (pick_types, _picks_by_type, pick_info, pick_channels,
_pick_data_channels, _picks_to_idx, _get_channel_types,
_MEG_CH_TYPES_SPLIT)
from ..utils import (_clean_names, _time_mask, verbose, logger, fill_doc,
_validate_type, _check_sphere, _check_option, _is_numeric,
warn, check_version)
from .utils import (tight_layout, _setup_vmin_vmax, _prepare_trellis,
_check_delayed_ssp, _draw_proj_checkbox, figure_nobar,
plt_show, _process_times, DraggableColorbar,
_validate_if_list_of_axes, _setup_cmap, _check_time_unit)
from ..time_frequency import psd_multitaper
from ..defaults import _handle_default
from ..transforms import apply_trans, invert_transform
from ..io.meas_info import Info, _simplify_info
from ..io.proj import Projection
_fnirs_types = ('hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od')
def _adjust_meg_sphere(sphere, info, ch_type):
sphere = _check_sphere(sphere, info)
assert ch_type is not None
if ch_type in ('mag', 'grad', 'planar1', 'planar2'):
# move sphere X/Y (head coords) to device X/Y space
if info['dev_head_t'] is not None:
head_dev_t = invert_transform(info['dev_head_t'])
sphere[:3] = apply_trans(head_dev_t, sphere[:3])
# Set the sphere Z=0 because all this really affects is flattening.
# We could make the head size change as a function of depth in
# the helmet like:
#
# sphere[2] /= -5
#
# but let's just assume some orthographic rather than parallel
# projection for explicitness / simplicity.
sphere[2] = 0.
clip_origin = (0., 0.)
else:
clip_origin = sphere[:2].copy()
return sphere, clip_origin
def _prepare_topomap_plot(inst, ch_type, sphere=None):
"""Prepare topo plot."""
info = copy.deepcopy(inst if isinstance(inst, Info) else inst.info)
sphere, clip_origin = _adjust_meg_sphere(sphere, info, ch_type)
clean_ch_names = _clean_names(info['ch_names'])
for ii, this_ch in enumerate(info['chs']):
this_ch['ch_name'] = clean_ch_names[ii]
info['bads'] = _clean_names(info['bads'])
for comp in info['comps']:
comp['data']['col_names'] = _clean_names(comp['data']['col_names'])
info._update_redundant()
info._check_consistency()
# special case for merging grad channels
layout = find_layout(info)
if (ch_type == 'grad' and layout is not None and
(layout.kind.startswith('Vectorview') or
layout.kind.startswith('Neuromag_122'))):
picks, _ = _pair_grad_sensors(info, layout)
pos = _find_topomap_coords(info, picks[::2], sphere=sphere)
merge_channels = True
elif ch_type in _fnirs_types:
        # fNIRS data commonly has overlapping channels; handle them separately
picks, pos, merge_channels, overlapping_channels = \
_average_fnirs_overlaps(info, ch_type, sphere)
else:
merge_channels = False
if ch_type == 'eeg':
picks = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude='bads')
elif ch_type == 'csd':
picks = pick_types(info, meg=False, csd=True, ref_meg=False,
exclude='bads')
elif ch_type == 'dbs':
picks = pick_types(info, meg=False, dbs=True, ref_meg=False,
exclude='bads')
elif ch_type == 'seeg':
picks = pick_types(info, meg=False, seeg=True, ref_meg=False,
exclude='bads')
else:
picks = pick_types(info, meg=ch_type, ref_meg=False,
exclude='bads')
if len(picks) == 0:
raise ValueError("No channels of type %r" % ch_type)
pos = _find_topomap_coords(info, picks, sphere=sphere)
ch_names = [info['ch_names'][k] for k in picks]
if ch_type in _fnirs_types:
# Remove the chroma label type for cleaner labeling.
ch_names = [k[:-4] for k in ch_names]
if merge_channels:
if ch_type == 'grad':
# change names so that vectorview combined grads appear as MEG014x
# instead of MEG0142 or MEG0143 which are the 2 planar grads.
ch_names = [ch_names[k][:-1] + 'x' for k in
range(0, len(ch_names), 2)]
else:
assert ch_type in _fnirs_types
# Modify the nirs channel names to indicate they are to be merged
# New names will have the form S1_D1xS2_D2
# More than two channels can overlap and be merged
for set in overlapping_channels:
idx = ch_names.index(set[0][:-4])
new_name = 'x'.join(s[:-4] for s in set)
ch_names[idx] = new_name
pos = np.array(pos)[:, :2] # 2D plot, otherwise interpolation bugs
return picks, pos, merge_channels, ch_names, ch_type, sphere, clip_origin
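# For example, merged Vectorview planar gradiometer pairs such as
# 'MEG0142'/'MEG0143' are labelled 'MEG014x', and overlapping fNIRS channels
# such as 'S1_D1 hbo'/'S2_D2 hbo' are shown at one position labelled
# 'S1_D1xS2_D2'.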
def _average_fnirs_overlaps(info, ch_type, sphere):
from scipy.spatial.distance import pdist, squareform
picks = pick_types(info, meg=False, ref_meg=False,
fnirs=ch_type, exclude='bads')
chs = [info['chs'][i] for i in picks]
locs3d = np.array([ch['loc'][:3] for ch in chs])
dist = pdist(locs3d)
# Store the sets of channels to be merged
overlapping_channels = list()
# Channels to be excluded from picks, as will be removed after merging
channels_to_exclude = list()
if len(locs3d) > 1 and np.min(dist) < 1e-10:
overlapping_mask = np.triu(squareform(dist < 1e-10))
for chan_idx in range(overlapping_mask.shape[0]):
already_overlapped = list(itertools.chain.from_iterable(
overlapping_channels))
if overlapping_mask[chan_idx].any() and \
(chs[chan_idx]['ch_name'] not in already_overlapped):
# Determine the set of channels to be combined. Ensure the
# first listed channel is the one to be replaced with merge
overlapping_set = [chs[i]['ch_name'] for i in
np.where(overlapping_mask[chan_idx])[0]]
overlapping_set = np.insert(overlapping_set, 0,
(chs[chan_idx]['ch_name']))
overlapping_channels.append(overlapping_set)
channels_to_exclude.append(overlapping_set[1:])
exclude = list(itertools.chain.from_iterable(channels_to_exclude))
[exclude.append(bad) for bad in info['bads']]
picks = pick_types(info, meg=False, ref_meg=False, fnirs=ch_type,
exclude=exclude)
pos = _find_topomap_coords(info, picks, sphere=sphere)
picks = pick_types(info, meg=False, ref_meg=False, fnirs=ch_type)
        # Overload the merge_channels variable as this is returned to the
        # calling function and indicates that merging of data is required
merge_channels = overlapping_channels
else:
picks = pick_types(info, meg=False, ref_meg=False, fnirs=ch_type,
exclude='bads')
merge_channels = False
pos = _find_topomap_coords(info, picks, sphere=sphere)
return picks, pos, merge_channels, overlapping_channels
def _plot_update_evoked_topomap(params, bools):
"""Update topomaps."""
projs = [proj for ii, proj in enumerate(params['projs'])
if ii in np.where(bools)[0]]
params['proj_bools'] = bools
new_evoked = params['evoked'].copy()
new_evoked.info['projs'] = []
new_evoked.add_proj(projs)
new_evoked.apply_proj()
data = new_evoked.data[:, params['time_idx']] * params['scale']
if params['merge_channels']:
data, _ = _merge_ch_data(data, 'grad', [])
interp = params['interp']
new_contours = list()
for cont, ax, im, d in zip(params['contours_'], params['axes'],
params['images'], data.T):
Zi = interp.set_values(d)()
im.set_data(Zi)
# must be removed and re-added
if len(cont.collections) > 0:
tp = cont.collections[0]
visible = tp.get_visible()
patch_ = tp.get_clip_path()
color = tp.get_color()
lw = tp.get_linewidth()
for tp in cont.collections:
tp.remove()
cont = ax.contour(interp.Xi, interp.Yi, Zi, params['contours'],
colors=color, linewidths=lw)
for tp in cont.collections:
tp.set_visible(visible)
tp.set_clip_path(patch_)
new_contours.append(cont)
params['contours_'] = new_contours
params['fig'].canvas.draw()
def _add_colorbar(ax, im, cmap, side="right", pad=.05, title=None,
format=None, size="5%"):
"""Add a colorbar to an axis."""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(ax)
cax = divider.append_axes(side, size=size, pad=pad)
cbar = plt.colorbar(im, cax=cax, format=format)
if cmap is not None and cmap[1]:
ax.CB = DraggableColorbar(cbar, im)
if title is not None:
cax.set_title(title, y=1.05, fontsize=10)
return cbar, cax
def _eliminate_zeros(proj):
"""Remove grad or mag data if only contains 0s (gh 5641)."""
GRAD_ENDING = ('2', '3')
MAG_ENDING = '1'
proj = copy.deepcopy(proj)
proj['data']['data'] = np.atleast_2d(proj['data']['data'])
for ending in (GRAD_ENDING, MAG_ENDING):
names = proj['data']['col_names']
idx = [i for i, name in enumerate(names) if name.endswith(ending)]
# if all 0, remove the 0s an their labels
if not proj['data']['data'][0][idx].any():
new_col_names = np.delete(np.array(names), idx).tolist()
new_data = np.delete(np.array(proj['data']['data'][0]), idx)
proj['data']['col_names'] = new_col_names
proj['data']['data'] = np.array([new_data])
proj['data']['ncol'] = len(proj['data']['col_names'])
return proj
@fill_doc
def plot_projs_topomap(projs, info, cmap=None, sensors=True,
colorbar=False, res=64, size=1, show=True,
outlines='head', contours=6, image_interp='bilinear',
axes=None, vlim=(None, None),
sphere=None, extrapolate=_EXTRAPOLATE_DEFAULT,
border=_BORDER_DEFAULT):
"""Plot topographic maps of SSP projections.
Parameters
----------
projs : list of Projection
The projections.
info : instance of Info
The info associated with the channels in the projectors.
.. versionchanged:: 0.20
The positional argument ``layout`` was deprecated and replaced
by ``info``.
%(proj_topomap_kwargs)s
%(topomap_sphere_auto)s
%(topomap_extrapolate)s
.. versionadded:: 0.20
%(topomap_border)s
Returns
-------
fig : instance of matplotlib.figure.Figure
Figure with a topomap subplot for each projector.
Notes
-----
.. versionadded:: 0.9.0
"""
import matplotlib.pyplot as plt
sphere = _check_sphere(sphere, info)
# be forgiving if `projs` isn't a list
if isinstance(projs, Projection):
projs = [projs]
_validate_type(info, 'info', 'info')
types, datas, poss, spheres, outliness, ch_typess = [], [], [], [], [], []
for proj in projs:
# get ch_names, ch_types, data
proj = _eliminate_zeros(proj) # gh 5641
ch_names = _clean_names(proj['data']['col_names'],
remove_whitespace=True)
if vlim == 'joint':
ch_idxs = np.where(np.in1d(info['ch_names'],
proj['data']['col_names']))[0]
these_ch_types = _get_channel_types(info, ch_idxs, unique=True)
# each projector should have only one channel type
assert len(these_ch_types) == 1
types.append(list(these_ch_types)[0])
data = proj['data']['data'].ravel()
info_names = _clean_names(info['ch_names'], remove_whitespace=True)
picks = pick_channels(info_names, ch_names)
if len(picks) == 0:
raise ValueError(
f'No channel names in info match projector {proj}')
use_info = pick_info(info, picks)
data_picks, pos, merge_channels, names, ch_type, this_sphere, \
clip_origin = _prepare_topomap_plot(
use_info, _get_ch_type(use_info, None), sphere=sphere)
these_outlines = _make_head_outlines(
sphere, pos, outlines, clip_origin)
data = data[data_picks]
if merge_channels:
data, _ = _merge_ch_data(data, 'grad', [])
data = data.ravel()
# populate containers
datas.append(data)
poss.append(pos)
spheres.append(this_sphere)
outliness.append(these_outlines)
ch_typess.append(ch_type)
del data, pos, this_sphere, these_outlines, ch_type
del sphere
# setup axes
n_projs = len(projs)
if axes is None:
fig, axes, ncols, nrows = _prepare_trellis(
n_projs, ncols='auto', nrows='auto', sharex=True, sharey=True)
elif isinstance(axes, plt.Axes):
axes = [axes]
_validate_if_list_of_axes(axes, n_projs)
# handle vmin/vmax
vlims = [None for _ in range(len(datas))]
if vlim == 'joint':
for _ch_type in set(types):
idx = np.where(np.in1d(types, _ch_type))[0]
these_data = np.concatenate(np.array(datas, dtype=object)[idx])
norm = all(these_data >= 0)
_vl = _setup_vmin_vmax(these_data, vmin=None, vmax=None, norm=norm)
for _idx in idx:
vlims[_idx] = _vl
# make sure we got a vlim for all projs
assert all([vl is not None for vl in vlims])
else:
vlims = [vlim for _ in range(len(datas))]
# plot
for proj, ax, _data, _pos, _vlim, _sphere, _outlines, _ch_type in zip(
projs, axes, datas, poss, vlims, spheres, outliness, ch_typess):
# title
title = proj['desc']
title = '\n'.join(title[ii:ii + 22] for ii in range(0, len(title), 22))
ax.set_title(title, fontsize=10)
# plot
vmin, vmax = _vlim
im = plot_topomap(_data, _pos[:, :2], vmin=vmin, vmax=vmax, cmap=cmap,
sensors=sensors, res=res, axes=ax,
outlines=_outlines, contours=contours,
image_interp=image_interp, show=False,
extrapolate=extrapolate, sphere=_sphere,
border=border, ch_type=_ch_type)[0]
if colorbar:
_add_colorbar(ax, im, cmap)
fig = ax.get_figure()
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore')
tight_layout(fig=fig)
plt_show(show)
return fig
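# Example usage sketch (the file names below are hypothetical):
#   projs = mne.read_proj('sample-proj.fif')
#   info = mne.io.read_info('sample-raw.fif')
#   plot_projs_topomap(projs, info, colorbar=True, vlim='joint')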
def _make_head_outlines(sphere, pos, outlines, clip_origin):
"""Check or create outlines for topoplot."""
assert isinstance(sphere, np.ndarray)
x, y, _, radius = sphere
del sphere
if outlines in ('head', 'skirt', None):
ll = np.linspace(0, 2 * np.pi, 101)
head_x = np.cos(ll) * radius + x
head_y = np.sin(ll) * radius + y
dx = np.exp(np.arccos(np.deg2rad(12)) * 1j)
dx, dy = dx.real, dx.imag
nose_x = np.array([-dx, 0, dx]) * radius + x
nose_y = np.array([dy, 1.15, dy]) * radius + y
ear_x = np.array([.497, .510, .518, .5299, .5419, .54, .547,
.532, .510, .489]) * (radius * 2)
ear_y = np.array([.0555, .0775, .0783, .0746, .0555, -.0055, -.0932,
-.1313, -.1384, -.1199]) * (radius * 2) + y
if outlines is not None:
# Define the outline of the head, ears and nose
outlines_dict = dict(head=(head_x, head_y), nose=(nose_x, nose_y),
ear_left=(ear_x + x, ear_y),
ear_right=(-ear_x + x, ear_y))
else:
outlines_dict = dict()
# Make the figure encompass slightly more than all points
mask_scale = 1.25 if outlines == 'skirt' else 1.
# We probably want to ensure it always contains our most
# extremely positioned channels, so we do:
mask_scale = max(
mask_scale, np.linalg.norm(pos, axis=1).max() * 1.01 / radius)
outlines_dict['mask_pos'] = (mask_scale * head_x, mask_scale * head_y)
clip_radius = radius * mask_scale
outlines_dict['clip_radius'] = (clip_radius,) * 2
outlines_dict['clip_origin'] = clip_origin
outlines = outlines_dict
elif isinstance(outlines, dict):
if 'mask_pos' not in outlines:
raise ValueError('You must specify the coordinates of the image '
'mask.')
else:
raise ValueError('Invalid value for `outlines`.')
return outlines
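# For the 'head'/'skirt'/None cases the returned dict can hold 'head', 'nose',
# 'ear_left' and 'ear_right' entries (each an (x, y) coordinate tuple) plus
# 'mask_pos', 'clip_radius' and 'clip_origin', which the downstream plotting
# code relies on.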
def _draw_outlines(ax, outlines):
"""Draw the outlines for a topomap."""
from matplotlib import rcParams
outlines_ = {k: v for k, v in outlines.items()
if k not in ['patch']}
for key, (x_coord, y_coord) in outlines_.items():
if 'mask' in key or key in ('clip_radius', 'clip_origin'):
continue
ax.plot(x_coord, y_coord, color=rcParams['axes.edgecolor'],
linewidth=1, clip_on=False)
return outlines_
def _get_extra_points(pos, extrapolate, origin, radii):
"""Get coordinates of additinal interpolation points."""
from scipy.spatial.qhull import Delaunay
radii = np.array(radii, float)
assert radii.shape == (2,)
x, y = origin
# auto should be gone by now
_check_option('extrapolate', extrapolate, ('head', 'box', 'local'))
# the old method of placement - large box
mask_pos = None
if extrapolate == 'box':
extremes = np.array([pos.min(axis=0), pos.max(axis=0)])
diffs = extremes[1] - extremes[0]
extremes[0] -= diffs
extremes[1] += diffs
eidx = np.array(list(itertools.product(
*([[0] * (pos.shape[1] - 1) + [1]] * pos.shape[1]))))
pidx = np.tile(np.arange(pos.shape[1])[np.newaxis], (len(eidx), 1))
outer_pts = extremes[eidx, pidx]
return outer_pts, mask_pos, Delaunay(np.concatenate((pos, outer_pts)))
# check if positions are colinear:
diffs = np.diff(pos, axis=0)
with np.errstate(divide='ignore'):
slopes = diffs[:, 1] / diffs[:, 0]
colinear = ((slopes == slopes[0]).all() or np.isinf(slopes).all())
# compute median inter-electrode distance
if colinear or pos.shape[0] < 4:
dim = 1 if diffs[:, 1].sum() > diffs[:, 0].sum() else 0
sorting = np.argsort(pos[:, dim])
pos_sorted = pos[sorting, :]
diffs = np.diff(pos_sorted, axis=0)
distances = np.linalg.norm(diffs, axis=1)
distance = np.median(distances)
else:
tri = Delaunay(pos, incremental=True)
idx1, idx2, idx3 = tri.simplices.T
distances = np.concatenate(
[np.linalg.norm(pos[i1, :] - pos[i2, :], axis=1)
for i1, i2 in zip([idx1, idx2], [idx2, idx3])])
distance = np.median(distances)
if extrapolate == 'local':
if colinear or pos.shape[0] < 4:
# special case for colinear points and when there is too
# little points for Delaunay (needs at least 3)
edge_points = sorting[[0, -1]]
line_len = np.diff(pos[edge_points, :], axis=0)
unit_vec = line_len / np.linalg.norm(line_len) * distance
unit_vec_par = unit_vec[:, ::-1] * [[-1, 1]]
edge_pos = (pos[edge_points, :] +
np.concatenate([-unit_vec, unit_vec], axis=0))
new_pos = np.concatenate([pos + unit_vec_par,
pos - unit_vec_par, edge_pos], axis=0)
if pos.shape[0] == 3:
# there may be some new_pos points that are too close
# to the original points
new_pos_diff = pos[..., np.newaxis] - new_pos.T[np.newaxis, :]
new_pos_diff = np.linalg.norm(new_pos_diff, axis=1)
good_extra = (new_pos_diff > 0.5 * distance).all(axis=0)
new_pos = new_pos[good_extra]
tri = Delaunay(np.concatenate([pos, new_pos], axis=0))
return new_pos, new_pos, tri
# get the convex hull of data points from triangulation
hull_pos = pos[tri.convex_hull]
# extend the convex hull limits outwards a bit
channels_center = pos.mean(axis=0)
radial_dir = hull_pos - channels_center
unit_radial_dir = radial_dir / np.linalg.norm(radial_dir, axis=-1,
keepdims=True)
hull_extended = hull_pos + unit_radial_dir * distance
mask_pos = hull_pos + unit_radial_dir * distance * 0.5
hull_diff = np.diff(hull_pos, axis=1)[:, 0]
hull_distances = np.linalg.norm(hull_diff, axis=-1)
del channels_center
# Construct a mask
mask_pos = np.unique(mask_pos.reshape(-1, 2), axis=0)
mask_center = np.mean(mask_pos, axis=0)
mask_pos -= mask_center
mask_pos = mask_pos[
np.argsort(np.arctan2(mask_pos[:, 1], mask_pos[:, 0]))]
mask_pos += mask_center
# add points along hull edges so that the distance between points
# is around that of average distance between channels
add_points = list()
eps = np.finfo('float').eps
n_times_dist = np.round(0.25 * hull_distances / distance).astype('int')
for n in range(2, n_times_dist.max() + 1):
mask = n_times_dist == n
mult = np.arange(1 / n, 1 - eps, 1 / n)[:, np.newaxis, np.newaxis]
steps = hull_diff[mask][np.newaxis, ...] * mult
add_points.append((hull_extended[mask, 0][np.newaxis, ...] +
steps).reshape((-1, 2)))
# remove duplicates from hull_extended
hull_extended = np.unique(hull_extended.reshape((-1, 2)), axis=0)
new_pos = np.concatenate([hull_extended] + add_points)
else:
assert extrapolate == 'head'
# return points on the head circle
angle = np.arcsin(distance / np.mean(radii))
n_pnts = max(12, int(np.round(2 * np.pi / angle)))
points_l = np.linspace(0, 2 * np.pi, n_pnts, endpoint=False)
use_radii = radii * 1.1 + distance
points_x = np.cos(points_l) * use_radii[0] + x
points_y = np.sin(points_l) * use_radii[1] + y
new_pos = np.stack([points_x, points_y], axis=1)
if colinear or pos.shape[0] == 3:
tri = Delaunay(np.concatenate([pos, new_pos], axis=0))
return new_pos, mask_pos, tri
tri.add_points(new_pos)
return new_pos, mask_pos, tri
class _GridData(object):
"""Unstructured (x,y) data interpolator.
This class allows optimized interpolation by computing parameters
for a fixed set of true points, and allowing the values at those points
to be set independently.
"""
def __init__(self, pos, extrapolate, origin, radii, border):
# in principle this works in N dimensions, not just 2
assert pos.ndim == 2 and pos.shape[1] == 2, pos.shape
_validate_type(border, ('numeric', str), 'border')
# check that border, if string, is correct
if isinstance(border, str):
_check_option('border', border, ('mean',), extra='when a string')
# Adding points outside the extremes helps the interpolators
outer_pts, mask_pts, tri = _get_extra_points(
pos, extrapolate, origin, radii)
self.n_extra = outer_pts.shape[0]
self.mask_pts = mask_pts
self.border = border
self.tri = tri
def set_values(self, v):
"""Set the values at interpolation points."""
# Rbf with thin-plate is what we used to use, but it's slower and
# looks about the same:
#
# zi = Rbf(x, y, v, function='multiquadric', smooth=0)(xi, yi)
#
# Eventually we could also do set_values with this class if we want,
# see scipy/interpolate/rbf.py, especially the self.nodes one-liner.
from scipy.interpolate import CloughTocher2DInterpolator
if isinstance(self.border, str):
# we've already checked that border = 'mean'
n_points = v.shape[0]
v_extra = np.zeros(self.n_extra)
indices, indptr = self.tri.vertex_neighbor_vertices
rng = range(n_points, n_points + self.n_extra)
used = np.zeros(len(rng), bool)
for idx, extra_idx in enumerate(rng):
ngb = indptr[indices[extra_idx]:indices[extra_idx + 1]]
ngb = ngb[ngb < n_points]
if len(ngb) > 0:
used[idx] = True
v_extra[idx] = v[ngb].mean()
if not used.all() and used.any():
# Eventually we might want to use the value of the nearest
# point or something, but this case should hopefully be
# rare so for now just use the average value of all extras
v_extra[~used] = np.mean(v_extra[used])
else:
v_extra = np.full(self.n_extra, self.border, dtype=float)
v = np.concatenate((v, v_extra))
self.interpolator = CloughTocher2DInterpolator(self.tri, v)
return self
def set_locations(self, Xi, Yi):
"""Set locations for easier (delayed) calling."""
self.Xi = Xi
self.Yi = Yi
return self
def __call__(self, *args):
"""Evaluate the interpolator."""
if len(args) == 0:
args = [self.Xi, self.Yi]
return self.interpolator(*args)
def _topomap_plot_sensors(pos_x, pos_y, sensors, ax):
"""Plot sensors."""
if sensors is True:
ax.scatter(pos_x, pos_y, s=0.25, marker='o',
edgecolor=['k'] * len(pos_x), facecolor='none')
else:
ax.plot(pos_x, pos_y, sensors)
def _get_pos_outlines(info, picks, sphere, to_sphere=True):
ch_type = _get_ch_type(pick_info(_simplify_info(info), picks), None)
orig_sphere = sphere
sphere, clip_origin = _adjust_meg_sphere(sphere, info, ch_type)
logger.debug('Generating pos outlines with sphere '
f'{sphere} from {orig_sphere} for {ch_type}')
pos = _find_topomap_coords(
info, picks, ignore_overlap=True, to_sphere=to_sphere,
sphere=sphere)
outlines = _make_head_outlines(sphere, pos, 'head', clip_origin)
return pos, outlines
@fill_doc
def plot_topomap(data, pos, vmin=None, vmax=None, cmap=None, sensors=True,
res=64, axes=None, names=None, show_names=False, mask=None,
mask_params=None, outlines='head',
contours=6, image_interp='bilinear', show=True,
onselect=None, extrapolate=_EXTRAPOLATE_DEFAULT,
sphere=None, border=_BORDER_DEFAULT,
ch_type='eeg', cnorm=None):
"""Plot a topographic map as image.
Parameters
----------
data : array, shape (n_chan,)
The data values to plot.
pos : array, shape (n_chan, 2) | instance of Info
Location information for the data points(/channels).
If an array, for each data point, the x and y coordinates.
If an Info object, it must contain only one data type and
exactly ``len(data)`` data channels, and the x/y coordinates will
be inferred from this Info object.
vmin : float | callable | None
The value specifying the lower bound of the color range.
        If None and ``vmax`` is also None, ``-vmax`` is used; otherwise
        ``np.min(data)``.
If callable, the output equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the output
equals vmax(data). Defaults to None.
cmap : matplotlib colormap | None
Colormap to use. If None, 'Reds' is used for all positive data,
otherwise defaults to 'RdBu_r'.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib plot
format string (e.g., 'r+' for red plusses). If True (default), circles
will be used.
res : int
The resolution of the topomap image (n pixels along each side).
axes : instance of Axes | None
The axes to plot to. If None, the current axes will be used.
names : list | None
List of channel names. If None, channel names are not plotted.
%(topomap_show_names)s
If ``True``, a list of names must be provided (see ``names`` keyword).
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to ``True`` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
%(topomap_outlines)s
contours : int | array of float
The number of contour lines to draw. If 0, no contours will be drawn.
If an array, the values represent the levels for the contours. The
values are in µV for EEG, fT for magnetometers and fT/m for
gradiometers. Defaults to 6.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
show : bool
Show figure if True.
onselect : callable | None
Handle for a function that is called when the user selects a set of
channels by rectangle selection (matplotlib ``RectangleSelector``). If
None interactive selection is disabled. Defaults to None.
%(topomap_extrapolate)s
.. versionadded:: 0.18
%(topomap_sphere)s
%(topomap_border)s
%(topomap_ch_type)s
        .. versionadded:: 0.24.0
cnorm : matplotlib.colors.Normalize | None
        Colormap normalization. If None (default), a linear normalization is
        used. If not None, the ``vmin`` and ``vmax`` arguments are ignored.
        See Notes for more details.
.. versionadded:: 0.24
Returns
-------
im : matplotlib.image.AxesImage
The interpolated data.
cn : matplotlib.contour.ContourSet
The fieldlines.
Notes
-----
The ``cnorm`` parameter can be used to implement custom colormap
    normalization. By default, a linear mapping from vmin to vmax is used,
    which maps vmin and vmax to the first and last colors of the colormap.
    This might be undesired when vmin and vmax are not symmetrical around zero
    (or around some other value that can be interpreted as a midpoint). For
    example, assume we want to use the RdBu colormap (red to white to blue)
    for values ranging from -1 to 3, with 0 shown as white. By default,
    however, white corresponds to the midpoint of the data, i.e. 1. Therefore,
    we define the following colormap normalization and pass it as the
    ``cnorm`` argument::
from matplotlib.colors import TwoSlopeNorm
cnorm = TwoSlopeNorm(vmin=-1, vcenter=0, vmax=3)
Note that because we define ``vmin`` and ``vmax`` in the normalization,
arguments ``vmin`` and ``vmax`` to ``plot_topomap`` will be ignored if a
normalization is provided. See the
:doc:`matplotlib docs <matplotlib:tutorials/colors/colormapnorms>`
for more details on colormap normalization.
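    As a purely illustrative sketch (the sensor positions and data values
    below are random placeholders, not real recordings)::
        import numpy as np
        from mne.viz import plot_topomap
        rng = np.random.default_rng(0)
        pos = rng.uniform(-0.1, 0.1, (32, 2))
        data = rng.uniform(-1, 3, 32)
        plot_topomap(data, pos, cnorm=cnorm, contours=0, show=False)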
"""
sphere = _check_sphere(sphere)
if check_version("matplotlib", "3.2.0"):
from matplotlib.colors import TwoSlopeNorm
else:
from matplotlib.colors import DivergingNorm as TwoSlopeNorm
_validate_type(cnorm, (TwoSlopeNorm, None), 'cnorm')
if cnorm is not None:
if vmin is not None:
warn(f"vmin={cnorm.vmin} is implicitly defined by cnorm, ignoring "
f"vmin={vmin}.")
if vmax is not None:
warn(f"vmax={cnorm.vmax} is implicitly defined by cnorm, ignoring "
f"vmax={vmax}.")
return _plot_topomap(data, pos, vmin, vmax, cmap, sensors, res, axes,
names, show_names, mask, mask_params, outlines,
contours, image_interp, show,
onselect, extrapolate, sphere=sphere, border=border,
ch_type=ch_type, cnorm=cnorm)[:2]
def _setup_interp(pos, res, extrapolate, sphere, outlines, border):
logger.debug(f'Interpolation mode {extrapolate} to {border}')
xlim = np.inf, -np.inf,
ylim = np.inf, -np.inf,
mask_ = np.c_[outlines['mask_pos']]
clip_radius = outlines['clip_radius']
clip_origin = outlines.get('clip_origin', (0., 0.))
xmin, xmax = (np.min(np.r_[xlim[0],
mask_[:, 0],
clip_origin[0] - clip_radius[0]]),
np.max(np.r_[xlim[1],
mask_[:, 0],
clip_origin[0] + clip_radius[0]]))
ymin, ymax = (np.min(np.r_[ylim[0],
mask_[:, 1],
clip_origin[1] - clip_radius[1]]),
np.max(np.r_[ylim[1],
mask_[:, 1],
clip_origin[1] + clip_radius[1]]))
xi = np.linspace(xmin, xmax, res)
yi = np.linspace(ymin, ymax, res)
Xi, Yi = np.meshgrid(xi, yi)
interp = _GridData(pos, extrapolate, clip_origin, clip_radius, border)
extent = (xmin, xmax, ymin, ymax)
return extent, Xi, Yi, interp
def _get_patch(outlines, extrapolate, interp, ax):
from matplotlib import patches
clip_radius = outlines['clip_radius']
clip_origin = outlines.get('clip_origin', (0., 0.))
_use_default_outlines = any(k.startswith('head') for k in outlines)
patch_ = None
if 'patch' in outlines:
patch_ = outlines['patch']
patch_ = patch_() if callable(patch_) else patch_
patch_.set_clip_on(False)
ax.add_patch(patch_)
ax.set_transform(ax.transAxes)
ax.set_clip_path(patch_)
if _use_default_outlines:
if extrapolate == 'local':
patch_ = patches.Polygon(
interp.mask_pts, clip_on=True, transform=ax.transData)
else:
patch_ = patches.Ellipse(
clip_origin, 2 * clip_radius[0], 2 * clip_radius[1],
clip_on=True, transform=ax.transData)
return patch_
def _plot_topomap(data, pos, vmin=None, vmax=None, cmap=None, sensors=True,
res=64, axes=None, names=None, show_names=False, mask=None,
mask_params=None, outlines='head',
contours=6, image_interp='bilinear', show=True,
onselect=None, extrapolate=_EXTRAPOLATE_DEFAULT, sphere=None,
border=_BORDER_DEFAULT, ch_type='eeg', cnorm=None):
from matplotlib.colors import Normalize
import matplotlib.pyplot as plt
from matplotlib.widgets import RectangleSelector
data = np.asarray(data)
logger.debug(f'Plotting topomap for {ch_type} data shape {data.shape}')
if isinstance(pos, Info): # infer pos from Info object
picks = _pick_data_channels(pos, exclude=()) # pick only data channels
pos = pick_info(pos, picks)
# check if there is only 1 channel type, and n_chans matches the data
ch_type = _get_channel_types(pos, unique=True)
info_help = ("Pick Info with e.g. mne.pick_info and "
"mne.io.pick.channel_indices_by_type.")
if len(ch_type) > 1:
raise ValueError("Multiple channel types in Info structure. " +
info_help)
elif len(pos["chs"]) != data.shape[0]:
raise ValueError("Number of channels in the Info object (%s) and "
"the data array (%s) do not match. "
% (len(pos['chs']), data.shape[0]) + info_help)
else:
ch_type = ch_type.pop()
if any(type_ in ch_type for type_ in ('planar', 'grad')):
# deal with grad pairs
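            # (positions are taken from one sensor of each pair via
            # picks[::2]; the two gradiometer signals of each pair are then
            # combined by _merge_ch_data)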
picks = _pair_grad_sensors(pos, topomap_coords=False)
pos = _find_topomap_coords(pos, picks=picks[::2], sphere=sphere)
data, _ = _merge_ch_data(data[picks], ch_type, [])
data = data.reshape(-1)
else:
picks = list(range(data.shape[0]))
pos = _find_topomap_coords(pos, picks=picks, sphere=sphere)
extrapolate = _check_extrapolate(extrapolate, ch_type)
if data.ndim > 1:
raise ValueError("Data needs to be array of shape (n_sensors,); got "
"shape %s." % str(data.shape))
# Give a helpful error message for common mistakes regarding the position
# matrix.
pos_help = ("Electrode positions should be specified as a 2D array with "
"shape (n_channels, 2). Each row in this matrix contains the "
"(x, y) position of an electrode.")
if pos.ndim != 2:
error = ("{ndim}D array supplied as electrode positions, where a 2D "
"array was expected").format(ndim=pos.ndim)
raise ValueError(error + " " + pos_help)
elif pos.shape[1] == 3:
error = ("The supplied electrode positions matrix contains 3 columns. "
"Are you trying to specify XYZ coordinates? Perhaps the "
"mne.channels.create_eeg_layout function is useful for you.")
raise ValueError(error + " " + pos_help)
# No error is raised in case of pos.shape[1] == 4. In this case, it is
# assumed the position matrix contains both (x, y) and (width, height)
# values, such as Layout.pos.
elif pos.shape[1] == 1 or pos.shape[1] > 4:
raise ValueError(pos_help)
pos = pos[:, :2]
if len(data) != len(pos):
raise ValueError("Data and pos need to be of same length. Got data of "
"length %s, pos of length %s" % (len(data), len(pos)))
norm = min(data) >= 0
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax, norm)
if cmap is None:
cmap = 'Reds' if norm else 'RdBu_r'
outlines = _make_head_outlines(sphere, pos, outlines, (0., 0.))
assert isinstance(outlines, dict)
ax = axes if axes else plt.gca()
_prepare_topomap(pos, ax)
mask_params = _handle_default('mask_params', mask_params)
# find mask limits
extent, Xi, Yi, interp = _setup_interp(
pos, res, extrapolate, sphere, outlines, border)
interp.set_values(data)
Zi = interp.set_locations(Xi, Yi)()
# plot outline
patch_ = _get_patch(outlines, extrapolate, interp, ax)
# plot interpolated map
if cnorm is None:
cnorm = Normalize(vmin=vmin, vmax=vmax)
im = ax.imshow(Zi, cmap=cmap, origin='lower', aspect='equal',
extent=extent, interpolation=image_interp, norm=cnorm)
# gh-1432 had a workaround for no contours here, but we'll remove it
# because mpl has probably fixed it
linewidth = mask_params['markeredgewidth']
cont = True
if isinstance(contours, (np.ndarray, list)):
pass
elif contours == 0 or ((Zi == Zi[0, 0]) | np.isnan(Zi)).all():
cont = None # can't make contours for constant-valued functions
if cont:
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore')
cont = ax.contour(Xi, Yi, Zi, contours, colors='k',
linewidths=linewidth / 2.)
if patch_ is not None:
im.set_clip_path(patch_)
if cont is not None:
for col in cont.collections:
col.set_clip_path(patch_)
pos_x, pos_y = pos.T
if sensors is not False and mask is None:
_topomap_plot_sensors(pos_x, pos_y, sensors=sensors, ax=ax)
elif sensors and mask is not None:
idx = np.where(mask)[0]
ax.plot(pos_x[idx], pos_y[idx], **mask_params)
idx = np.where(~mask)[0]
_topomap_plot_sensors(pos_x[idx], pos_y[idx], sensors=sensors, ax=ax)
elif not sensors and mask is not None:
idx = np.where(mask)[0]
ax.plot(pos_x[idx], pos_y[idx], **mask_params)
if isinstance(outlines, dict):
_draw_outlines(ax, outlines)
if show_names:
if names is None:
raise ValueError("To show names, a list of names must be provided"
" (see `names` keyword).")
if show_names is True:
def _show_names(x):
return x
else:
_show_names = show_names
show_idx = np.arange(len(names)) if mask is None else np.where(mask)[0]
for ii, (p, ch_id) in enumerate(zip(pos, names)):
if ii not in show_idx:
continue
ch_id = _show_names(ch_id)
ax.text(p[0], p[1], ch_id, horizontalalignment='center',
verticalalignment='center', size='x-small')
plt.subplots_adjust(top=.95)
if onselect is not None:
lim = ax.dataLim
x0, y0, width, height = lim.x0, lim.y0, lim.width, lim.height
ax.RS = RectangleSelector(ax, onselect=onselect)
ax.set(xlim=[x0, x0 + width], ylim=[y0, y0 + height])
plt_show(show)
return im, cont, interp
def _plot_ica_topomap(ica, idx=0, ch_type=None, res=64,
vmin=None, vmax=None, cmap='RdBu_r', colorbar=False,
title=None, show=True, outlines='head', contours=6,
image_interp='bilinear', axes=None,
sensors=True, allow_ref_meg=False,
extrapolate=_EXTRAPOLATE_DEFAULT,
sphere=None, border=_BORDER_DEFAULT):
"""Plot single ica map to axes."""
from matplotlib.axes import Axes
if ica.info is None:
raise RuntimeError('The ICA\'s measurement info is missing. Please '
'fit the ICA or add the corresponding info object.')
sphere = _check_sphere(sphere, ica.info)
if not isinstance(axes, Axes):
        raise ValueError('axes has to be an instance of matplotlib Axes, '
'got %s instead.' % type(axes))
ch_type = _get_ch_type(ica, ch_type, allow_ref_meg=ica.allow_ref_meg)
if ch_type == "ref_meg":
logger.info("Cannot produce topographies for MEG reference channels.")
return
data = ica.get_components()[:, idx]
data_picks, pos, merge_channels, names, _, sphere, clip_origin = \
_prepare_topomap_plot(ica, ch_type, sphere=sphere)
data = data[data_picks]
outlines = _make_head_outlines(sphere, pos, outlines, clip_origin)
if merge_channels:
data, names = _merge_ch_data(data, ch_type, names)
axes.set_title(ica._ica_names[idx], fontsize=12)
vmin_, vmax_ = _setup_vmin_vmax(data, vmin, vmax)
im = plot_topomap(
data.ravel(), pos, vmin=vmin_, vmax=vmax_, res=res, axes=axes,
cmap=cmap, outlines=outlines, contours=contours, sensors=sensors,
image_interp=image_interp, show=show, extrapolate=extrapolate,
sphere=sphere, border=border, ch_type=ch_type)[0]
if colorbar:
cbar, cax = _add_colorbar(axes, im, cmap, pad=.05, title="AU",
format='%3.2f')
cbar.ax.tick_params(labelsize=12)
cbar.set_ticks((vmin_, vmax_))
_hide_frame(axes)
@verbose
def plot_ica_components(ica, picks=None, ch_type=None, res=64,
vmin=None, vmax=None, cmap='RdBu_r',
sensors=True, colorbar=False, title=None,
show=True, outlines='head', contours=6,
image_interp='bilinear',
inst=None, plot_std=True, topomap_args=None,
image_args=None, psd_args=None, reject='auto',
sphere=None, *, verbose=None):
"""Project mixing matrix on interpolated sensor topography.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA solution.
%(picks_all)s
        If None, all are plotted in batches of 20.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
res : int
The resolution of the topomap image (n pixels along each side).
vmin : float | callable | None
The value specifying the lower bound of the color range.
        If None and ``vmax`` is also None, ``-vmax`` is used; otherwise
        ``np.min(data)``.
If callable, the output equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the output
equals vmax(data). Defaults to None.
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap to
use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging the
colorbar with left and right mouse button. Left mouse button moves the
scale up and down and right mouse button adjusts the range. Hitting
space bar resets the range. Up and down arrows can be used to change
the colormap. If None, 'Reds' is used for all positive data,
otherwise defaults to 'RdBu_r'. If 'interactive', translates to
(None, True). Defaults to 'RdBu_r'.
        .. warning:: Interactive mode works smoothly only for a small number
of topomaps.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True (default),
circles will be used.
colorbar : bool
Plot a colorbar.
title : str | None
Title to use.
show : bool
Show figure if True.
%(topomap_outlines)s
contours : int | array of float
The number of contour lines to draw. If 0, no contours will be drawn.
When an integer, matplotlib ticker locator is used to find suitable
values for the contour thresholds (may sometimes be inaccurate, use
array for accuracy). If an array, the values represent the levels for
the contours. Defaults to 6.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
inst : Raw | Epochs | None
        To be able to see component properties after clicking on a component
        topomap, you need to pass relevant data, i.e. an instance of Raw or
        Epochs (for example the data that the ICA was trained on). This takes
        effect only when running matplotlib in interactive mode.
plot_std : bool | float
Whether to plot standard deviation in ERP/ERF and spectrum plots.
Defaults to True, which plots one standard deviation above/below.
        If set to a float, it controls how many standard deviations are
        plotted. For example, 2.5 will plot 2.5 standard deviations
        above/below.
topomap_args : dict | None
Dictionary of arguments to ``plot_topomap``. If None, doesn't pass any
additional arguments. Defaults to None.
image_args : dict | None
Dictionary of arguments to ``plot_epochs_image``. If None, doesn't pass
any additional arguments. Defaults to None.
psd_args : dict | None
Dictionary of arguments to ``psd_multitaper``. If None, doesn't pass
any additional arguments. Defaults to None.
reject : 'auto' | dict | None
        Allows specifying rejection parameters used to drop epochs
(or segments if continuous signal is passed as inst).
If None, no rejection is applied. The default is 'auto',
which applies the rejection parameters used when fitting
the ICA object.
%(topomap_sphere_auto)s
%(verbose)s
Returns
-------
fig : instance of matplotlib.figure.Figure or list
The figure object(s).
Notes
-----
    When run in interactive mode, ``plot_ica_components`` allows rejecting
components by clicking on their title label. The state of each component
is indicated by its label color (gray: rejected; black: retained). It is
also possible to open component properties by clicking on the component
topomap (this option is only available when the ``inst`` argument is
supplied).
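    A minimal illustrative call (``ica`` stands for a fitted ICA instance and
    ``raw`` for the data it was fitted on; both are placeholders here)::
        plot_ica_components(ica, picks=range(10), inst=raw, show=False)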
"""
from ..io import BaseRaw
from ..epochs import BaseEpochs
if ica.info is None:
raise RuntimeError('The ICA\'s measurement info is missing. Please '
'fit the ICA or add the corresponding info object.')
topomap_args = dict() if topomap_args is None else topomap_args
topomap_args = copy.copy(topomap_args)
if 'sphere' not in topomap_args:
topomap_args['sphere'] = sphere
if picks is None: # plot components by sets of 20
ch_type = _get_ch_type(ica, ch_type)
n_components = ica.mixing_matrix_.shape[1]
p = 20
figs = []
for k in range(0, n_components, p):
picks = range(k, min(k + p, n_components))
fig = plot_ica_components(
ica, picks=picks, ch_type=ch_type, res=res, vmax=vmax,
cmap=cmap, sensors=sensors, colorbar=colorbar, title=title,
show=show, outlines=outlines, contours=contours,
image_interp=image_interp, inst=inst, plot_std=plot_std,
topomap_args=topomap_args, image_args=image_args,
psd_args=psd_args, reject=reject, sphere=sphere)
figs.append(fig)
return figs
else:
picks = _picks_to_idx(ica.info, picks)
ch_type = _get_ch_type(ica, ch_type)
cmap = _setup_cmap(cmap, n_axes=len(picks))
data = np.dot(ica.mixing_matrix_[:, picks].T,
ica.pca_components_[:ica.n_components_])
data_picks, pos, merge_channels, names, ch_type, sphere, clip_origin = \
_prepare_topomap_plot(ica, ch_type, sphere=sphere)
outlines = _make_head_outlines(sphere, pos, outlines, clip_origin)
data = np.atleast_2d(data)
data = data[:, data_picks]
# prepare data for iteration
fig, axes, _, _ = _prepare_trellis(len(data), ncols=5)
if title is None:
title = 'ICA components'
fig.suptitle(title)
titles = list()
for ii, data_, ax in zip(picks, data, axes):
kwargs = dict(color='gray') if ii in ica.exclude else dict()
titles.append(ax.set_title(ica._ica_names[ii], fontsize=12, **kwargs))
if merge_channels:
data_, names_ = _merge_ch_data(data_, ch_type, names.copy())
vmin_, vmax_ = _setup_vmin_vmax(data_, vmin, vmax)
im = plot_topomap(
data_.flatten(), pos, vmin=vmin_, vmax=vmax_, res=res, axes=ax,
cmap=cmap[0], outlines=outlines, contours=contours,
image_interp=image_interp, show=False, sensors=sensors,
ch_type=ch_type, **topomap_args)[0]
im.axes.set_label(ica._ica_names[ii])
if colorbar:
cbar, cax = _add_colorbar(ax, im, cmap, title="AU",
side="right", pad=.05, format='%3.2f')
cbar.ax.tick_params(labelsize=12)
cbar.set_ticks((vmin_, vmax_))
_hide_frame(ax)
del pos
tight_layout(fig=fig)
fig.subplots_adjust(top=0.88, bottom=0.)
fig.canvas.draw()
# add title selection interactivity
def onclick_title(event, ica=ica, titles=titles):
# check if any title was pressed
title_pressed = None
for title in titles:
if title.contains(event)[0]:
title_pressed = title
break
# title was pressed -> identify the IC
if title_pressed is not None:
label = title_pressed.get_text()
ic = int(label[-3:])
# add or remove IC from exclude depending on current state
if ic in ica.exclude:
ica.exclude.remove(ic)
title_pressed.set_color('k')
else:
ica.exclude.append(ic)
title_pressed.set_color('gray')
fig.canvas.draw()
fig.canvas.mpl_connect('button_press_event', onclick_title)
# add plot_properties interactivity only if inst was passed
if isinstance(inst, (BaseRaw, BaseEpochs)):
def onclick_topo(event, ica=ica, inst=inst):
# check which component to plot
if event.inaxes is not None:
label = event.inaxes.get_label()
if label.startswith('ICA'):
ic = int(label[-3:])
ica.plot_properties(inst, picks=ic, show=True,
plot_std=plot_std,
topomap_args=topomap_args,
image_args=image_args,
psd_args=psd_args, reject=reject)
fig.canvas.mpl_connect('button_press_event', onclick_topo)
plt_show(show)
return fig
@fill_doc
def plot_tfr_topomap(tfr, tmin=None, tmax=None, fmin=None, fmax=None,
ch_type=None, baseline=None, mode='mean',
vmin=None, vmax=None, cmap=None, sensors=True,
colorbar=True, unit=None, res=64, size=2,
cbar_fmt='%1.1e', show_names=False, title=None,
axes=None, show=True, outlines='head',
contours=6, sphere=None):
"""Plot topographic maps of specific time-frequency intervals of TFR data.
Parameters
----------
tfr : AverageTFR
The AverageTFR object.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point available
is used.
fmin : None | float
The first frequency to display. If None the first frequency available
is used.
fmax : None | float
The last frequency to display. If None the last frequency available is
used.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are collected in
pairs and the mean for each pair is plotted. If None, then channels are
chosen in the order given above.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction. If None do
not apply it. If baseline is (a, b) the interval is between "a (s)" and
"b (s)". If a is None the beginning of the data is used and if b is
None then b is set to the end of the interval. If baseline is equal to
(None, None) the whole time interval is used.
mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio' | None
Perform baseline correction by
- subtracting the mean baseline power ('mean')
- dividing by the mean baseline power ('ratio')
- dividing by the mean baseline power and taking the log ('logratio')
- subtracting the mean baseline power followed by dividing by the
mean baseline power ('percent')
- subtracting the mean baseline power and dividing by the standard
deviation of the baseline power ('zscore')
- dividing by the mean baseline power, taking the log, and dividing
by the standard deviation of the baseline power ('zlogratio')
If None no baseline correction is applied.
vmin : float | callable | None
The value specifying the lower bound of the color range.
        If None and ``vmax`` is also None, ``-vmax`` is used; otherwise
        ``np.min(data)``, or 0 if the data contain only positive values.
        If callable, the output equals
vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range. If None, the
maximum value is used. If callable, the output equals vmax(data).
Defaults to None.
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap to
use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging the
colorbar with left and right mouse button. Left mouse button moves the
scale up and down and right mouse button adjusts the range. Hitting
space bar resets the range. Up and down arrows can be used to change
the colormap. If None (default), 'Reds' is used for all positive data,
otherwise defaults to 'RdBu_r'. If 'interactive', translates to
(None, True).
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib plot
format string (e.g., 'r+'). If True (default), circles will be used.
colorbar : bool
Plot a colorbar.
unit : str | None
The unit of the channel type used for colorbar labels.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches (only applies when plotting multiple
topomaps at a time).
cbar_fmt : str
String format for colorbar values.
%(topomap_show_names)s
title : str | None
Plot title. If None (default), no title is displayed.
axes : instance of Axes | None
The axes to plot to. If None the axes is defined automatically.
show : bool
Show figure if True.
%(topomap_outlines)s
contours : int | array of float
The number of contour lines to draw. If 0, no contours will be drawn.
When an integer, matplotlib ticker locator is used to find suitable
values for the contour thresholds (may sometimes be inaccurate, use
array for accuracy). If an array, the values represent the levels for
the contours. If colorbar=True, the ticks in colorbar correspond to the
contour levels. Defaults to 6.
%(topomap_sphere_auto)s
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
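    Notes
    -----
    An illustrative call averaging the alpha band over 0.1-0.3 s (``tfr``
    stands for an existing AverageTFR object; the values are arbitrary)::
        plot_tfr_topomap(tfr, tmin=0.1, tmax=0.3, fmin=8., fmax=12.,
                         baseline=(None, 0), mode='logratio')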
""" # noqa: E501
import matplotlib.pyplot as plt
ch_type = _get_ch_type(tfr, ch_type)
picks, pos, merge_channels, names, _, sphere, clip_origin = \
_prepare_topomap_plot(tfr, ch_type, sphere=sphere)
outlines = _make_head_outlines(sphere, pos, outlines, clip_origin)
if not show_names:
names = None
data = tfr.data[picks, :, :]
# merging grads before rescaling makes ERDs visible
if merge_channels:
data, names = _merge_ch_data(data, ch_type, names, method='mean')
data = rescale(data, tfr.times, baseline, mode, copy=True)
# crop time
itmin, itmax = None, None
idx = np.where(_time_mask(tfr.times, tmin, tmax))[0]
if tmin is not None:
itmin = idx[0]
if tmax is not None:
itmax = idx[-1] + 1
# crop freqs
ifmin, ifmax = None, None
idx = np.where(_time_mask(tfr.freqs, fmin, fmax))[0]
if fmin is not None:
ifmin = idx[0]
if fmax is not None:
ifmax = idx[-1] + 1
data = data[:, ifmin:ifmax, itmin:itmax]
data = np.mean(np.mean(data, axis=2), axis=1)[:, np.newaxis]
norm = False if np.min(data) < 0 else True
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax, norm)
cmap = _setup_cmap(cmap, norm=norm)
axes = plt.subplots(figsize=(size, size))[1] if axes is None else axes
fig = axes.figure
_hide_frame(axes)
locator = None
if not isinstance(contours, (list, np.ndarray)):
locator, contours = _set_contour_locator(vmin, vmax, contours)
if title is not None:
axes.set_title(title)
fig_wrapper = list()
selection_callback = partial(_onselect, tfr=tfr, pos=pos, ch_type=ch_type,
itmin=itmin, itmax=itmax, ifmin=ifmin,
ifmax=ifmax, cmap=cmap[0], fig=fig_wrapper)
if not isinstance(contours, (list, np.ndarray)):
_, contours = _set_contour_locator(vmin, vmax, contours)
im, _ = plot_topomap(data[:, 0], pos, vmin=vmin, vmax=vmax,
axes=axes, cmap=cmap[0], image_interp='bilinear',
contours=contours, names=names, show_names=show_names,
show=False, onselect=selection_callback,
sensors=sensors, res=res, ch_type=ch_type,
outlines=outlines, sphere=sphere)
if colorbar:
from matplotlib import ticker
unit = _handle_default('units', unit)['misc']
cbar, cax = _add_colorbar(axes, im, cmap, title=unit, format=cbar_fmt)
if locator is None:
locator = ticker.MaxNLocator(nbins=5)
cbar.locator = locator
cbar.update_ticks()
cbar.ax.tick_params(labelsize=12)
plt_show(show)
return fig
@fill_doc
def plot_evoked_topomap(evoked, times="auto", ch_type=None,
vmin=None, vmax=None, cmap=None, sensors=True,
colorbar=True, scalings=None,
units=None, res=64, size=1, cbar_fmt='%3.1f',
time_unit='s', time_format=None, proj=False,
show=True, show_names=False, title=None, mask=None,
mask_params=None, outlines='head', contours=6,
image_interp='bilinear', average=None,
axes=None, extrapolate=_EXTRAPOLATE_DEFAULT,
sphere=None, border=_BORDER_DEFAULT,
nrows=1, ncols='auto'):
"""Plot topographic maps of specific time points of evoked data.
Parameters
----------
evoked : Evoked
The Evoked object.
times : float | array of float | "auto" | "peaks" | "interactive"
The time point(s) to plot. If "auto", the number of ``axes`` determines
the amount of time point(s). If ``axes`` is also None, at most 10
topographies will be shown with a regular time spacing between the
first and last time instant. If "peaks", finds time points
automatically by checking for local maxima in global field power. If
"interactive", the time can be set interactively at run-time by using a
slider.
%(topomap_ch_type)s
%(topomap_vmin_vmax)s
%(topomap_cmap)s
%(topomap_sensors)s
%(topomap_colorbar)s
%(topomap_scalings)s
%(topomap_units)s
%(topomap_res)s
%(topomap_size)s
%(topomap_cbar_fmt)s
time_unit : str
The units for the time axis, can be "ms" or "s" (default).
.. versionadded:: 0.16
time_format : str | None
String format for topomap values. Defaults (None) to "%%01d ms" if
``time_unit='ms'``, "%%0.3f s" if ``time_unit='s'``, and
"%%g" otherwise. Can be an empty string to omit the time label.
%(plot_proj)s
%(show)s
%(topomap_show_names)s
%(title_None)s
%(topomap_mask)s
%(topomap_mask_params)s
%(topomap_outlines)s
%(topomap_contours)s
%(topomap_image_interp)s
%(topomap_average)s
%(topomap_axes)s
%(topomap_extrapolate)s
.. versionadded:: 0.18
%(topomap_sphere_auto)s
%(topomap_border)s
nrows : int | 'auto'
The number of rows of topographies to plot. Defaults to 1. If 'auto',
        the number of rows is determined from the number of time points to
        plot and the number of columns. Not valid when times == 'interactive'.
.. versionadded:: 0.20
ncols : int | 'auto'
The number of columns of topographies to plot. If 'auto' (default),
        the number of columns is determined from the number of time points to
        plot and the number of rows. Not valid when times == 'interactive'.
.. versionadded:: 0.20
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
Notes
-----
When existing ``axes`` are provided and ``colorbar=True``, note that the
colorbar scale will only accurately reflect topomaps that are generated in
the same call as the colorbar. Note also that the colorbar will not be
resized automatically when ``axes`` are provided; use matplotlib's
:meth:`axes.set_position() <matplotlib.axes.Axes.set_position>` method or
:doc:`gridspec <matplotlib:tutorials/intermediate/gridspec>` interface to
adjust the colorbar size yourself.
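    For instance, an illustrative sketch supplying axes for three time points
    plus one colorbar axes (``evoked`` is a placeholder for an existing
    Evoked instance; the times are arbitrary)::
        import matplotlib.pyplot as plt
        fig, axes = plt.subplots(1, 4)  # three topomaps + one colorbar axes
        plot_evoked_topomap(evoked, times=[0.1, 0.2, 0.3], colorbar=True,
                            axes=axes, show=False)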
"""
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.widgets import Slider
from ..evoked import Evoked
_validate_type(evoked, Evoked, 'evoked')
_validate_type(colorbar, bool, 'colorbar')
evoked = evoked.copy() # make a copy, since we'll be picking
ch_type = _get_ch_type(evoked, ch_type)
# time units / formatting
time_unit, _ = _check_time_unit(time_unit, evoked.times)
scaling_time = 1. if time_unit == 's' else 1e3
_validate_type(time_format, (None, str), 'time_format')
if time_format is None:
time_format = '%0.3f s' if time_unit == 's' else '%01d ms'
del time_unit
# mask_params defaults
mask_params = _handle_default('mask_params', mask_params)
mask_params['markersize'] *= size / 2.
mask_params['markeredgewidth'] *= size / 2.
# setup various parameters, and prepare outlines
picks, pos, merge_channels, names, ch_type, sphere, clip_origin = \
_prepare_topomap_plot(evoked, ch_type, sphere=sphere)
outlines = _make_head_outlines(sphere, pos, outlines, clip_origin)
# check interactive
axes_given = axes is not None
interactive = isinstance(times, str) and times == 'interactive'
if interactive and axes_given:
raise ValueError("User-provided axes not allowed when "
"times='interactive'.")
# units, scalings
key = 'grad' if ch_type.startswith('planar') else ch_type
scaling = _handle_default('scalings', scalings)[key]
unit = _handle_default('units', units)[key]
# ch_names (required for NIRS)
ch_names = names
if not show_names:
names = None
# apply projections before picking. NOTE: the `if proj is True`
# anti-pattern is needed here to exclude proj='interactive'
_check_option('proj', proj, (True, False, 'interactive', 'reconstruct'))
if proj is True and not evoked.proj:
evoked.apply_proj()
elif proj == 'reconstruct':
evoked._reconstruct_proj()
data = evoked.data
# remove compensation matrices (safe: only plotting & already made copy)
evoked.info['comps'] = []
evoked = evoked._pick_drop_channels(picks)
# determine which times to plot
if isinstance(axes, plt.Axes):
axes = [axes]
n_peaks = len(axes) - int(colorbar) if axes_given else None
times = _process_times(evoked, times, n_peaks)
n_times = len(times)
space = 1 / (2. * evoked.info['sfreq'])
if (max(times) > max(evoked.times) + space or
min(times) < min(evoked.times) - space):
raise ValueError(f'Times should be between {evoked.times[0]:0.3} and '
f'{evoked.times[-1]:0.3}.')
# create axes
want_axes = n_times + int(colorbar)
if interactive:
height_ratios = [5, 1]
nrows = 2
ncols = want_axes
width = size * ncols
height = size + max(0, 0.1 * (4 - size)) + bool(title) * 0.5
fig = figure_nobar(figsize=(width * 1.5, height * 1.5))
g_kwargs = {'left': 0.2, 'right': 0.8, 'bottom': 0.05, 'top': 0.9}
gs = GridSpec(nrows, ncols, height_ratios=height_ratios, **g_kwargs)
axes = []
for ax_idx in range(n_times):
axes.append(plt.subplot(gs[0, ax_idx]))
elif axes is None:
fig, axes, ncols, nrows = _prepare_trellis(
n_times, ncols=ncols, nrows=nrows, title=title,
colorbar=colorbar, size=size)
else:
nrows, ncols = None, None # Deactivate ncols when axes were passed
fig = axes[0].get_figure()
# check: enough space for colorbar?
if len(axes) != want_axes:
cbar_err = ' plus one for the colorbar' if colorbar else ''
raise RuntimeError(f'You must provide {want_axes} axes (one for '
f'each time{cbar_err}), got {len(axes)}.')
# figure margins
side_margin = plt.rcParams['figure.subplot.wspace'] / (2 * want_axes)
top_margin = max((0.05 if title is None else 0.25), .2 / size)
fig.subplots_adjust(left=side_margin, right=1 - side_margin, bottom=0,
top=1 - top_margin)
# find first index that's >= (to rounding error) to each time point
time_idx = [np.where(_time_mask(evoked.times, tmin=t, tmax=None,
sfreq=evoked.info['sfreq']))[0][0]
for t in times]
# do averaging if requested
avg_err = '"average" must be `None` or a positive number of seconds'
if average is None:
data = data[np.ix_(picks, time_idx)]
elif not _is_numeric(average):
raise TypeError(f'{avg_err}; got type {type(average)}.')
elif average <= 0:
raise ValueError(f'{avg_err}; got {average}.')
else:
data_ = np.zeros((len(picks), len(time_idx)))
ave_time = average / 2.
iter_times = evoked.times[time_idx]
for ii, (idx, tmin_, tmax_) in enumerate(zip(time_idx,
iter_times - ave_time,
iter_times + ave_time)):
my_range = (tmin_ < evoked.times) & (evoked.times < tmax_)
data_[:, ii] = data[picks][:, my_range].mean(-1)
data = data_
# apply scalings and merge channels
data *= scaling
if merge_channels:
data, ch_names = _merge_ch_data(data, ch_type, ch_names)
if ch_type in _fnirs_types:
merge_channels = False
# apply mask if requested
if mask is not None:
if ch_type == 'grad':
mask_ = (mask[np.ix_(picks[::2], time_idx)] |
mask[np.ix_(picks[1::2], time_idx)])
else: # mag, eeg, planar1, planar2
mask_ = mask[np.ix_(picks, time_idx)]
# set up colormap
vlims = [_setup_vmin_vmax(data[:, i], vmin, vmax, norm=merge_channels)
for i in range(n_times)]
vmin = np.min(vlims)
vmax = np.max(vlims)
cmap = _setup_cmap(cmap, n_axes=n_times, norm=vmin >= 0)
# set up contours
if not isinstance(contours, (list, np.ndarray)):
_, contours = _set_contour_locator(vmin, vmax, contours)
# prepare for main loop over times
kwargs = dict(vmin=vmin, vmax=vmax, sensors=sensors, res=res, names=names,
show_names=show_names, cmap=cmap[0], mask_params=mask_params,
outlines=outlines, contours=contours,
image_interp=image_interp, show=False,
extrapolate=extrapolate, sphere=sphere, border=border,
ch_type=ch_type)
images, contours_ = [], []
# loop over times
for idx, time in enumerate(times):
adjust_for_cbar = colorbar and ncols is not None and idx >= ncols - 1
ax_idx = idx + 1 if adjust_for_cbar else idx
tp, cn, interp = _plot_topomap(
data[:, idx], pos, axes=axes[ax_idx],
mask=mask_[:, idx] if mask is not None else None, **kwargs)
images.append(tp)
if cn is not None:
contours_.append(cn)
if time_format != '':
axes[ax_idx].set_title(time_format % (time * scaling_time))
if interactive:
axes.append(plt.subplot(gs[1, :-1]))
slider = Slider(axes[-1], 'Time', evoked.times[0], evoked.times[-1],
times[0], valfmt='%1.2fs')
slider.vline.remove() # remove initial point indicator
func = _merge_ch_data if merge_channels else lambda x: x
changed_callback = partial(_slider_changed, ax=axes[0],
data=evoked.data, times=evoked.times,
pos=pos, scaling=scaling, func=func,
time_format=time_format,
scaling_time=scaling_time, kwargs=kwargs)
slider.on_changed(changed_callback)
ts = np.tile(evoked.times, len(evoked.data)).reshape(evoked.data.shape)
axes[-1].plot(ts, evoked.data, color='k')
axes[-1].slider = slider
if title is not None:
plt.suptitle(title, verticalalignment='top', size='x-large')
if colorbar:
if interactive:
cax = plt.subplot(gs[0, -1])
_resize_cbar(cax, ncols, size)
elif nrows is None or ncols is None:
# axes were given by the user, so don't resize the colorbar
cax = axes[-1]
else: # use the entire last column
cax = axes[ncols - 1]
_resize_cbar(cax, ncols, size)
if unit is not None:
cax.set_title(unit)
cbar = fig.colorbar(images[-1], ax=cax, cax=cax, format=cbar_fmt)
if cn is not None:
cbar.set_ticks(contours)
cbar.ax.tick_params(labelsize=7)
if cmap[1]:
for im in images:
im.axes.CB = DraggableColorbar(cbar, im)
if proj == 'interactive':
_check_delayed_ssp(evoked)
params = dict(
evoked=evoked, fig=fig, projs=evoked.info['projs'], picks=picks,
images=images, contours_=contours_, pos=pos, time_idx=time_idx,
res=res, plot_update_proj_callback=_plot_update_evoked_topomap,
merge_channels=merge_channels, scale=scaling, axes=axes,
contours=contours, interp=interp, extrapolate=extrapolate)
_draw_proj_checkbox(None, params)
plt_show(show, block=False)
if axes_given:
fig.canvas.draw()
return fig
def _resize_cbar(cax, n_fig_axes, size=1):
"""Resize colorbar."""
cpos = cax.get_position()
if size <= 1:
cpos.x0 = 1 - (0.7 + 0.1 / size) / n_fig_axes
cpos.x1 = cpos.x0 + 0.1 / n_fig_axes
cpos.y0 = 0.2
cpos.y1 = 0.7
cax.set_position(cpos)
def _slider_changed(val, ax, data, times, pos, scaling, func, time_format,
scaling_time, kwargs):
"""Handle selection in interactive topomap."""
idx = np.argmin(np.abs(times - val))
data = func(data[:, idx]).ravel() * scaling
ax.clear()
im, _ = plot_topomap(data, pos, axes=ax, **kwargs)
if hasattr(ax, 'CB'):
ax.CB.mappable = im
_resize_cbar(ax.CB.cbar.ax, 2)
if time_format is not None:
ax.set_title(time_format % (val * scaling_time))
def _plot_topomap_multi_cbar(data, pos, ax, title=None, unit=None, vmin=None,
vmax=None, cmap=None, outlines='head',
colorbar=False, cbar_fmt='%3.3f',
sphere=None, ch_type='eeg'):
"""Plot topomap multi cbar."""
_hide_frame(ax)
vmin = np.min(data) if vmin is None else vmin
vmax = np.max(data) if vmax is None else vmax
# this definition of "norm" allows non-diverging colormap for cases where
    # vmin & vmax are both negative (e.g., when they are power in dB)
signs = np.sign([vmin, vmax])
norm = len(set(signs)) == 1 or np.any(signs == 0)
cmap = _setup_cmap(cmap, norm=norm)
if title is not None:
ax.set_title(title, fontsize=10)
im, _ = plot_topomap(data, pos, vmin=vmin, vmax=vmax, axes=ax,
cmap=cmap[0], image_interp='bilinear', contours=0,
outlines=outlines, show=False, sphere=sphere,
ch_type=ch_type)
if colorbar:
cbar, cax = _add_colorbar(ax, im, cmap, pad=0.25, title=None,
size="10%", format=cbar_fmt)
cbar.set_ticks((vmin, vmax))
if unit is not None:
cbar.ax.set_ylabel(unit, fontsize=8)
cbar.ax.tick_params(labelsize=8)
@verbose
def plot_epochs_psd_topomap(epochs, bands=None,
tmin=None, tmax=None, proj=False,
bandwidth=None, adaptive=False, low_bias=True,
normalization='length', ch_type=None,
cmap=None, agg_fun=None, dB=False, n_jobs=1,
normalize=False, cbar_fmt='auto',
outlines='head', axes=None, show=True,
sphere=None, vlim=(None, None), verbose=None):
"""Plot the topomap of the power spectral density across epochs.
Parameters
----------
epochs : instance of Epochs
The epochs object.
%(psd_topo_bands)s
tmin : float | None
Start time to consider.
tmax : float | None
End time to consider.
proj : bool
Apply projection.
bandwidth : float
The bandwidth of the multi taper windowing function in Hz. The default
value is a window half-bandwidth of 4 Hz.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90%% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are collected in
pairs and the mean for each pair is plotted. If None, then first
available channel type from order given above is used. Defaults to
None.
%(psd_topo_cmap)s
%(psd_topo_agg_fun)s
%(psd_topo_dB)s
%(n_jobs)s
%(psd_topo_normalize)s
%(psd_topo_cbar_fmt)s
%(topomap_outlines)s
%(psd_topo_axes)s
show : bool
Show figure if True.
%(topomap_sphere_auto)s
%(psd_topo_vlim_joint)s
%(verbose)s
Returns
-------
fig : instance of Figure
        Figure with a topomap subplot for each frequency band.
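    Notes
    -----
    An illustrative call restricted to a single alpha band (``epochs`` is a
    placeholder for an existing Epochs object)::
        plot_epochs_psd_topomap(epochs, bands=[(8, 12, 'Alpha (8-12 Hz)')],
                                ch_type='eeg', normalize=True)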
"""
ch_type = _get_ch_type(epochs, ch_type)
units = _handle_default('units', None)
unit = units[ch_type]
picks, pos, merge_channels, names, ch_type, sphere, clip_origin = \
_prepare_topomap_plot(epochs, ch_type, sphere=sphere)
outlines = _make_head_outlines(sphere, pos, outlines, clip_origin)
psds, freqs = psd_multitaper(epochs, tmin=tmin, tmax=tmax,
bandwidth=bandwidth, adaptive=adaptive,
low_bias=low_bias,
normalization=normalization, picks=picks,
proj=proj, n_jobs=n_jobs)
psds = np.mean(psds, axis=0)
if merge_channels:
psds, names = _merge_ch_data(psds, ch_type, names, method='mean')
return plot_psds_topomap(
psds=psds, freqs=freqs, pos=pos, agg_fun=agg_fun,
bands=bands, cmap=cmap, dB=dB, normalize=normalize,
cbar_fmt=cbar_fmt, outlines=outlines, axes=axes, show=show,
sphere=sphere, vlim=vlim, unit=unit, ch_type=ch_type)
@fill_doc
def plot_psds_topomap(
psds, freqs, pos, agg_fun=None, bands=None,
cmap=None, dB=True, normalize=False, cbar_fmt='%0.3f', outlines='head',
axes=None, show=True, sphere=None, vlim=(None, None), unit=None,
ch_type='eeg'):
"""Plot spatial maps of PSDs.
Parameters
----------
psds : np.ndarray of float, shape (n_channels, n_freqs)
        Power spectral densities.
freqs : np.ndarray of float, shape (n_freqs)
Frequencies used to compute psds.
pos : numpy.ndarray of float, shape (n_sensors, 2)
The positions of the sensors.
%(psd_topo_agg_fun)s
%(psd_topo_bands)s
%(psd_topo_cmap)s
%(psd_topo_dB)s
%(psd_topo_normalize)s
%(psd_topo_cbar_fmt)s
%(topomap_outlines)s
%(psd_topo_axes)s
show : bool
Show figure if True.
%(topomap_sphere)s
%(psd_topo_vlim_joint)s
unit : str | None
Measurement unit to be displayed with the colorbar. If ``None``, no
unit is displayed (only "power" or "dB" as appropriate).
%(topomap_ch_type)s
Returns
-------
fig : instance of matplotlib.figure.Figure
Figure with a topomap subplot for each band.
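    Notes
    -----
    An illustrative call with two explicit frequency bands (``psds``,
    ``freqs`` and ``pos`` are placeholders for an existing PSD estimate and
    sensor positions)::
        bands = [(8, 12, 'Alpha (8-12 Hz)'), (12, 30, 'Beta (12-30 Hz)')]
        plot_psds_topomap(psds, freqs, pos, bands=bands, dB=True)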
"""
import matplotlib.pyplot as plt
sphere = _check_sphere(sphere)
if cbar_fmt == 'auto':
cbar_fmt = '%0.1f' if dB else '%0.3f'
if bands is None:
bands = [(0, 4, 'Delta (0-4 Hz)'), (4, 8, 'Theta (4-8 Hz)'),
(8, 12, 'Alpha (8-12 Hz)'), (12, 30, 'Beta (12-30 Hz)'),
(30, 45, 'Gamma (30-45 Hz)')]
else: # upconvert single freqs to band upper/lower edges as needed
bin_spacing = np.diff(freqs)[0]
bin_edges = np.array([0, bin_spacing]) - bin_spacing / 2
bands = [tuple(bin_edges + freqs[np.argmin(np.abs(freqs - band[0]))]) +
(band[1],) if len(band) == 2 else band for band in bands]
if agg_fun is None:
agg_fun = np.sum if normalize else np.mean
if normalize:
psds /= psds.sum(axis=-1, keepdims=True)
assert np.allclose(psds.sum(axis=-1), 1.)
n_axes = len(bands)
if axes is not None:
_validate_if_list_of_axes(axes, n_axes)
fig = axes[0].figure
else:
fig, axes = plt.subplots(1, n_axes, figsize=(2 * n_axes, 1.5))
if n_axes == 1:
axes = [axes]
# handle vmin/vmax
if vlim == 'joint':
_freq_masks = [(fmin < freqs) & (freqs < fmax)
for (fmin, fmax, _) in bands]
_datas = [agg_fun(psds[:, _freq_mask], axis=1)
for _freq_mask in _freq_masks]
_datas = [10 * np.log10(_d) if (dB and not normalize) else _d
for _d in _datas]
vmin = np.array(_datas).min()
vmax = np.array(_datas).max()
else:
vmin, vmax = vlim
if unit is None:
unit = 'dB' if dB and not normalize else 'power'
else:
if '/' in unit:
unit = '(%s)' % unit
unit += '²/Hz'
if dB and not normalize:
unit += ' (dB)'
for ax, (fmin, fmax, title) in zip(axes, bands):
freq_mask = (fmin < freqs) & (freqs < fmax)
if freq_mask.sum() == 0:
raise RuntimeError('No frequencies in band "%s" (%s, %s)'
% (title, fmin, fmax))
data = agg_fun(psds[:, freq_mask], axis=1)
if dB and not normalize:
data = 10 * np.log10(data)
_plot_topomap_multi_cbar(data, pos, ax, title=title, vmin=vmin,
vmax=vmax, cmap=cmap, outlines=outlines,
colorbar=True, unit=unit, cbar_fmt=cbar_fmt,
sphere=sphere, ch_type=ch_type)
tight_layout(fig=fig)
fig.canvas.draw()
plt_show(show)
return fig
@fill_doc
def plot_layout(layout, picks=None, show_axes=False, show=True):
"""Plot the sensor positions.
Parameters
----------
layout : None | Layout
Layout instance specifying sensor positions.
%(picks_nostr)s
show_axes : bool
Show layout axes if True. Defaults to False.
show : bool
Show figure if True. Defaults to True.
Returns
-------
fig : instance of Figure
Figure containing the sensor topography.
Notes
-----
.. versionadded:: 0.12.0
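    A minimal illustrative call (``layout`` stands for any Layout instance)::
        plot_layout(layout, show_axes=True)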
"""
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(max(plt.rcParams['figure.figsize']),) * 2)
ax = fig.add_subplot(111)
fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None,
hspace=None)
ax.set(xticks=[], yticks=[], aspect='equal')
outlines = dict(border=([0, 1, 1, 0, 0], [0, 0, 1, 1, 0]))
_draw_outlines(ax, outlines)
picks = _picks_to_idx(len(layout.names), picks)
pos = layout.pos[picks]
names = np.array(layout.names)[picks]
for ii, (p, ch_id) in enumerate(zip(pos, names)):
center_pos = np.array((p[0] + p[2] / 2., p[1] + p[3] / 2.))
ax.annotate(ch_id, xy=center_pos, horizontalalignment='center',
verticalalignment='center', size='x-small')
if show_axes:
x1, x2, y1, y2 = p[0], p[0] + p[2], p[1], p[1] + p[3]
ax.plot([x1, x1, x2, x2, x1], [y1, y2, y2, y1, y1], color='k')
ax.axis('off')
tight_layout(fig=fig, pad=0, w_pad=0, h_pad=0)
plt_show(show)
return fig
def _onselect(eclick, erelease, tfr, pos, ch_type, itmin, itmax, ifmin, ifmax,
cmap, fig, layout=None):
"""Handle drawing average tfr over channels called from topomap."""
import matplotlib.pyplot as plt
from matplotlib.collections import PathCollection
ax = eclick.inaxes
xmin = min(eclick.xdata, erelease.xdata)
xmax = max(eclick.xdata, erelease.xdata)
ymin = min(eclick.ydata, erelease.ydata)
ymax = max(eclick.ydata, erelease.ydata)
indices = ((pos[:, 0] < xmax) & (pos[:, 0] > xmin) &
(pos[:, 1] < ymax) & (pos[:, 1] > ymin))
colors = ['r' if ii else 'k' for ii in indices]
indices = np.where(indices)[0]
for collection in ax.collections:
if isinstance(collection, PathCollection): # this is our "scatter"
collection.set_color(colors)
ax.figure.canvas.draw()
if len(indices) == 0:
return
data = tfr.data
if ch_type == 'mag':
picks = pick_types(tfr.info, meg=ch_type, ref_meg=False)
data = np.mean(data[indices, ifmin:ifmax, itmin:itmax], axis=0)
chs = [tfr.ch_names[picks[x]] for x in indices]
elif ch_type == 'grad':
grads = _pair_grad_sensors(tfr.info, layout=layout,
topomap_coords=False)
idxs = list()
for idx in indices:
idxs.append(grads[idx * 2])
idxs.append(grads[idx * 2 + 1]) # pair of grads
data = np.mean(data[idxs, ifmin:ifmax, itmin:itmax], axis=0)
chs = [tfr.ch_names[x] for x in idxs]
elif ch_type == 'eeg':
picks = pick_types(tfr.info, meg=False, eeg=True, ref_meg=False)
data = np.mean(data[indices, ifmin:ifmax, itmin:itmax], axis=0)
chs = [tfr.ch_names[picks[x]] for x in indices]
logger.info('Averaging TFR over channels ' + str(chs))
if len(fig) == 0:
fig.append(figure_nobar())
if not plt.fignum_exists(fig[0].number):
fig[0] = figure_nobar()
ax = fig[0].add_subplot(111)
itmax = len(tfr.times) - 1 if itmax is None else min(itmax,
len(tfr.times) - 1)
ifmax = len(tfr.freqs) - 1 if ifmax is None else min(ifmax,
len(tfr.freqs) - 1)
if itmin is None:
itmin = 0
if ifmin is None:
ifmin = 0
extent = (tfr.times[itmin] * 1e3, tfr.times[itmax] * 1e3, tfr.freqs[ifmin],
tfr.freqs[ifmax])
title = 'Average over %d %s channels.' % (len(chs), ch_type)
ax.set_title(title)
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')
img = ax.imshow(data, extent=extent, aspect="auto", origin="lower",
cmap=cmap)
if len(fig[0].get_axes()) < 2:
fig[0].get_axes()[1].cbar = fig[0].colorbar(mappable=img)
else:
fig[0].get_axes()[1].cbar.on_mappable_changed(mappable=img)
fig[0].canvas.draw()
plt.figure(fig[0].number)
plt_show(True)
def _prepare_topomap(pos, ax, check_nonzero=True):
"""Prepare the topomap axis and check positions.
    Hides the axis frame and checks that position information is present.
"""
_hide_frame(ax)
if check_nonzero and not pos.any():
raise RuntimeError('No position information found, cannot compute '
'geometries for topomap.')
def _hide_frame(ax):
"""Hide axis frame for topomaps."""
ax.get_yticks()
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.set_frame_on(False)
def _check_extrapolate(extrapolate, ch_type):
_check_option('extrapolate', extrapolate, ('box', 'local', 'head', 'auto'))
if extrapolate == 'auto':
extrapolate = 'local' if ch_type in _MEG_CH_TYPES_SPLIT else 'head'
return extrapolate
@verbose
def _init_anim(ax, ax_line, ax_cbar, params, merge_channels, sphere, ch_type,
extrapolate, verbose):
"""Initialize animated topomap."""
logger.info('Initializing animation...')
data = params['data']
items = list()
if params['butterfly']:
all_times = params['all_times']
for idx in range(len(data)):
ax_line.plot(all_times, data[idx], color='k', lw=1)
vmin, vmax = _setup_vmin_vmax(data, None, None)
ax_line.set(yticks=np.around(np.linspace(vmin, vmax, 5), -1),
xlim=all_times[[0, -1]])
params['line'] = ax_line.axvline(all_times[0], color='r')
items.append(params['line'])
if merge_channels:
from mne.channels.layout import _merge_ch_data
data, _ = _merge_ch_data(data, 'grad', [])
norm = True if np.min(data) > 0 else False
cmap = 'Reds' if norm else 'RdBu_r'
vmin, vmax = _setup_vmin_vmax(data, None, None, norm)
outlines = _make_head_outlines(sphere, params['pos'], 'head',
params['clip_origin'])
_hide_frame(ax)
extent, Xi, Yi, interp = _setup_interp(
params['pos'], 64, extrapolate, sphere, outlines, 0)
patch_ = _get_patch(outlines, extrapolate, interp, ax)
params['Zis'] = list()
for frame in params['frames']:
params['Zis'].append(interp.set_values(data[:, frame])(Xi, Yi))
Zi = params['Zis'][0]
zi_min = np.nanmin(params['Zis'])
zi_max = np.nanmax(params['Zis'])
cont_lims = np.linspace(zi_min, zi_max, 7, endpoint=False)[1:]
params.update({'vmin': vmin, 'vmax': vmax, 'Xi': Xi, 'Yi': Yi, 'Zi': Zi,
'extent': extent, 'cmap': cmap, 'cont_lims': cont_lims})
# plot map and contour
im = ax.imshow(Zi, cmap=cmap, vmin=vmin, vmax=vmax, origin='lower',
aspect='equal', extent=extent,
interpolation='bilinear')
ax.autoscale(enable=True, tight=True)
ax.figure.colorbar(im, cax=ax_cbar)
cont = ax.contour(Xi, Yi, Zi, levels=cont_lims, colors='k', linewidths=1)
im.set_clip_path(patch_)
text = ax.text(0.55, 0.95, '', transform=ax.transAxes, va='center',
ha='right')
params['text'] = text
items.append(im)
items.append(text)
for col in cont.collections:
col.set_clip_path(patch_)
outlines_ = _draw_outlines(ax, outlines)
params.update({'patch': patch_, 'outlines': outlines_})
ax.figure.tight_layout()
return tuple(items) + tuple(cont.collections)
def _animate(frame, ax, ax_line, params):
"""Update animated topomap."""
if params['pause']:
frame = params['frame']
time_idx = params['frames'][frame]
if params['time_unit'] == 'ms':
title = '%6.0f ms' % (params['times'][frame] * 1e3,)
else:
title = '%6.3f s' % (params['times'][frame],)
if params['blit']:
text = params['text']
else:
ax.cla() # Clear old contours.
text = ax.text(0.45, 1.15, '', transform=ax.transAxes)
for k, (x, y) in params['outlines'].items():
if 'mask' in k:
continue
ax.plot(x, y, color='k', linewidth=1, clip_on=False)
_hide_frame(ax)
text.set_text(title)
vmin = params['vmin']
vmax = params['vmax']
Xi = params['Xi']
Yi = params['Yi']
Zi = params['Zis'][frame]
extent = params['extent']
cmap = params['cmap']
patch = params['patch']
im = ax.imshow(Zi, cmap=cmap, vmin=vmin, vmax=vmax, origin='lower',
aspect='equal', extent=extent, interpolation='bilinear')
cont_lims = params['cont_lims']
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore')
cont = ax.contour(
Xi, Yi, Zi, levels=cont_lims, colors='k', linewidths=1)
im.set_clip_path(patch)
for col in cont.collections:
col.set_clip_path(patch)
items = [im, text]
if params['butterfly']:
all_times = params['all_times']
line = params['line']
line.remove()
ylim = ax_line.get_ylim()
params['line'] = ax_line.axvline(all_times[time_idx], color='r')
ax_line.set_ylim(ylim)
items.append(params['line'])
params['frame'] = frame
return tuple(items) + tuple(cont.collections)
def _pause_anim(event, params):
"""Pause or continue the animation on mouse click."""
params['pause'] = not params['pause']
def _key_press(event, params):
"""Handle key presses for the animation."""
if event.key == 'left':
params['pause'] = True
params['frame'] = max(params['frame'] - 1, 0)
elif event.key == 'right':
params['pause'] = True
params['frame'] = min(params['frame'] + 1, len(params['frames']) - 1)
def _topomap_animation(evoked, ch_type, times, frame_rate, butterfly, blit,
show, time_unit, sphere, extrapolate, *, verbose=None):
"""Make animation of evoked data as topomap timeseries.
See mne.evoked.Evoked.animate_topomap.
"""
from matplotlib import pyplot as plt, animation
if ch_type is None:
ch_type = _picks_by_type(evoked.info)[0][0]
if ch_type not in ('mag', 'grad', 'eeg',
'hbo', 'hbr', 'fnirs_od', 'fnirs_cw_amplitude'):
raise ValueError("Channel type not supported. Supported channel "
"types include 'mag', 'grad', 'eeg'. 'hbo', 'hbr', "
"'fnirs_cw_amplitude', and 'fnirs_od'.")
time_unit, _ = _check_time_unit(time_unit, evoked.times)
if times is None:
times = np.linspace(evoked.times[0], evoked.times[-1], 10)
times = np.array(times)
if times.ndim != 1:
raise ValueError('times must be 1D, got %d dimensions' % times.ndim)
if max(times) > evoked.times[-1] or min(times) < evoked.times[0]:
raise ValueError('All times must be inside the evoked time series.')
frames = [np.abs(evoked.times - time).argmin() for time in times]
picks, pos, merge_channels, _, ch_type, sphere, clip_origin = \
_prepare_topomap_plot(evoked, ch_type, sphere=sphere)
data = evoked.data[picks, :]
data *= _handle_default('scalings')[ch_type]
fig = plt.figure(figsize=(6, 5))
shape = (8, 12)
colspan = shape[1] - 1
rowspan = shape[0] - bool(butterfly)
ax = plt.subplot2grid(shape, (0, 0), rowspan=rowspan, colspan=colspan)
if butterfly:
ax_line = plt.subplot2grid(shape, (rowspan, 0), colspan=colspan)
else:
ax_line = None
if isinstance(frames, Integral):
frames = np.linspace(0, len(evoked.times) - 1, frames).astype(int)
ax_cbar = plt.subplot2grid(shape, (0, colspan), rowspan=rowspan)
ax_cbar.set_title(_handle_default('units')[ch_type], fontsize=10)
extrapolate = _check_extrapolate(extrapolate, ch_type)
params = dict(data=data, pos=pos, all_times=evoked.times, frame=0,
frames=frames, butterfly=butterfly, blit=blit,
pause=False, times=times, time_unit=time_unit,
clip_origin=clip_origin)
init_func = partial(_init_anim, ax=ax, ax_cbar=ax_cbar, ax_line=ax_line,
params=params, merge_channels=merge_channels,
sphere=sphere, ch_type=ch_type,
extrapolate=extrapolate, verbose=verbose)
animate_func = partial(_animate, ax=ax, ax_line=ax_line, params=params)
pause_func = partial(_pause_anim, params=params)
fig.canvas.mpl_connect('button_press_event', pause_func)
key_press_func = partial(_key_press, params=params)
fig.canvas.mpl_connect('key_press_event', key_press_func)
if frame_rate is None:
frame_rate = evoked.info['sfreq'] / 10.
interval = 1000 / frame_rate # interval is in ms
anim = animation.FuncAnimation(fig, animate_func, init_func=init_func,
frames=len(frames), interval=interval,
blit=blit)
fig.mne_animation = anim # to make sure anim is not garbage collected
plt_show(show, block=False)
if 'line' in params:
# Finally remove the vertical line so it does not appear in saved fig.
params['line'].remove()
return fig, anim
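# Illustrative usage sketch, not from the original module ("evoked" is an assumed
# mne.Evoked instance): this helper is normally reached through the public wrapper,
# e.g. fig, anim = evoked.animate_topomap(ch_type='eeg', frame_rate=10), and the
# returned FuncAnimation stays alive because it is attached to fig.mne_animation above.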
def _set_contour_locator(vmin, vmax, contours):
"""Set correct contour levels."""
locator = None
if isinstance(contours, Integral) and contours > 0:
from matplotlib import ticker
# nbins = ticks - 1, since 2 of the ticks are vmin and vmax, the
# correct number of bins is equal to contours + 1.
locator = ticker.MaxNLocator(nbins=contours + 1)
contours = locator.tick_values(vmin, vmax)
return locator, contours
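# Illustrative worked example, not from the original module: with vmin=0, vmax=1 and
# contours=6 the MaxNLocator above is created with nbins=7, and tick_values(0, 1)
# returns a short array of "nice" levels whose first and last ticks bracket vmin and
# vmax -- which is why nbins must be contours + 1 rather than contours. The exact
# levels depend on matplotlib's tick selection.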
def _plot_corrmap(data, subjs, indices, ch_type, ica, label, show, outlines,
cmap, contours, template=False, sphere=None):
"""Customize ica.plot_components for corrmap."""
if not template:
title = 'Detected components'
if label is not None:
title += ' of type ' + label
else:
title = "Supplied template"
picks = list(range(len(data)))
p = 20
if len(picks) > p: # plot components by sets of 20
n_components = len(picks)
figs = [_plot_corrmap(data[k:k + p], subjs[k:k + p],
indices[k:k + p], ch_type, ica, label, show,
outlines=outlines, cmap=cmap, contours=contours)
for k in range(0, n_components, p)]
return figs
elif np.isscalar(picks):
picks = [picks]
data_picks, pos, merge_channels, names, _, sphere, clip_origin = \
_prepare_topomap_plot(ica, ch_type, sphere=sphere)
outlines = _make_head_outlines(sphere, pos, outlines, clip_origin)
data = np.atleast_2d(data)
data = data[:, data_picks]
# prepare data for iteration
fig, axes, _, _ = _prepare_trellis(len(picks), ncols=5)
fig.suptitle(title)
for ii, data_, ax, subject, idx in zip(picks, data, axes, subjs, indices):
if template:
ttl = 'Subj. {}, {}'.format(subject, ica._ica_names[idx])
ax.set_title(ttl, fontsize=12)
else:
ax.set_title('Subj. {}'.format(subject))
if merge_channels:
data_, _ = _merge_ch_data(data_, ch_type, [])
vmin_, vmax_ = _setup_vmin_vmax(data_, None, None)
plot_topomap(data_.flatten(), pos, vmin=vmin_, vmax=vmax_,
res=64, axes=ax, cmap=cmap, outlines=outlines,
contours=contours, show=False, image_interp='bilinear')[0]
_hide_frame(ax)
tight_layout(fig=fig)
fig.subplots_adjust(top=0.8)
fig.canvas.draw()
plt_show(show)
return fig
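# Note added for illustration, not from the original module: when more than 20
# components are passed in, the batching branch above recurses over slices of 20 and
# _plot_corrmap returns a list of figures instead of a single figure.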
def _trigradient(x, y, z):
"""Take gradients of z on a mesh."""
from matplotlib.tri import CubicTriInterpolator, Triangulation
with warnings.catch_warnings(): # catch matplotlib warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
tri = Triangulation(x, y)
tci = CubicTriInterpolator(tri, z)
dx, dy = tci.gradient(tri.x, tri.y)
return dx, dy
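# Illustrative usage sketch, not from the original module: given 2D sensor positions
# and one value per sensor,
#     dx, dy = _trigradient(pos[:, 0], pos[:, 1], data)
# returns masked arrays of d(data)/dx and d(data)/dy at each position; plot_arrowmap
# below rotates them (u, v = dy.data, -dx.data) to obtain the arrow field.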
@fill_doc
def plot_arrowmap(data, info_from, info_to=None, scale=3e-10, vmin=None,
vmax=None, cmap=None, sensors=True, res=64, axes=None,
names=None, show_names=False, mask=None, mask_params=None,
outlines='head', contours=6, image_interp='bilinear',
show=True, onselect=None, extrapolate=_EXTRAPOLATE_DEFAULT,
sphere=None):
"""Plot arrow map.
    Compute arrowmaps based upon the Hosaka-Cohen transformation
    :footcite:`CohenHosaka1976`. These arrows represent an estimate of the
    current flow underneath the MEG sensors. They are a poor man's MNE.
    Since planar gradiometers take gradients along latitude and longitude,
    they need to be projected onto the flattened manifold spanned by the
    magnetometers or radial gradiometers before taking the gradients in the
    2D Cartesian coordinate system for visualization on the 2D topoplot. You
    can use the ``info_from`` and ``info_to`` parameters to interpolate from
    gradiometer data to magnetometer data.
Parameters
----------
data : array, shape (n_channels,)
The data values to plot.
info_from : instance of Info
The measurement info from data to interpolate from.
info_to : instance of Info | None
The measurement info to interpolate to. If None, it is assumed
to be the same as info_from.
scale : float, default 3e-10
To scale the arrows.
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the output
equals vmax(data). Defaults to None.
cmap : matplotlib colormap | None
Colormap to use. If None, 'Reds' is used for all positive data,
otherwise defaults to 'RdBu_r'.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib plot
format string (e.g., 'r+' for red plusses). If True (default), circles
will be used.
res : int
The resolution of the topomap image (n pixels along each side).
axes : instance of Axes | None
The axes to plot to. If None, a new figure will be created.
names : list | None
List of channel names. If None, channel names are not plotted.
%(topomap_show_names)s
If ``True``, a list of names must be provided (see ``names`` keyword).
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to ``True`` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
%(topomap_outlines)s
contours : int | array of float
The number of contour lines to draw. If 0, no contours will be drawn.
If an array, the values represent the levels for the contours. The
values are in µV for EEG, fT for magnetometers and fT/m for
gradiometers. Defaults to 6.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
show : bool
Show figure if True.
onselect : callable | None
Handle for a function that is called when the user selects a set of
channels by rectangle selection (matplotlib ``RectangleSelector``). If
None interactive selection is disabled. Defaults to None.
%(topomap_extrapolate)s
.. versionadded:: 0.18
%(topomap_sphere_auto)s
Returns
-------
fig : matplotlib.figure.Figure
The Figure of the plot.
Notes
-----
.. versionadded:: 0.17
References
----------
.. footbibliography::
"""
from matplotlib import pyplot as plt
from ..forward import _map_meg_or_eeg_channels
sphere = _check_sphere(sphere, info_from)
ch_type = _picks_by_type(info_from)
if len(ch_type) > 1:
        raise ValueError('Multiple channel types are not supported. '
'All channels must either be of type \'grad\' '
'or \'mag\'.')
else:
ch_type = ch_type[0][0]
if ch_type not in ('mag', 'grad'):
raise ValueError("Channel type '%s' not supported. Supported channel "
"types are 'mag' and 'grad'." % ch_type)
if info_to is None and ch_type == 'mag':
info_to = info_from
else:
ch_type = _picks_by_type(info_to)
if len(ch_type) > 1:
raise ValueError("Multiple channel types are not supported.")
else:
ch_type = ch_type[0][0]
if ch_type != 'mag':
raise ValueError("only 'mag' channel type is supported. "
"Got %s" % ch_type)
if info_to is not info_from:
info_to = pick_info(info_to, pick_types(info_to, meg=True))
info_from = pick_info(info_from, pick_types(info_from, meg=True))
# XXX should probably support the "origin" argument
mapping = _map_meg_or_eeg_channels(
info_from, info_to, origin=(0., 0., 0.04), mode='accurate')
data = np.dot(mapping, data)
_, pos, _, _, _, sphere, clip_origin = \
_prepare_topomap_plot(info_to, 'mag', sphere=sphere)
outlines = _make_head_outlines(
sphere, pos, outlines, clip_origin)
if axes is None:
fig, axes = plt.subplots()
else:
fig = axes.figure
plot_topomap(data, pos, axes=axes, vmin=vmin, vmax=vmax, cmap=cmap,
sensors=sensors, res=res, names=names, show_names=show_names,
mask=mask, mask_params=mask_params, outlines=outlines,
contours=contours, image_interp=image_interp, show=False,
onselect=onselect, extrapolate=extrapolate, sphere=sphere,
ch_type=ch_type)
x, y = tuple(pos.T)
dx, dy = _trigradient(x, y, data)
dxx = dy.data
dyy = -dx.data
axes.quiver(x, y, dxx, dyy, scale=scale, color='k', lw=1, clip_on=False)
axes.figure.canvas.draw_idle()
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore')
tight_layout(fig=fig)
plt_show(show)
return fig
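# Illustrative usage sketch, not from the original module ("evoked" is an assumed
# mne.Evoked instance that already contains magnetometer data):
#     evoked_mag = evoked.copy().pick_types(meg='mag')
#     fig = plot_arrowmap(evoked_mag.data[:, 100], evoked_mag.info)
# i.e. a single time sample of shape (n_channels,) plus the matching Info; with
# info_to left as None no gradiometer-to-magnetometer interpolation is performed.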
| bsd-3-clause | 457,051,150,807,582,500 | 40.261351 | 84 | 0.587548 | false |
alexgleith/Quantum-GIS | python/plugins/sextante/algs/AddTableField.py | 2 | 4121 | # -*- coding: utf-8 -*-
"""
***************************************************************************
AddTableField.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from sextante.core.GeoAlgorithm import GeoAlgorithm
from sextante.core.QGisLayers import QGisLayers
from sextante.parameters.ParameterVector import ParameterVector
from sextante.parameters.ParameterString import ParameterString
from sextante.parameters.ParameterNumber import ParameterNumber
from sextante.parameters.ParameterSelection import ParameterSelection
from sextante.outputs.OutputVector import OutputVector
class AddTableField(GeoAlgorithm):
OUTPUT_LAYER = "OUTPUT_LAYER"
INPUT_LAYER = "INPUT_LAYER"
FIELD_NAME = "FIELD_NAME"
FIELD_TYPE = "FIELD_TYPE"
FIELD_LENGTH = "FIELD_LENGTH"
FIELD_PRECISION = "FIELD_PRECISION"
TYPE_NAMES = ["Integer", "Float", "String"]
TYPES = [QVariant.Int, QVariant.Double, QVariant.String]
#===========================================================================
# def getIcon(self):
# return QtGui.QIcon(os.path.dirname(__file__) + "/../images/qgis.png")
#===========================================================================
def defineCharacteristics(self):
self.name = "Add field to attributes table"
self.group = "Vector table tools"
self.addParameter(ParameterVector(self.INPUT_LAYER, "Input layer", ParameterVector.VECTOR_TYPE_ANY, False))
self.addParameter(ParameterString(self.FIELD_NAME, "Field name"))
self.addParameter(ParameterSelection(self.FIELD_TYPE, "Field type", self.TYPE_NAMES))
self.addParameter(ParameterNumber(self.FIELD_LENGTH, "Field length", 1, 255, 10))
self.addParameter(ParameterNumber(self.FIELD_PRECISION, "Field precision", 0, 10, 0))
self.addOutput(OutputVector(self.OUTPUT_LAYER, "Output layer"))
def processAlgorithm(self, progress):
fieldType = self.getParameterValue(self.FIELD_TYPE)
fieldName = self.getParameterValue(self.FIELD_NAME)
fieldLength = self.getParameterValue(self.FIELD_LENGTH)
fieldPrecision = self.getParameterValue(self.FIELD_PRECISION)
output = self.getOutputFromName(self.OUTPUT_LAYER)
layer = QGisLayers.getObjectFromUri(self.getParameterValue(self.INPUT_LAYER))
provider = layer.dataProvider()
fields = provider.fields()
fields.append(QgsField(fieldName, self.TYPES[fieldType], "", fieldLength, fieldPrecision))
writer = output.getVectorWriter(fields, provider.geometryType(), layer.crs())
outFeat = QgsFeature()
inGeom = QgsGeometry()
nElement = 0
features = QGisLayers.features(layer)
nFeat = len(features)
for inFeat in features:
progress.setPercentage(int((100 * nElement)/nFeat))
nElement += 1
inGeom = inFeat.geometry()
outFeat.setGeometry( inGeom )
atMap = inFeat.attributes()
atMap.append(None)
outFeat.setAttributes(atMap)
writer.addFeature( outFeat )
del writer
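# Note added for illustration, not from the original algorithm: deleting the writer is
# what closes the underlying QGIS vector writer and flushes the output file to disk
# before SEXTANTE loads OUTPUT_LAYER as a result.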
| gpl-2.0 | 4,025,250,056,043,229,700 | 42.378947 | 115 | 0.583111 | false |
DANCEcollaborative/forum-xblock | XBlock Integration Files/xdjangobb/xblock/lib/python2.7/site-packages/django/contrib/gis/geos/libgeos.py | 4 | 5646 | """
This module houses the ctypes initialization procedures, as well
as the notice and error handler function callbacks (get called
when an error occurs in GEOS).
This module also houses GEOS Pointer utilities, including
get_pointer_arr(), and GEOM_PTR.
"""
import os
import re
import sys
from ctypes import c_char_p, Structure, CDLL, CFUNCTYPE, POINTER
from ctypes.util import find_library
from django.contrib.gis.geos.error import GEOSException
# Custom library path set?
try:
from django.conf import settings
lib_path = settings.GEOS_LIBRARY_PATH
except (AttributeError, EnvironmentError, ImportError):
lib_path = None
# Setting the appropriate names for the GEOS-C library.
if lib_path:
lib_names = None
elif os.name == 'nt':
# Windows NT libraries
lib_names = ['geos_c', 'libgeos_c-1']
elif os.name == 'posix':
# *NIX libraries
lib_names = ['geos_c', 'GEOS']
else:
raise ImportError('Unsupported OS "%s"' % os.name)
# Using the ctypes `find_library` utility to find the path to the GEOS
# shared library. This is better than manually specifying each library name
# and extension (e.g., libgeos_c.[so|so.1|dylib].).
if lib_names:
for lib_name in lib_names:
lib_path = find_library(lib_name)
if not lib_path is None: break
# No GEOS library could be found.
if lib_path is None:
raise ImportError('Could not find the GEOS library (tried "%s"). '
'Try setting GEOS_LIBRARY_PATH in your settings.' %
'", "'.join(lib_names))
# Getting the GEOS C library. The C interface (CDLL) is used for
# both *NIX and Windows.
# See the GEOS C API source code for more details on the library function calls:
# http://geos.refractions.net/ro/doxygen_docs/html/geos__c_8h-source.html
lgeos = CDLL(lib_path)
# The notice and error handler C function callback definitions.
# Supposed to mimic the GEOS message handler (C below):
# typedef void (*GEOSMessageHandler)(const char *fmt, ...);
NOTICEFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def notice_h(fmt, lst, output_h=sys.stdout):
try:
warn_msg = fmt % lst
except:
warn_msg = fmt
output_h.write('GEOS_NOTICE: %s\n' % warn_msg)
notice_h = NOTICEFUNC(notice_h)
ERRORFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def error_h(fmt, lst, output_h=sys.stderr):
try:
err_msg = fmt % lst
except:
err_msg = fmt
output_h.write('GEOS_ERROR: %s\n' % err_msg)
error_h = ERRORFUNC(error_h)
#### GEOS Geometry C data structures, and utility functions. ####
# Opaque GEOS geometry structures, used for GEOM_PTR and CS_PTR
class GEOSGeom_t(Structure): pass
class GEOSPrepGeom_t(Structure): pass
class GEOSCoordSeq_t(Structure): pass
class GEOSContextHandle_t(Structure): pass
# Pointers to opaque GEOS geometry structures.
GEOM_PTR = POINTER(GEOSGeom_t)
PREPGEOM_PTR = POINTER(GEOSPrepGeom_t)
CS_PTR = POINTER(GEOSCoordSeq_t)
CONTEXT_PTR = POINTER(GEOSContextHandle_t)
# Used specifically by the GEOSGeom_createPolygon and GEOSGeom_createCollection
# GEOS routines
def get_pointer_arr(n):
"Gets a ctypes pointer array (of length `n`) for GEOSGeom_t opaque pointer."
GeomArr = GEOM_PTR * n
return GeomArr()
# Returns the string version of the GEOS library. Have to set the restype
# explicitly to c_char_p to ensure compatibility across 32 and 64-bit platforms.
geos_version = lgeos.GEOSversion
geos_version.argtypes = None
geos_version.restype = c_char_p
# Regular expression should be able to parse version strings such as
# '3.0.0rc4-CAPI-1.3.3', '3.0.0-CAPI-1.4.1', '3.4.0dev-CAPI-1.8.0' or '3.4.0dev-CAPI-1.8.0 r0'
version_regex = re.compile(
r'^(?P<version>(?P<major>\d+)\.(?P<minor>\d+)\.(?P<subminor>\d+))'
r'((rc(?P<release_candidate>\d+))|dev)?-CAPI-(?P<capi_version>\d+\.\d+\.\d+)( r\d+)?$'
)
def geos_version_info():
"""
Returns a dictionary containing the various version metadata parsed from
the GEOS version string, including the version number, whether the version
is a release candidate (and what number release candidate), and the C API
version.
"""
ver = geos_version()
m = version_regex.match(ver)
if not m:
raise GEOSException('Could not parse version info string "%s"' % ver)
return dict((key, m.group(key)) for key in (
'version', 'release_candidate', 'capi_version', 'major', 'minor', 'subminor'))
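# Illustrative worked example, not from the original module: for a GEOSversion() string
# such as '3.4.2-CAPI-1.8.2 r3921' the regex above yields
# {'version': '3.4.2', 'major': '3', 'minor': '4', 'subminor': '2',
#  'release_candidate': None, 'capi_version': '1.8.2'}.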
# Version numbers and whether or not prepared geometry support is available.
_verinfo = geos_version_info()
GEOS_MAJOR_VERSION = int(_verinfo['major'])
GEOS_MINOR_VERSION = int(_verinfo['minor'])
GEOS_SUBMINOR_VERSION = int(_verinfo['subminor'])
del _verinfo
GEOS_VERSION = (GEOS_MAJOR_VERSION, GEOS_MINOR_VERSION, GEOS_SUBMINOR_VERSION)
GEOS_PREPARE = GEOS_VERSION >= (3, 1, 0)
if GEOS_PREPARE:
# Here we set up the prototypes for the initGEOS_r and finishGEOS_r
# routines. These functions aren't actually called until they are
# attached to a GEOS context handle -- this actually occurs in
# geos/prototypes/threadsafe.py.
lgeos.initGEOS_r.restype = CONTEXT_PTR
lgeos.finishGEOS_r.argtypes = [CONTEXT_PTR]
else:
# When thread-safety isn't available, the initGEOS routine must be called
# first. This function takes the notice and error functions, defined
# as Python callbacks above, as parameters. Here is the C code that is
# wrapped:
# extern void GEOS_DLL initGEOS(GEOSMessageHandler notice_function, GEOSMessageHandler error_function);
lgeos.initGEOS(notice_h, error_h)
# Calling finishGEOS() upon exit of the interpreter.
import atexit
atexit.register(lgeos.finishGEOS)
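# Note added for illustration, not from the original module: the GEOS_PREPARE flag
# computed above is what the rest of django.contrib.gis.geos consults for prepared-
# geometry support, e.g. a reported GEOS 3.3.8 gives GEOS_VERSION == (3, 3, 8) and
# therefore GEOS_PREPARE == True.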
| mit | -2,832,675,149,344,871,000 | 37.148649 | 108 | 0.700496 | false |
tomchristie/django-rest-framework | tests/test_response.py | 3 | 10775 | from django.test import TestCase, override_settings
from django.urls import include, path, re_path
from rest_framework import generics, routers, serializers, status, viewsets
from rest_framework.parsers import JSONParser
from rest_framework.renderers import (
BaseRenderer, BrowsableAPIRenderer, JSONRenderer
)
from rest_framework.response import Response
from rest_framework.views import APIView
from tests.models import BasicModel
# Serializer used to test BasicModel
class BasicModelSerializer(serializers.ModelSerializer):
class Meta:
model = BasicModel
fields = '__all__'
class MockPickleRenderer(BaseRenderer):
media_type = 'application/pickle'
class MockJsonRenderer(BaseRenderer):
media_type = 'application/json'
class MockTextMediaRenderer(BaseRenderer):
media_type = 'text/html'
DUMMYSTATUS = status.HTTP_200_OK
DUMMYCONTENT = 'dummycontent'
def RENDERER_A_SERIALIZER(x):
return ('Renderer A: %s' % x).encode('ascii')
def RENDERER_B_SERIALIZER(x):
return ('Renderer B: %s' % x).encode('ascii')
class RendererA(BaseRenderer):
media_type = 'mock/renderera'
format = "formata"
def render(self, data, media_type=None, renderer_context=None):
return RENDERER_A_SERIALIZER(data)
class RendererB(BaseRenderer):
media_type = 'mock/rendererb'
format = "formatb"
def render(self, data, media_type=None, renderer_context=None):
return RENDERER_B_SERIALIZER(data)
class RendererC(RendererB):
media_type = 'mock/rendererc'
format = 'formatc'
charset = "rendererc"
class MockView(APIView):
renderer_classes = (RendererA, RendererB, RendererC)
def get(self, request, **kwargs):
return Response(DUMMYCONTENT, status=DUMMYSTATUS)
class MockViewSettingContentType(APIView):
renderer_classes = (RendererA, RendererB, RendererC)
def get(self, request, **kwargs):
return Response(DUMMYCONTENT, status=DUMMYSTATUS, content_type='setbyview')
class JSONView(APIView):
parser_classes = (JSONParser,)
def post(self, request, **kwargs):
assert request.data
return Response(DUMMYCONTENT)
class HTMLView(APIView):
renderer_classes = (BrowsableAPIRenderer, )
def get(self, request, **kwargs):
return Response('text')
class HTMLView1(APIView):
renderer_classes = (BrowsableAPIRenderer, JSONRenderer)
def get(self, request, **kwargs):
return Response('text')
class HTMLNewModelViewSet(viewsets.ModelViewSet):
serializer_class = BasicModelSerializer
queryset = BasicModel.objects.all()
class HTMLNewModelView(generics.ListCreateAPIView):
renderer_classes = (BrowsableAPIRenderer,)
permission_classes = []
serializer_class = BasicModelSerializer
queryset = BasicModel.objects.all()
new_model_viewset_router = routers.DefaultRouter()
new_model_viewset_router.register(r'', HTMLNewModelViewSet)
urlpatterns = [
path('setbyview', MockViewSettingContentType.as_view(renderer_classes=[RendererA, RendererB, RendererC])),
re_path(r'^.*\.(?P<format>.+)$', MockView.as_view(renderer_classes=[RendererA, RendererB, RendererC])),
path('', MockView.as_view(renderer_classes=[RendererA, RendererB, RendererC])),
path('html', HTMLView.as_view()),
path('json', JSONView.as_view()),
path('html1', HTMLView1.as_view()),
path('html_new_model', HTMLNewModelView.as_view()),
path('html_new_model_viewset', include(new_model_viewset_router.urls)),
path('restframework', include('rest_framework.urls', namespace='rest_framework'))
]
# TODO: Clean tests below - remove duplicates with above, better unit testing, ...
@override_settings(ROOT_URLCONF='tests.test_response')
class RendererIntegrationTests(TestCase):
"""
    End-to-end testing of renderers using a ResponseMixin on a generic view.
"""
def test_default_renderer_serializes_content(self):
"""If the Accept header is not set the default renderer should serialize the response."""
resp = self.client.get('/')
self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_head_method_serializes_no_content(self):
"""No response must be included in HEAD requests."""
resp = self.client.head('/')
self.assertEqual(resp.status_code, DUMMYSTATUS)
self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
self.assertEqual(resp.content, b'')
def test_default_renderer_serializes_content_on_accept_any(self):
"""If the Accept header is set to */* the default renderer should serialize the response."""
resp = self.client.get('/', HTTP_ACCEPT='*/*')
self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_default_case(self):
"""If the Accept header is set the specified renderer should serialize the response.
        (In this case we check that it works for the default renderer)"""
resp = self.client.get('/', HTTP_ACCEPT=RendererA.media_type)
self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_non_default_case(self):
"""If the Accept header is set the specified renderer should serialize the response.
        (In this case we check that it works for a non-default renderer)"""
resp = self.client.get('/', HTTP_ACCEPT=RendererB.media_type)
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_on_format_query(self):
"""If a 'format' query is specified, the renderer with the matching
format attribute should serialize the response."""
resp = self.client.get('/?format=%s' % RendererB.format)
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_on_format_kwargs(self):
"""If a 'format' keyword arg is specified, the renderer with the matching
format attribute should serialize the response."""
resp = self.client.get('/something.formatb')
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_is_used_on_format_query_with_matching_accept(self):
"""If both a 'format' query and a matching Accept header specified,
the renderer with the matching format attribute should serialize the response."""
resp = self.client.get('/?format=%s' % RendererB.format,
HTTP_ACCEPT=RendererB.media_type)
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
@override_settings(ROOT_URLCONF='tests.test_response')
class UnsupportedMediaTypeTests(TestCase):
def test_should_allow_posting_json(self):
response = self.client.post('/json', data='{"test": 123}', content_type='application/json')
self.assertEqual(response.status_code, 200)
def test_should_not_allow_posting_xml(self):
response = self.client.post('/json', data='<test>123</test>', content_type='application/xml')
self.assertEqual(response.status_code, 415)
def test_should_not_allow_posting_a_form(self):
response = self.client.post('/json', data={'test': 123})
self.assertEqual(response.status_code, 415)
@override_settings(ROOT_URLCONF='tests.test_response')
class Issue122Tests(TestCase):
"""
    Tests that cover #122.
"""
def test_only_html_renderer(self):
"""
Test if no infinite recursion occurs.
"""
self.client.get('/html')
def test_html_renderer_is_first(self):
"""
Test if no infinite recursion occurs.
"""
self.client.get('/html1')
@override_settings(ROOT_URLCONF='tests.test_response')
class Issue467Tests(TestCase):
"""
Tests for #467
"""
def test_form_has_label_and_help_text(self):
resp = self.client.get('/html_new_model')
self.assertEqual(resp['Content-Type'], 'text/html; charset=utf-8')
# self.assertContains(resp, 'Text comes here')
# self.assertContains(resp, 'Text description.')
@override_settings(ROOT_URLCONF='tests.test_response')
class Issue807Tests(TestCase):
"""
Covers #807
"""
def test_does_not_append_charset_by_default(self):
"""
Renderers don't include a charset unless set explicitly.
"""
headers = {"HTTP_ACCEPT": RendererA.media_type}
resp = self.client.get('/', **headers)
expected = "{}; charset={}".format(RendererA.media_type, 'utf-8')
self.assertEqual(expected, resp['Content-Type'])
def test_if_there_is_charset_specified_on_renderer_it_gets_appended(self):
"""
If renderer class has charset attribute declared, it gets appended
to Response's Content-Type
"""
headers = {"HTTP_ACCEPT": RendererC.media_type}
resp = self.client.get('/', **headers)
expected = "{}; charset={}".format(RendererC.media_type, RendererC.charset)
self.assertEqual(expected, resp['Content-Type'])
def test_content_type_set_explicitly_on_response(self):
"""
The content type may be set explicitly on the response.
"""
headers = {"HTTP_ACCEPT": RendererC.media_type}
resp = self.client.get('/setbyview', **headers)
self.assertEqual('setbyview', resp['Content-Type'])
def test_form_has_label_and_help_text(self):
resp = self.client.get('/html_new_model')
self.assertEqual(resp['Content-Type'], 'text/html; charset=utf-8')
# self.assertContains(resp, 'Text comes here')
# self.assertContains(resp, 'Text description.')
| bsd-2-clause | 8,652,225,553,536,476,000 | 36.807018 | 110 | 0.684826 | false |
weolar/miniblink49 | v8_5_1/tools/testrunner/network/endpoint.py | 23 | 4536 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import multiprocessing
import os
import Queue
import threading
import time
from ..local import execution
from ..local import progress
from ..local import testsuite
from ..local import utils
from ..server import compression
class EndpointProgress(progress.ProgressIndicator):
def __init__(self, sock, server, ctx):
super(EndpointProgress, self).__init__()
self.sock = sock
self.server = server
self.context = ctx
self.results_queue = [] # Accessors must synchronize themselves.
self.sender_lock = threading.Lock()
self.senderthread = threading.Thread(target=self._SenderThread)
self.senderthread.start()
def HasRun(self, test, has_unexpected_output):
# The runners that call this have a lock anyway, so this is safe.
self.results_queue.append(test)
def _SenderThread(self):
keep_running = True
tests = []
self.sender_lock.acquire()
while keep_running:
time.sleep(0.1)
# This should be "atomic enough" without locking :-)
# (We don't care which list any new elements get appended to, as long
# as we don't lose any and the last one comes last.)
current = self.results_queue
self.results_queue = []
for c in current:
if c is None:
keep_running = False
else:
tests.append(c)
if keep_running and len(tests) < 1:
continue # Wait for more results.
if len(tests) < 1: break # We're done here.
result = []
for t in tests:
result.append(t.PackResult())
try:
compression.Send(result, self.sock)
except:
self.runner.terminate = True
for t in tests:
self.server.CompareOwnPerf(t, self.context.arch, self.context.mode)
tests = []
self.sender_lock.release()
def Execute(workspace, ctx, tests, sock, server):
suite_paths = utils.GetSuitePaths(os.path.join(workspace, "test"))
suites = []
for root in suite_paths:
suite = testsuite.TestSuite.LoadTestSuite(
os.path.join(workspace, "test", root))
if suite:
suite.SetupWorkingDirectory()
suites.append(suite)
suites_dict = {}
for s in suites:
suites_dict[s.name] = s
s.tests = []
for t in tests:
suite = suites_dict[t.suite]
t.suite = suite
suite.tests.append(t)
suites = [ s for s in suites if len(s.tests) > 0 ]
for s in suites:
s.DownloadData()
progress_indicator = EndpointProgress(sock, server, ctx)
runner = execution.Runner(suites, progress_indicator, ctx)
try:
runner.Run(server.jobs)
except IOError, e:
if e.errno == 2:
message = ("File not found: %s, maybe you forgot to 'git add' it?" %
e.filename)
else:
message = "%s" % e
compression.Send([[-1, message]], sock)
progress_indicator.HasRun(None, None) # Sentinel to signal the end.
progress_indicator.sender_lock.acquire() # Released when sending is done.
progress_indicator.sender_lock.release()
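# Note added for illustration, not from the original file: HasRun(None, None) appends
# the None sentinel that makes _SenderThread leave its loop, and the acquire/release
# pair on sender_lock simply blocks Execute() until the sender has pushed its final
# batch of results over the socket.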
| apache-2.0 | -4,016,352,023,141,227,000 | 35.288 | 76 | 0.690917 | false |
themad/xmenud | xmenud.py | 1 | 6727 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# xmenud - a small desktop menu
# This is
#
# for launching the app
import subprocess
# for drawing the stuff
import gtk
# for catching the error
import glib
# for reading that stuff
import xdg.Menu
import xdg.DesktopEntry
# for finding that stuff to draw
import xdg.IconTheme
# for finding what stuff to do
import getopt
# for not doing anything anymore
import sys
# regular expressions for funny parsing
import re
NAME="xmenud"
VERSION="0.8"
AUTHOR="Matthias Kühlke"
EMAIL="mad@unserver.de"
YEAR="2010"
TAGLINE="A desktop menu, with klickibunti."
LICENSE='''
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
'''
def error(string):
''' output errors to stderr '''
print >>sys.stderr, string
def launcher_execute(string):
try:
subprocess.Popen(string, shell=True)
except:
# well, the user probably doesn't want anything to happen, so I'll just
pass
def launcher_print(string):
print string
def create_menu(menu, use_icons=True, launch=launcher_execute):
def launch_callback(widget, string):
launch(string)
def get_exec(string, terminal=False):
''' Parses the string according to the XDG Desktop Entry Specifications. '''
r1 = re.compile('(?<!%)%[fFuUdDnNickvm]')
r2 = re.compile('%%')
result=r2.sub('%', r1.sub('', string))
if(terminal):
result = 'urxvt -e "%s"' % result
return result
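    # Illustrative worked example, not from the original script: the two regexes drop
    # XDG field codes and unescape literal percent signs, e.g.
    #     get_exec('vlc %U')      -> 'vlc '
    #     get_exec('echo 100%%')  -> 'echo 100%'
    #     get_exec('mc', True)    -> 'urxvt -e "mc"'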
def new_item(label, icon, use_icons):
def get_icon(iconname):
if (iconname=="" or iconname.find('.')<>-1):
try:
pixbuf = gtk.gdk.pixbuf_new_from_file(xdg.IconTheme.getIconPath(iconname))
ick = gtk.IconSet(pixbuf)
scaled = ick.render_icon(gtk.Style(), gtk.TEXT_DIR_LTR, gtk.STATE_NORMAL, gtk.ICON_SIZE_LARGE_TOOLBAR, None, None)
img = gtk.image_new_from_pixbuf(scaled)
except (TypeError, glib.GError):
img = gtk.image_new_from_stock(gtk.STOCK_DIALOG_QUESTION, gtk.ICON_SIZE_LARGE_TOOLBAR)
else:
img = gtk.image_new_from_icon_name(iconname, gtk.ICON_SIZE_LARGE_TOOLBAR)
return img
if use_icons:
item = gtk.ImageMenuItem(stock_id=label)
item.set_image(get_icon(icon))
else:
if (label=="- - -"):
item = gtk.SeparatorMenuItem()
else:
item = gtk.MenuItem(label=label)
return item
themenu = gtk.Menu()
for entry in menu.getEntries():
if isinstance(entry, xdg.Menu.Menu):
item = new_item(entry.getName(), entry.getIcon(), use_icons)
submenu = create_menu(entry, use_icons, launch)
item.set_submenu(submenu)
themenu.append(item)
item.set_tooltip_text(entry.getComment())
item.show()
elif isinstance(entry, xdg.Menu.MenuEntry):
item = new_item( ' - '.join(filter(None, [ entry.DesktopEntry.getName(), entry.DesktopEntry.getGenericName() ])), entry.DesktopEntry.getIcon(), use_icons)
item.connect("activate", launch_callback, get_exec(entry.DesktopEntry.getExec(), entry.DesktopEntry.getTerminal()))
themenu.append(item)
item.set_tooltip_text(entry.DesktopEntry.getComment())
item.show()
elif isinstance(entry, xdg.Menu.Separator):
item = new_item('- - -', '', 0)
themenu.append(item)
item.show()
themenu.show()
return themenu
def create_popup():
m=gtk.Menu()
about = gtk.ImageMenuItem(stock_id=gtk.STOCK_ABOUT)
quit = gtk.ImageMenuItem(stock_id=gtk.STOCK_QUIT)
about.connect('activate', lambda w: about_dialog())
quit.connect('activate', lambda w: gtk.main_quit())
m.append(about)
m.append(quit)
about.show()
quit.show()
return m
def about_dialog():
def close(w, r):
if r == gtk.RESPONSE_CANCEL:
w.hide()
d = gtk.AboutDialog()
d.set_name(NAME)
d.set_version(VERSION)
d.set_authors(['%s <%s>' % (AUTHOR,EMAIL)])
d.set_copyright("(C) %s %s" % (YEAR,AUTHOR))
d.set_license(LICENSE)
d.connect('response', close)
d.show()
def tray():
i = gtk.StatusIcon()
i.set_from_stock(gtk.STOCK_EXECUTE)
i.set_tooltip("xmenud")
i.set_visible(True)
return i
def main():
run_tray = False
use_icons = True
launch = launcher_execute
try:
opts, args = getopt.getopt(sys.argv[1:],"htvnp",["help", "tray", "version", "no-icons", "pipe-mode"])
except getopt.GetoptError, err:
error(str(err))
usage()
sys.exit(2)
for o, a in opts:
if o in ('-v', '--version'):
showversion()
sys.exit()
elif o in ('-h', '--help'):
usage(verbose=True)
sys.exit()
elif o in ('-t', '--tray'):
run_tray = True
elif o in ('-p', '--pipe-mode'):
launch = launcher_print
elif o in ('-n', '--no-icons'):
use_icons = False
try:
desktopmenu = xdg.Menu.parse(filename = "/etc/xdg/menus/gnome-applications.menu")
except xdg.Exceptions.ParsingError as e:
error('Error parsing the menu files: \n' + e.__str__())
sys.exit(-1)
mainmenu=create_menu(desktopmenu, use_icons, launch)
if run_tray:
popupmenu=create_popup()
trayicon=tray()
trayicon.connect("activate", lambda w: mainmenu.popup(None, None, None, 0, 0))
trayicon.connect("popup-menu", lambda w,b,t: popupmenu.popup(None, None, None, b, t))
else:
mainmenu.connect("hide", lambda w: gtk.main_quit())
mainmenu.popup(None, None, None, 0, 0)
try:
gtk.main()
except KeyboardInterrupt:
pass
return 0
def showversion():
print '%s %s- %s' % (NAME, VERSION, TAGLINE)
print ' Copyright (C) %s %s <%s>' % (YEAR, AUTHOR, EMAIL)
print LICENSE
def usage(verbose=False):
print 'usage: %s [--tray|--help] [--no-icons] [--pipe-mode] [--version]' % sys.argv[0]
if verbose:
print '''Options:
--help,-h This help message.
--tray,-t Instead of launching a menu right away, put an icon into the systray.
--no-icons,-n Don't load or show program icons.
--pipe-mode,-p Instead of launching a program, just output its name to stdout.
--version,-v Show version information.
'''
if __name__ == "__main__":
main()
# vim: set et ts=4 sw=4:
| gpl-3.0 | 8,592,755,799,032,689,000 | 29.995392 | 166 | 0.592774 | false |
bitcraft/PURIKURA | pyrikura/smtp.py | 1 | 1435 | import smtplib
import threading
import pickle
import email
from .config import Config as pkConfig
class SenderThread(threading.Thread):
def __init__(self, address, filename):
threading.Thread.__init__(self)
self.address = address
self.filename = filename
def run(self):
sender = pkConfig.get('email', 'sender')
subject = pkConfig.get('email', 'subject')
auth_file = '/home/mjolnir/git/PURIKURA/secrets'
msg = email.MIMEMultipart.MIMEMultipart('mixed')
msg['subject'] = subject
msg['from'] = sender
msg['to'] = self.address
body = email.mime.Text.MIMEText('Here\'s your photo!\n\nThank you!\n\n')
msg.attach(body)
file_msg = email.mime.base.MIMEBase('image', 'jpeg')
file_msg.set_payload(open(self.filename).read())
email.encoders.encode_base64(file_msg)
file_msg.add_header(
'Content-Disposition',
            'attachment; filename=photo.jpg')
msg.attach(file_msg)
with open(auth_file) as fh:
auth = pickle.load(fh)
auth = auth['smtp']
with open('email.log', 'a') as fh:
fh.write('{}\t{}\n'.format(self.address, self.filename))
smtpout = smtplib.SMTP(auth['host'])
smtpout.login(auth['username'], auth['password'])
smtpout.sendmail(sender, [self.address], msg.as_string())
smtpout.quit()
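# Illustrative usage sketch, not from the original module (address and path are made
# up): SenderThread('guest@example.com', '/tmp/photo.jpg').start() sends one photo per
# thread without blocking the caller.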
| gpl-3.0 | -4,168,127,869,118,357,000 | 30.888889 | 80 | 0.599303 | false |
megmontero/tweevy | apps/oauth.py | 1 | 4757 | from flask import Flask, redirect, url_for, session, request, render_template
from flask_oauth import OAuth
import facebook as fb
from flask import Blueprint
from apps import app
app_oauth = Blueprint('app_oauth', __name__,template_folder='templates')
###https://github.com/mitsuhiko/flask-oauth/tree/master/example
SECRET_KEY = 'development key'
DEBUG = True
FACEBOOK_APP_ID = '236507713421072'
FACEBOOK_APP_SECRET = '75cb7fb97ea05ea1f27f14e0fd5605df'
method = None
#app = Flask(__name__)
#app.debug = DEBUG
#app.secret_key = SECRET_KEY
oauth = OAuth()
facebook = oauth.remote_app('facebook',
base_url='https://graph.facebook.com/',
request_token_url=None,
access_token_url='/oauth/access_token',
authorize_url='https://www.facebook.com/dialog/oauth',
consumer_key=FACEBOOK_APP_ID,
consumer_secret=FACEBOOK_APP_SECRET,
request_token_params={'scope': 'email,user_birthday'}
#request_token_params={'scope': 'email,user_birthday,user_photos,publish_actions,user_friends,user_relationships,user_status'}
)
google = oauth.remote_app('google',
base_url='https://www.google.com/accounts/',
authorize_url='https://accounts.google.com/o/oauth2/auth',
request_token_url=None,
request_token_params={'scope': 'https://www.googleapis.com/auth/userinfo.email',
'response_type': 'code'},
access_token_url='https://accounts.google.com/o/oauth2/token',
access_token_method='POST',
access_token_params={'grant_type': 'authorization_code'},
consumer_key='503619580307-c2idr2bfvuqvg42kd4477eegff04t2sm.apps.googleusercontent.com',
consumer_secret='FBRYxnoR6hR6AsmRta-h49G0')
def get_method():
global method
return method
def set_method(m):
global method
method = m
def get_user_info(method):
if method == 'google':
return get_google_user_info()
if method == 'facebook':
return get_facebook_user_info()
return {}
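# Note added for illustration, not from the original module: `method` is the
# module-level flag set by the login routes below, so a typical caller does
#     info = get_user_info(get_method())
# and, once a token is stored in the session, receives a dict holding at least the
# user's id, name and email for either provider.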
def get_google_user_info():
#return {'email': 'prueba'}
access_token = session.get('access_token')
if access_token is None:
return redirect(url_for('app_oauth.login_google'))
access_token = access_token[0]
from urllib2 import Request, urlopen, URLError
headers = {'Authorization': 'OAuth '+access_token}
req = Request('https://www.googleapis.com/oauth2/v1/userinfo',
None, headers)
try:
res = urlopen(req)
except URLError, e:
if e.code == 401:
# Unauthorized - bad token
session.pop('access_token', None)
return redirect('/')
return res.read()
for l in [item.split('":') for item in res.read().replace('{', '').replace('}','').split(',')]:
k = l[0].replace('"', '').strip()
if k == 'id':
g_id = l[1].replace('"', '').strip()
elif k == 'name':
g_name = l[1].replace('"', '').strip()
elif k == 'email':
g_email = l[1].replace('"', '').strip()
#user[k] = v
return {'id': g_id, 'name': g_name, 'email': g_email}
def get_facebook_user_info():
graph = fb.GraphAPI(session['oauth_token'][0])
#me = facebook.get('/me')
me = graph.get_object("me?fields=email,first_name,last_name,name,birthday")
return me
@app_oauth.route('/login/facebook')
def login_facebook():
global method
method = 'facebook'
return facebook.authorize(callback=url_for('app_oauth.facebook_authorized',
next=request.args.get('next') or request.referrer or None,
_external=True))
@app_oauth.route('/facebook_authorized')
@facebook.authorized_handler
def facebook_authorized(resp):
global method
if resp is None:
return 'Access denied: reason=%s error=%s' % (
request.args['error_reason'],
request.args['error_description']
)
session['oauth_token'] = (resp['access_token'], '')
method = 'facebook'
return redirect('/')
@facebook.tokengetter
def get_facebook_oauth_token():
return session.get('oauth_token')
@app_oauth.route('/login/google')
def login_google():
global method
method = 'google'
callback=url_for('app_oauth.authorized', _external=True)
return google.authorize(callback=callback)
@app_oauth.route('/authorized/google')
@google.authorized_handler
def authorized(resp):
access_token = resp['access_token']
session['access_token'] = access_token, ''
return redirect('/')
@google.tokengetter
def get_access_token():
return session.get('access_token')
| bsd-3-clause | -6,090,719,618,717,257,000 | 28.918239 | 130 | 0.615094 | false |
qgis/QGIS-Django | qgis-app/models/migrations/0002_rename_Review_model_and_file_field.py | 1 | 1156 | # Custom migration, rename ModelReview to Review
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('models', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='model',
old_name='model_file',
new_name='file',
),
migrations.RenameField(
model_name='modelreview',
old_name='model',
new_name='resource',
),
migrations.RenameModel(
old_name='ModelReview',
new_name='Review'
),
migrations.AlterField(
model_name='review',
name='reviewer',
field=models.ForeignKey(help_text='The user who reviewed this GeoPackage.',
on_delete=django.db.models.deletion.CASCADE, related_name='models_review_related',
to=settings.AUTH_USER_MODEL, verbose_name='Reviewed by'),
),
]
| gpl-2.0 | 7,157,951,517,705,604,000 | 27.195122 | 118 | 0.564879 | false |
GenericStudent/home-assistant | tests/components/speedtestdotnet/test_config_flow.py | 6 | 4464 | """Tests for SpeedTest config flow."""
from datetime import timedelta
import pytest
from speedtest import NoMatchedServers
from homeassistant import data_entry_flow
from homeassistant.components import speedtestdotnet
from homeassistant.components.speedtestdotnet.const import (
CONF_MANUAL,
CONF_SERVER_ID,
CONF_SERVER_NAME,
DOMAIN,
SENSOR_TYPES,
)
from homeassistant.const import CONF_MONITORED_CONDITIONS, CONF_SCAN_INTERVAL
from . import MOCK_SERVERS
from tests.async_mock import patch
from tests.common import MockConfigEntry
@pytest.fixture(name="mock_setup")
def mock_setup():
"""Mock entry setup."""
with patch(
"homeassistant.components.speedtestdotnet.async_setup_entry",
return_value=True,
):
yield
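# Note added for illustration, not from the original tests: patching async_setup_entry
# to return True lets the flow create config entries without actually setting up the
# speedtestdotnet integration during these tests.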
async def test_flow_works(hass, mock_setup):
"""Test user config."""
result = await hass.config_entries.flow.async_init(
speedtestdotnet.DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "SpeedTest"
async def test_import_fails(hass, mock_setup):
"""Test import step fails if server_id is not valid."""
with patch("speedtest.Speedtest") as mock_api:
mock_api.return_value.get_servers.side_effect = NoMatchedServers
result = await hass.config_entries.flow.async_init(
speedtestdotnet.DOMAIN,
context={"source": "import"},
data={
CONF_SERVER_ID: "223",
CONF_MANUAL: True,
CONF_SCAN_INTERVAL: timedelta(minutes=1),
CONF_MONITORED_CONDITIONS: list(SENSOR_TYPES),
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "wrong_server_id"
async def test_import_success(hass, mock_setup):
"""Test import step is successful if server_id is valid."""
with patch("speedtest.Speedtest"):
result = await hass.config_entries.flow.async_init(
speedtestdotnet.DOMAIN,
context={"source": "import"},
data={
CONF_SERVER_ID: "1",
CONF_MANUAL: True,
CONF_SCAN_INTERVAL: timedelta(minutes=1),
CONF_MONITORED_CONDITIONS: list(SENSOR_TYPES),
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "SpeedTest"
assert result["data"][CONF_SERVER_ID] == "1"
assert result["data"][CONF_MANUAL] is True
assert result["data"][CONF_SCAN_INTERVAL] == 1
async def test_options(hass):
"""Test updating options."""
entry = MockConfigEntry(
domain=DOMAIN,
title="SpeedTest",
data={},
options={},
)
entry.add_to_hass(hass)
with patch("speedtest.Speedtest") as mock_api:
mock_api.return_value.get_servers.return_value = MOCK_SERVERS
await hass.config_entries.async_setup(entry.entry_id)
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_SERVER_NAME: "Country1 - Sponsor1 - Server1",
CONF_SCAN_INTERVAL: 30,
CONF_MANUAL: False,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_SERVER_NAME: "Country1 - Sponsor1 - Server1",
CONF_SERVER_ID: "1",
CONF_SCAN_INTERVAL: 30,
CONF_MANUAL: False,
}
async def test_integration_already_configured(hass):
"""Test integration is already configured."""
entry = MockConfigEntry(
domain=DOMAIN,
data={},
options={},
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
speedtestdotnet.DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "single_instance_allowed"
| apache-2.0 | 1,362,920,396,697,211,400 | 31.347826 | 77 | 0.617608 | false |
pschmitt/home-assistant | tests/components/arlo/test_sensor.py | 6 | 6961 | """The tests for the Netgear Arlo sensors."""
from collections import namedtuple
import pytest
from homeassistant.components.arlo import DATA_ARLO, sensor as arlo
from homeassistant.const import (
ATTR_ATTRIBUTION,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
UNIT_PERCENTAGE,
)
from tests.async_mock import patch
def _get_named_tuple(input_dict):
return namedtuple("Struct", input_dict.keys())(*input_dict.values())
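# Note added for illustration, not from the original tests: _get_named_tuple turns a
# plain dict into an attribute-style object, e.g.
# _get_named_tuple({'battery_level': 50}).battery_level == 50, which is how the
# fixtures below imitate the Arlo device/data objects the sensors read from.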
def _get_sensor(name="Last", sensor_type="last_capture", data=None):
if data is None:
data = {}
return arlo.ArloSensor(name, data, sensor_type)
@pytest.fixture()
def default_sensor():
"""Create an ArloSensor with default values."""
return _get_sensor()
@pytest.fixture()
def battery_sensor():
"""Create an ArloSensor with battery data."""
data = _get_named_tuple({"battery_level": 50})
return _get_sensor("Battery Level", "battery_level", data)
@pytest.fixture()
def temperature_sensor():
"""Create a temperature ArloSensor."""
return _get_sensor("Temperature", "temperature")
@pytest.fixture()
def humidity_sensor():
"""Create a humidity ArloSensor."""
return _get_sensor("Humidity", "humidity")
@pytest.fixture()
def cameras_sensor():
"""Create a total cameras ArloSensor."""
data = _get_named_tuple({"cameras": [0, 0]})
return _get_sensor("Arlo Cameras", "total_cameras", data)
@pytest.fixture()
def captured_sensor():
"""Create a captured today ArloSensor."""
data = _get_named_tuple({"captured_today": [0, 0, 0, 0, 0]})
return _get_sensor("Captured Today", "captured_today", data)
class PlatformSetupFixture:
"""Fixture for testing platform setup call to add_entities()."""
def __init__(self):
"""Instantiate the platform setup fixture."""
self.sensors = None
self.update = False
def add_entities(self, sensors, update):
"""Mock method for adding devices."""
self.sensors = sensors
self.update = update
@pytest.fixture()
def platform_setup():
"""Create an instance of the PlatformSetupFixture class."""
return PlatformSetupFixture()
@pytest.fixture()
def sensor_with_hass_data(default_sensor, hass):
"""Create a sensor with async_dispatcher_connected mocked."""
hass.data = {}
default_sensor.hass = hass
return default_sensor
@pytest.fixture()
def mock_dispatch():
"""Mock the dispatcher connect method."""
target = "homeassistant.components.arlo.sensor.async_dispatcher_connect"
with patch(target) as _mock:
yield _mock
def test_setup_with_no_data(platform_setup, hass):
"""Test setup_platform with no data."""
arlo.setup_platform(hass, None, platform_setup.add_entities)
assert platform_setup.sensors is None
assert not platform_setup.update
def test_setup_with_valid_data(platform_setup, hass):
"""Test setup_platform with valid data."""
config = {
"monitored_conditions": [
"last_capture",
"total_cameras",
"captured_today",
"battery_level",
"signal_strength",
"temperature",
"humidity",
"air_quality",
]
}
hass.data[DATA_ARLO] = _get_named_tuple(
{
"cameras": [_get_named_tuple({"name": "Camera", "model_id": "ABC1000"})],
"base_stations": [
_get_named_tuple({"name": "Base Station", "model_id": "ABC1000"})
],
}
)
arlo.setup_platform(hass, config, platform_setup.add_entities)
assert len(platform_setup.sensors) == 8
assert platform_setup.update
def test_sensor_name(default_sensor):
"""Test the name property."""
assert default_sensor.name == "Last"
async def test_async_added_to_hass(sensor_with_hass_data, mock_dispatch):
"""Test dispatcher called when added."""
await sensor_with_hass_data.async_added_to_hass()
assert len(mock_dispatch.mock_calls) == 1
kall = mock_dispatch.call_args
args, kwargs = kall
assert len(args) == 3
assert args[0] == sensor_with_hass_data.hass
assert args[1] == "arlo_update"
assert not kwargs
def test_sensor_state_default(default_sensor):
"""Test the state property."""
assert default_sensor.state is None
def test_sensor_icon_battery(battery_sensor):
"""Test the battery icon."""
assert battery_sensor.icon == "mdi:battery-50"
def test_sensor_icon(temperature_sensor):
"""Test the icon property."""
assert temperature_sensor.icon == "mdi:thermometer"
def test_unit_of_measure(default_sensor, battery_sensor):
"""Test the unit_of_measurement property."""
assert default_sensor.unit_of_measurement is None
assert battery_sensor.unit_of_measurement == UNIT_PERCENTAGE
def test_device_class(default_sensor, temperature_sensor, humidity_sensor):
"""Test the device_class property."""
assert default_sensor.device_class is None
assert temperature_sensor.device_class == DEVICE_CLASS_TEMPERATURE
assert humidity_sensor.device_class == DEVICE_CLASS_HUMIDITY
def test_update_total_cameras(cameras_sensor):
"""Test update method for total_cameras sensor type."""
cameras_sensor.update()
assert cameras_sensor.state == 2
def test_update_captured_today(captured_sensor):
"""Test update method for captured_today sensor type."""
captured_sensor.update()
assert captured_sensor.state == 5
def _test_attributes(sensor_type):
data = _get_named_tuple({"model_id": "TEST123"})
sensor = _get_sensor("test", sensor_type, data)
attrs = sensor.device_state_attributes
assert attrs.get(ATTR_ATTRIBUTION) == "Data provided by arlo.netgear.com"
assert attrs.get("brand") == "Netgear Arlo"
assert attrs.get("model") == "TEST123"
def test_state_attributes():
"""Test attributes for camera sensor types."""
_test_attributes("battery_level")
_test_attributes("signal_strength")
_test_attributes("temperature")
_test_attributes("humidity")
_test_attributes("air_quality")
def test_attributes_total_cameras(cameras_sensor):
"""Test attributes for total cameras sensor type."""
attrs = cameras_sensor.device_state_attributes
assert attrs.get(ATTR_ATTRIBUTION) == "Data provided by arlo.netgear.com"
assert attrs.get("brand") == "Netgear Arlo"
assert attrs.get("model") is None
def _test_update(sensor_type, key, value):
data = _get_named_tuple({key: value})
sensor = _get_sensor("test", sensor_type, data)
sensor.update()
assert sensor.state == value
def test_update():
"""Test update method for direct transcription sensor types."""
_test_update("battery_level", "battery_level", 100)
_test_update("signal_strength", "signal_strength", 100)
_test_update("temperature", "ambient_temperature", 21.4)
_test_update("humidity", "ambient_humidity", 45.1)
_test_update("air_quality", "ambient_air_quality", 14.2)
| apache-2.0 | -2,340,961,110,717,036,500 | 28.747863 | 85 | 0.666427 | false |
klenov/mashinka | main.py | 1 | 1340 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import serial
radio = serial.Serial('/dev/tty.usbserial') # TODO: change this to command line argument
pshyk_enabled = 0
def getch():
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
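# Note added for illustration, not from the original script: getch() reads a single
# keypress by switching stdin to raw mode and always restoring the saved terminal
# settings, so the control loop below reacts without waiting for Enter.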
print "Press q for exit\n"
print "Press a, s, w, d for movement and spacebar to paint\n"
try:
while True:
key = getch()
print "Key pressed is " + key
if key == 'a':
radio.write("\xA1")
elif key == 'd':
radio.write("\xA2")
elif key == 'w':
radio.write("\xA3")
elif key == 's':
radio.write("\xA4")
elif key == ' ':
if( not pshyk_enabled ):
radio.write("\x3F")
pshyk_enabled = 1
print "paint enabled"
else:
radio.write('a')
pshyk_enabled = 0
print "paint disabled"
elif key == 'q':
break # Exit the while loop
except KeyboardInterrupt:
pass
finally:
radio.close()
| gpl-2.0 | -3,078,600,315,873,372,000 | 21.928571 | 88 | 0.493284 | false |
wenxichen/tensorflow_yolo2 | src/slim_dir/preprocessing/preprocessing_factory.py | 14 | 2762 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a factory for building various models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from preprocessing import cifarnet_preprocessing
from preprocessing import inception_preprocessing
from preprocessing import lenet_preprocessing
from preprocessing import vgg_preprocessing
slim = tf.contrib.slim
def get_preprocessing(name, is_training=False):
"""Returns preprocessing_fn(image, height, width, **kwargs).
Args:
name: The name of the preprocessing function.
is_training: `True` if the model is being used for training and `False`
otherwise.
Returns:
preprocessing_fn: A function that preprocessing a single image (pre-batch).
It has the following signature:
image = preprocessing_fn(image, output_height, output_width, ...).
Raises:
ValueError: If Preprocessing `name` is not recognized.
"""
preprocessing_fn_map = {
'cifarnet': cifarnet_preprocessing,
'inception': inception_preprocessing,
'inception_v1': inception_preprocessing,
'inception_v2': inception_preprocessing,
'inception_v3': inception_preprocessing,
'inception_v4': inception_preprocessing,
'inception_resnet_v2': inception_preprocessing,
'lenet': lenet_preprocessing,
'resnet_v1_50': vgg_preprocessing,
'resnet_v1_101': vgg_preprocessing,
'resnet_v1_152': vgg_preprocessing,
'resnet_v2_50': vgg_preprocessing,
'resnet_v2_101': vgg_preprocessing,
'resnet_v2_152': vgg_preprocessing,
'vgg': vgg_preprocessing,
'vgg_a': vgg_preprocessing,
'vgg_16': vgg_preprocessing,
'vgg_19': vgg_preprocessing,
}
if name not in preprocessing_fn_map:
raise ValueError('Preprocessing name [%s] was not recognized' % name)
def preprocessing_fn(image, output_height, output_width, **kwargs):
return preprocessing_fn_map[name].preprocess_image(
image, output_height, output_width, is_training=is_training, **kwargs)
return preprocessing_fn
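# Hedged usage sketch (added for illustration): 'inception_v3' and the 299x299
# output size below are example choices, not requirements of this factory.
def _example_get_preprocessing(image_tensor):
  """Show the calling convention of get_preprocessing() on a decoded image tensor."""
  preprocess_fn = get_preprocessing('inception_v3', is_training=False)
  return preprocess_fn(image_tensor, 299, 299)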
| mit | 2,041,764,836,220,496,600 | 35.826667 | 80 | 0.701665 | false |
abdhaleegit/avocado-misc-tests | memory/memhotplug.py | 4 | 8438 | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2017 IBM
# Author: Abdul Haleem <abdhalee@linux.vnet.ibm.com>
import os
import glob
import re
import platform
import multiprocessing
from avocado import Test
from avocado.utils import process, memory, build, archive
from avocado.utils.software_manager import SoftwareManager
MEM_PATH = '/sys/devices/system/memory'
ERRORLOG = ['WARNING: CPU:', 'Oops',
'Segfault', 'soft lockup',
'Unable to handle paging request',
'rcu_sched detected stalls',
'NMI backtrace for cpu',
'WARNING: at',
'INFO: possible recursive locking detected',
'Kernel BUG at', 'Kernel panic - not syncing:',
'double fault:', 'BUG: Bad page state in']
def clear_dmesg():
process.run("dmesg -C ", sudo=True)
def online(block):
try:
memory.hotplug(block)
return ""
except IOError:
return "memory%s : Resource is busy" % block
def offline(block):
try:
memory.hotunplug(block)
return ""
except IOError:
return "memory%s : Resource is busy" % block
def get_hotpluggable_blocks(path, ratio):
mem_blocks = []
for mem_blk in glob.glob(path):
block = re.findall(r"\d+", os.path.basename(mem_blk))[0]
block = re.sub(r'^\s*$', '', block)
if memory.is_hot_pluggable(block):
mem_blocks.append(block)
def chunks(num):
"""
Return number of blocks in chunks of 100
"""
if num % 2:
return num // 100 + 1
return num // 100
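    # Worked example: 30 hot-pluggable blocks with the default ratio of 5 gives
    # chunks(150) == 1, so only the first block would be kept below.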
count = chunks(len(mem_blocks) * ratio)
return mem_blocks[:count]
def collect_dmesg(object):
object.whiteboard = process.system_output("dmesg")
class MemStress(Test):
'''
    Stress test to exercise the memory component
    This test performs memory hotunplug/hotplug tests with the scenarios below:
1. hotunplug one by one in a loop for all
2. Toggle memory blocks by making off/on in a loop
3. hot unplug % of memory for different ratios
4. dlpar memory hotplug using drmgr
5. shared resource : dlpar in CMO mode
6. try hotplug each different numa node memblocks
7. run stress memory in background
:avocado: tags=memory,privileged
'''
def setUp(self):
if not memory.check_hotplug():
self.cancel("UnSupported : memory hotplug not enabled\n")
smm = SoftwareManager()
if not smm.check_installed('stress') and not smm.install('stress'):
tarball = self.fetch_asset(
'https://fossies.org/linux/privat/stress-1.0.4.tar.gz')
archive.extract(tarball, self.teststmpdir)
self.sourcedir = os.path.join(
self.teststmpdir, os.path.basename(tarball.split('.tar.')[0]))
os.chdir(self.sourcedir)
process.run('[ -x configure ] && ./configure', shell=True)
build.make(self.sourcedir)
build.make(self.sourcedir, extra_args='install')
self.iteration = self.params.get('iteration', default=1)
self.stresstime = self.params.get('stresstime', default=10)
self.vmcount = self.params.get('vmcount', default=4)
self.iocount = self.params.get('iocount', default=4)
self.memratio = self.params.get('memratio', default=5)
self.blocks_hotpluggable = get_hotpluggable_blocks(
(os.path.join('%s', 'memory*') % MEM_PATH), self.memratio)
if os.path.exists("%s/auto_online_blocks" % MEM_PATH):
if not self.__is_auto_online():
self.hotplug_all(self.blocks_hotpluggable)
clear_dmesg()
def hotunplug_all(self, blocks):
for block in blocks:
if memory._check_memory_state(block):
err = offline(block)
if err:
self.log.error(err)
def hotplug_all(self, blocks):
for block in blocks:
if not memory._check_memory_state(block):
err = online(block)
if err:
self.log.error(err)
@staticmethod
def __is_auto_online():
with open('%s/auto_online_blocks' % MEM_PATH, 'r') as auto_file:
if auto_file.read() == 'online\n':
return True
return False
def __error_check(self):
err_list = []
logs = process.system_output("dmesg -Txl 1,2,3,4").splitlines()
for error in ERRORLOG:
for log in logs:
if error in log.decode():
err_list.append(log)
if "\n".join(err_list):
collect_dmesg(self)
self.fail('ERROR: Test failed, please check the dmesg logs')
def run_stress(self):
mem_free = memory.meminfo.MemFree.m // 4
cpu_count = int(multiprocessing.cpu_count()) // 2
process.run("stress --cpu %s --io %s --vm %s --vm-bytes %sM --timeout %ss" %
(cpu_count, self.iocount, self.vmcount, mem_free, self.stresstime), ignore_status=True, sudo=True, shell=True)
def test_hotplug_loop(self):
self.log.info("\nTEST: hotunplug and hotplug in a loop\n")
for _ in range(self.iteration):
self.log.info("\nhotunplug all memory\n")
self.hotunplug_all(self.blocks_hotpluggable)
self.run_stress()
self.log.info("\nReclaim back memory\n")
self.hotplug_all(self.blocks_hotpluggable)
self.__error_check()
def test_hotplug_toggle(self):
self.log.info("\nTEST: Memory toggle\n")
for _ in range(self.iteration):
for block in self.blocks_hotpluggable:
err = offline(block)
if err:
self.log.error(err)
self.log.info("memory%s block hotunplugged", block)
self.run_stress()
err = online(block)
if err:
self.log.error(err)
self.log.info("memory%s block hotplugged", block)
self.__error_check()
def test_dlpar_mem_hotplug(self):
if 'ppc' in platform.processor() and 'PowerNV' not in open('/proc/cpuinfo', 'r').read():
if b"mem_dlpar=yes" in process.system_output("drmgr -C", ignore_status=True, shell=True):
self.log.info("\nDLPAR remove memory operation\n")
for _ in range(len(self.blocks_hotpluggable) // 2):
process.run(
"drmgr -c mem -d 5 -w 30 -r", shell=True, ignore_status=True, sudo=True)
self.run_stress()
self.log.info("\nDLPAR add memory operation\n")
for _ in range(len(self.blocks_hotpluggable) // 2):
process.run(
"drmgr -c mem -d 5 -w 30 -a", shell=True, ignore_status=True, sudo=True)
self.__error_check()
else:
self.log.info('UNSUPPORTED: dlpar not configured..')
else:
self.log.info("UNSUPPORTED: Test not supported on this platform")
def test_hotplug_per_numa_node(self):
self.log.info("\nTEST: Numa Node memory off on\n")
with open('/sys/devices/system/node/has_normal_memory', 'r') as node_file:
nodes = node_file.read()
for node in re.split("[,-]", nodes):
node = node.strip('\n')
self.log.info("Hotplug all memory in Numa Node %s", node)
mem_blocks = get_hotpluggable_blocks((
'/sys/devices/system/node/node%s/memory*' % node), self.memratio)
for block in mem_blocks:
self.log.info(
"offline memory%s in numa node%s", block, node)
err = offline(block)
if err:
self.log.error(err)
self.run_stress()
self.__error_check()
def tearDown(self):
self.hotplug_all(self.blocks_hotpluggable)
| gpl-2.0 | 5,729,532,073,415,297,000 | 36.336283 | 130 | 0.580351 | false |
rebost/django | tests/regressiontests/utils/crypto.py | 41 | 4931 |
import math
import timeit
import hashlib
from django.utils import unittest
from django.utils.crypto import pbkdf2
class TestUtilsCryptoPBKDF2(unittest.TestCase):
# http://tools.ietf.org/html/draft-josefsson-pbkdf2-test-vectors-06
rfc_vectors = [
{
"args": {
"password": "password",
"salt": "salt",
"iterations": 1,
"dklen": 20,
"digest": hashlib.sha1,
},
"result": "0c60c80f961f0e71f3a9b524af6012062fe037a6",
},
{
"args": {
"password": "password",
"salt": "salt",
"iterations": 2,
"dklen": 20,
"digest": hashlib.sha1,
},
"result": "ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957",
},
{
"args": {
"password": "password",
"salt": "salt",
"iterations": 4096,
"dklen": 20,
"digest": hashlib.sha1,
},
"result": "4b007901b765489abead49d926f721d065a429c1",
},
# # this takes way too long :(
# {
# "args": {
# "password": "password",
# "salt": "salt",
# "iterations": 16777216,
# "dklen": 20,
# "digest": hashlib.sha1,
# },
# "result": "eefe3d61cd4da4e4e9945b3d6ba2158c2634e984",
# },
{
"args": {
"password": "passwordPASSWORDpassword",
"salt": "saltSALTsaltSALTsaltSALTsaltSALTsalt",
"iterations": 4096,
"dklen": 25,
"digest": hashlib.sha1,
},
"result": "3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038",
},
{
"args": {
"password": "pass\0word",
"salt": "sa\0lt",
"iterations": 4096,
"dklen": 16,
"digest": hashlib.sha1,
},
"result": "56fa6aa75548099dcc37d7f03425e0c3",
},
]
regression_vectors = [
{
"args": {
"password": "password",
"salt": "salt",
"iterations": 1,
"dklen": 20,
"digest": hashlib.sha256,
},
"result": "120fb6cffcf8b32c43e7225256c4f837a86548c9",
},
{
"args": {
"password": "password",
"salt": "salt",
"iterations": 1,
"dklen": 20,
"digest": hashlib.sha512,
},
"result": "867f70cf1ade02cff3752599a3a53dc4af34c7a6",
},
{
"args": {
"password": "password",
"salt": "salt",
"iterations": 1000,
"dklen": 0,
"digest": hashlib.sha512,
},
"result": ("afe6c5530785b6cc6b1c6453384731bd5ee432ee"
"549fd42fb6695779ad8a1c5bf59de69c48f774ef"
"c4007d5298f9033c0241d5ab69305e7b64eceeb8d"
"834cfec"),
},
# Check leading zeros are not stripped (#17481)
{
"args": {
"password": chr(186),
"salt": "salt",
"iterations": 1,
"dklen": 20,
"digest": hashlib.sha1,
},
"result": '0053d3b91a7f1e54effebd6d68771e8a6e0b2c5b',
},
]
def test_public_vectors(self):
for vector in self.rfc_vectors:
result = pbkdf2(**vector['args'])
self.assertEqual(result.encode('hex'), vector['result'])
def test_regression_vectors(self):
for vector in self.regression_vectors:
result = pbkdf2(**vector['args'])
self.assertEqual(result.encode('hex'), vector['result'])
def test_performance_scalability(self):
"""
Theory: If you run with 100 iterations, it should take 100
times as long as running with 1 iteration.
"""
# These values are chosen as a reasonable tradeoff between time
# to run the test suite and false positives caused by imprecise
# measurement.
n1, n2 = 200000, 800000
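        # With perfectly linear scaling t2/t1 would equal n2/n1 = 4, so
        # log(t2/t1, n2/n1) would be ~1; a noticeably larger exponent means the
        # runtime grows super-linearly with the iteration count.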
elapsed = lambda f: timeit.Timer(f,
'from django.utils.crypto import pbkdf2').timeit(number=1)
t1 = elapsed('pbkdf2("password", "salt", iterations=%d)' % n1)
t2 = elapsed('pbkdf2("password", "salt", iterations=%d)' % n2)
measured_scale_exponent = math.log(t2 / t1, n2 / n1)
# This should be less than 1. We allow up to 1.2 so that tests don't
# fail nondeterministically too often.
self.assertLess(measured_scale_exponent, 1.2)
| bsd-3-clause | -2,955,672,078,975,782,400 | 31.873333 | 78 | 0.46887 | false |
drcapulet/sentry | src/sentry/tasks/email.py | 27 | 1947 | """
sentry.tasks.email
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
from django.core.mail import get_connection
from sentry.tasks.base import instrumented_task
logger = logging.getLogger(__name__)
def _get_user_from_email(group, email):
from sentry.models import Project, User
# TODO(dcramer): we should encode the userid in emails so we can avoid this
for user in User.objects.filter(email__iexact=email):
# Make sure that the user actually has access to this project
if group.project not in Project.objects.get_for_user(
team=group.team, user=user):
logger.warning('User %r does not have access to group %r', user, group)
continue
return user
@instrumented_task(
name='sentry.tasks.email.process_inbound_email',
queue='email')
def process_inbound_email(mailfrom, group_id, payload):
"""
"""
from sentry.models import Event, Group
from sentry.web.forms import NewNoteForm
try:
group = Group.objects.select_related('project', 'team').get(pk=group_id)
except Group.DoesNotExist:
logger.warning('Group does not exist: %d', group_id)
return
user = _get_user_from_email(group, mailfrom)
if user is None:
logger.warning('Inbound email from unknown address: %s', mailfrom)
return
event = group.get_latest_event() or Event()
Event.objects.bind_nodes([event], 'data')
event.group = group
event.project = group.project
form = NewNoteForm({'text': payload})
if form.is_valid():
form.save(event, user)
@instrumented_task(
name='sentry.tasks.email.send_email',
queue='email')
def send_email(message):
connection = get_connection()
connection.send_messages([message])
| bsd-3-clause | 8,166,124,854,748,269,000 | 26.814286 | 83 | 0.665639 | false |
raphaelm/python-sepadd | tests/debit/test_00800302.py | 1 | 5356 | import datetime
import pytest
from sepaxml import SepaDD
from tests.utils import clean_ids, validate_xml
@pytest.fixture
def sdd():
return SepaDD({
"name": "TestCreditor",
"IBAN": "NL50BANK1234567890",
"BIC": "BANKNL2A",
"batch": True,
"creditor_id": "DE26ZZZ00000000000",
"currency": "EUR"
}, schema="pain.008.003.02")
SAMPLE_RESULT = b"""
<Document xmlns="urn:iso:std:iso:20022:tech:xsd:pain.008.003.02" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<CstmrDrctDbtInitn>
<GrpHdr>
<MsgId>20012017014921-ba2dab283fdd</MsgId>
<CreDtTm>2017-01-20T13:49:21</CreDtTm>
<NbOfTxs>2</NbOfTxs>
<CtrlSum>60.12</CtrlSum>
<InitgPty>
<Nm>TestCreditor</Nm>
<Id>
<OrgId>
<Othr>
<Id>DE26ZZZ00000000000</Id>
</Othr>
</OrgId>
</Id>
</InitgPty>
</GrpHdr>
<PmtInf>
<PmtInfId>TestCreditor-ecd6a2f680ce</PmtInfId>
<PmtMtd>DD</PmtMtd>
<BtchBookg>true</BtchBookg>
<NbOfTxs>1</NbOfTxs>
<CtrlSum>10.12</CtrlSum>
<PmtTpInf>
<SvcLvl>
<Cd>SEPA</Cd>
</SvcLvl>
<LclInstrm>
<Cd>CORE</Cd>
</LclInstrm>
<SeqTp>FRST</SeqTp>
</PmtTpInf>
<ReqdColltnDt>2017-01-20</ReqdColltnDt>
<Cdtr>
<Nm>TestCreditor</Nm>
</Cdtr>
<CdtrAcct>
<Id>
<IBAN>NL50BANK1234567890</IBAN>
</Id>
</CdtrAcct>
<CdtrAgt>
<FinInstnId>
<BIC>BANKNL2A</BIC>
</FinInstnId>
</CdtrAgt>
<ChrgBr>SLEV</ChrgBr>
<CdtrSchmeId>
<Id>
<PrvtId>
<Othr>
<Id>DE26ZZZ00000000000</Id>
<SchmeNm>
<Prtry>SEPA</Prtry>
</SchmeNm>
</Othr>
</PrvtId>
</Id>
</CdtrSchmeId>
<DrctDbtTxInf>
<PmtId>
<EndToEndId>TestCreditor-4431989789fb</EndToEndId>
</PmtId>
<InstdAmt Ccy="EUR">10.12</InstdAmt>
<DrctDbtTx>
<MndtRltdInf>
<MndtId>1234</MndtId>
<DtOfSgntr>2017-01-20</DtOfSgntr>
</MndtRltdInf>
</DrctDbtTx>
<DbtrAgt>
<FinInstnId>
<BIC>BANKNL2A</BIC>
</FinInstnId>
</DbtrAgt>
<Dbtr>
<Nm>Test von Testenstein</Nm>
</Dbtr>
<DbtrAcct>
<Id>
<IBAN>NL50BANK1234567890</IBAN>
</Id>
</DbtrAcct>
<RmtInf>
<Ustrd>Test transaction1</Ustrd>
</RmtInf>
</DrctDbtTxInf>
</PmtInf>
<PmtInf>
<PmtInfId>TestCreditor-d547a1b3882f</PmtInfId>
<PmtMtd>DD</PmtMtd>
<BtchBookg>true</BtchBookg>
<NbOfTxs>1</NbOfTxs>
<CtrlSum>50.00</CtrlSum>
<PmtTpInf>
<SvcLvl>
<Cd>SEPA</Cd>
</SvcLvl>
<LclInstrm>
<Cd>CORE</Cd>
</LclInstrm>
<SeqTp>RCUR</SeqTp>
</PmtTpInf>
<ReqdColltnDt>2017-01-20</ReqdColltnDt>
<Cdtr>
<Nm>TestCreditor</Nm>
</Cdtr>
<CdtrAcct>
<Id>
<IBAN>NL50BANK1234567890</IBAN>
</Id>
</CdtrAcct>
<CdtrAgt>
<FinInstnId>
<BIC>BANKNL2A</BIC>
</FinInstnId>
</CdtrAgt>
<ChrgBr>SLEV</ChrgBr>
<CdtrSchmeId>
<Id>
<PrvtId>
<Othr>
<Id>DE26ZZZ00000000000</Id>
<SchmeNm>
<Prtry>SEPA</Prtry>
</SchmeNm>
</Othr>
</PrvtId>
</Id>
</CdtrSchmeId>
<DrctDbtTxInf>
<PmtId>
<EndToEndId>TestCreditor-7e989083e265</EndToEndId>
</PmtId>
<InstdAmt Ccy="EUR">50.00</InstdAmt>
<DrctDbtTx>
<MndtRltdInf>
<MndtId>1234</MndtId>
<DtOfSgntr>2017-01-20</DtOfSgntr>
</MndtRltdInf>
</DrctDbtTx>
<DbtrAgt>
<FinInstnId>
<BIC>BANKNL2A</BIC>
</FinInstnId>
</DbtrAgt>
<Dbtr>
<Nm>Test du Test</Nm>
</Dbtr>
<DbtrAcct>
<Id>
<IBAN>NL50BANK1234567890</IBAN>
</Id>
</DbtrAcct>
<RmtInf>
<Ustrd>Test transaction2</Ustrd>
</RmtInf>
</DrctDbtTxInf>
</PmtInf>
</CstmrDrctDbtInitn>
</Document>
"""
def test_two_debits(sdd):
payment1 = {
"name": "Test von Testenstein",
"IBAN": "NL50BANK1234567890",
"BIC": "BANKNL2A",
"amount": 1012,
"type": "FRST",
"collection_date": datetime.date.today(),
"mandate_id": "1234",
"mandate_date": datetime.date.today(),
"description": "Test transaction1"
}
payment2 = {
"name": "Test du Test",
"IBAN": "NL50BANK1234567890",
"BIC": "BANKNL2A",
"amount": 5000,
"type": "RCUR",
"collection_date": datetime.date.today(),
"mandate_id": "1234",
"mandate_date": datetime.date.today(),
"description": "Test transaction2"
}
sdd.add_payment(payment1)
sdd.add_payment(payment2)
xmlout = sdd.export()
xmlpretty = validate_xml(xmlout, "pain.008.003.02")
assert clean_ids(xmlpretty.strip()) == clean_ids(SAMPLE_RESULT.strip())
| mit | 500,254,502,575,483,400 | 23.911628 | 119 | 0.50224 | false |
piotr-rusin/url-shortener | test/unit/test_views.py | 1 | 9267 | # -*- coding: utf-8 -*-
# pylint: disable=C0103
"""Tests for view classes and functions."""
import unittest
from unittest.mock import Mock, patch, MagicMock
from nose_parameterized import parameterized
from werkzeug.exceptions import HTTPException
from url_shortener.views import shorten_url, ShowURL
class BaseViewTest(object):
"""A class providing mocks used by all tested view functions."""
def setUp(self):
self.render_template_patcher = patch(
'url_shortener.views.render_template'
)
self.render_template_mock = self.render_template_patcher.start()
self.redirect_patcher = patch('url_shortener.views.redirect')
self.redirect_mock = self.redirect_patcher.start()
self.target_url_class_mock = Mock()
def tearDown(self):
self.render_template_patcher.stop()
self.redirect_patcher.stop()
class ShortenURLTest(BaseViewTest, unittest.TestCase):
"""Tests for shorten_url function."""
def setUp(self):
self.form_class_mock = Mock()
self.form_mock = self.form_class_mock()
self.form_mock.errors.values = MagicMock()
self.commit_changes_mock = Mock()
self.markup_patcher = patch('url_shortener.views.Markup')
self.markup_mock = self.markup_patcher.start()
self.url_for_patcher = patch('url_shortener.views.url_for')
self.url_for_mock = self.url_for_patcher.start()
self.flash_patcher = patch('url_shortener.views.flash')
self.flash_mock = self.flash_patcher.start()
super(ShortenURLTest, self).setUp()
def tearDown(self):
self.markup_patcher.stop()
self.url_for_patcher.stop()
self.flash_patcher.stop()
super(ShortenURLTest, self).tearDown()
def _call(self):
"""Call tested function with all arguments."""
return shorten_url(
self.target_url_class_mock,
self.form_class_mock,
self.commit_changes_mock
)
def test_gets_or_creates_a_target_url(self):
"""Test if get_or_create method of target URL class is called."""
self._call()
self.target_url_class_mock.get_or_create.assert_called_once_with(
self.form_mock.url.data
)
def test_registers_new_short_url(self):
"""Test if commit_changes function is called."""
self._call()
self.assertTrue(self.commit_changes_mock.called)
def test_redirects_to_the_same_route(self):
"""Test if a user is redirected to form page."""
self._call()
self.url_for_mock.assert_called_once_with('url_shortener.shorten_url')
redirect_url = self.url_for_mock.return_value
self.redirect_mock.assert_called_once_with(redirect_url)
def test_returns_redirect_response(self):
"""Test if a redirection result is returned."""
expected = self.redirect_mock.return_value
actual = self._call()
self.assertEqual(expected, actual)
def test_prepares_success_message(self):
"""Test if a message with specified elements is prepared."""
url_mock = self.target_url_class_mock.get_or_create.return_value
self._call()
assert_called = (
self.markup_mock.return_value.format.assert_any_call
)
assert_called('Original URL', url_mock, ' class=truncated')
assert_called('Short URL', url_mock.short_url, '')
assert_called('Preview available at', url_mock.preview_url, '')
def test_flashes_success_message(self):
"""Test if all elements of the success message are flashed."""
message_mock = self.markup_mock.return_value.format.return_value
self._call()
self.flash_mock.assert_called_with(message_mock)
self.assertEqual(3, self.flash_mock.call_count)
def test_renders_form_template(self):
"""Test if render_template is called for a GET request."""
self.form_mock.validate_on_submit.return_value = False
self._call()
self.render_template_mock.assert_called_once_with(
'shorten_url.html',
form=self.form_mock
)
def test_returns_rendered_template(self):
"""Test if rendered template is returned for a GET request."""
self.form_mock.validate_on_submit.return_value = False
expected = self.render_template_mock.return_value
actual = self._call()
self.assertEqual(expected, actual)
class TestShowURL(BaseViewTest, unittest.TestCase):
"""Tests for ShowURL class view.
:cvar PREVIEW_NOT_PREVIEW_SETUP: parameters for tests differing only
with the value of 'preview' constructor argument
:cvar WHEN_PREVIEW_SETUP: parameters for tests differing in
combinations of conditions expected to lead to rendering and
returning of a preview template
:ivar validator_mock: mock for a BlacklistValidator instance to be
used by the view instance
:ivar get_msg_if_blacklisted_mock: a mock for get_msg_if_blacklisted
method of blacklist validator.
"""
PREVIEW_NOT_PREVIEW_SETUP = [
('preview', True),
('redirect', False)
]
WHEN_PREVIEW_SETUP = [
('always', True, ''),
('always_and_with_spam_message', True, 'This is spam'),
('with_spam_message', False, 'This is spam.')
]
def setUp(self):
bval = Mock()
self.validator_mock = bval
self.get_msg_if_blacklisted_mock = bval.get_msg_if_blacklisted
self.get_msg_if_blacklisted_mock.return_value = ''
super(TestShowURL, self).setUp()
self.get_or_404_mock = self.target_url_class_mock.query.get_or_404
def create_view_and_call_dispatch_request(self, preview, alias='abc'):
"""Prepare view instance and call dispatch request method.
:param preview: a preview parameter of ShowURL constructor
:param alias: an alias parameter to be passed to the method
"""
obj = ShowURL(
preview,
self.target_url_class_mock,
self.validator_mock
)
return obj.dispatch_request(alias)
@parameterized.expand(PREVIEW_NOT_PREVIEW_SETUP)
def test_dispatch_request_queries_for_target_url_to(self, _, preview):
"""Test if the method queries for target URL with the alias.
:param preview: a preview parameter for ShowURL constructor
"""
alias = 'xyz'
self.create_view_and_call_dispatch_request(preview, alias)
self.get_or_404_mock.assert_called_once_with(alias)
@parameterized.expand(PREVIEW_NOT_PREVIEW_SETUP)
def test_dispatch_request_raises_http_error_for(self, _, preview):
"""Test for a HTTPError occurence.
:param preview: a preview parameter for ShowURL constructor
"""
self.get_or_404_mock.side_effect = HTTPException
with self.assertRaises(HTTPException):
self.create_view_and_call_dispatch_request(preview)
@parameterized.expand(PREVIEW_NOT_PREVIEW_SETUP)
def test_dispatch_request_validates_url(self, _, preview):
"""Test if the URL is validated.
:param preview: a preview parameter for ShowURL constructor
"""
self.create_view_and_call_dispatch_request(preview)
target_url = self.get_or_404_mock()
self.get_msg_if_blacklisted_mock.assert_called_once_with(
str(target_url)
)
@parameterized.expand(WHEN_PREVIEW_SETUP)
def test_dispatch_request_renders_preview(self, _, preview, spam_msg):
"""Test if the method calls render_preview.
:param preview: a preview parameter for ShowURL constructor
:param spam_msg: a message to be provided by the validator
"""
self.get_msg_if_blacklisted_mock.return_value = spam_msg
self.create_view_and_call_dispatch_request(preview)
self.render_template_mock.assert_called_once_with(
'preview.html',
target_url=self.get_or_404_mock(),
warning=spam_msg
)
@parameterized.expand(WHEN_PREVIEW_SETUP)
def test_dispatch_request_shows_preview(self, _, preview, spam_msg):
"""Test if the method returns preview.
:param preview: a preview parameter for ShowURL constructor
:param spam_msg: a message to be provided by the validator
"""
self.get_msg_if_blacklisted_mock.return_value = spam_msg
expected = self.render_template_mock()
actual = self.create_view_and_call_dispatch_request(preview)
self.assertEqual(expected, actual)
def test_dispatch_request_redirects(self):
"""Test if redirect function is called."""
self.create_view_and_call_dispatch_request(False)
self.redirect_mock.assert_called_once_with(self.get_or_404_mock())
def test_dispatch_request_returns_redirect(self):
"""Test if the method returns result of redirection."""
self.get_msg_if_blacklisted_mock.return_value = None
expected = self.redirect_mock()
actual = self.create_view_and_call_dispatch_request(False)
self.assertEqual(expected, actual)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| mit | 6,918,351,025,694,210,000 | 33.578358 | 78 | 0.64843 | false |
SUSE/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/compute/v2016_03_30/models/hardware_profile.py | 2 | 2068 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class HardwareProfile(Model):
"""Describes a hardware profile.
:param vm_size: The virtual machine size name. Possible values include:
'Basic_A0', 'Basic_A1', 'Basic_A2', 'Basic_A3', 'Basic_A4', 'Standard_A0',
'Standard_A1', 'Standard_A2', 'Standard_A3', 'Standard_A4', 'Standard_A5',
'Standard_A6', 'Standard_A7', 'Standard_A8', 'Standard_A9',
'Standard_A10', 'Standard_A11', 'Standard_D1', 'Standard_D2',
'Standard_D3', 'Standard_D4', 'Standard_D11', 'Standard_D12',
'Standard_D13', 'Standard_D14', 'Standard_D1_v2', 'Standard_D2_v2',
'Standard_D3_v2', 'Standard_D4_v2', 'Standard_D5_v2', 'Standard_D11_v2',
'Standard_D12_v2', 'Standard_D13_v2', 'Standard_D14_v2',
'Standard_D15_v2', 'Standard_DS1', 'Standard_DS2', 'Standard_DS3',
'Standard_DS4', 'Standard_DS11', 'Standard_DS12', 'Standard_DS13',
'Standard_DS14', 'Standard_DS1_v2', 'Standard_DS2_v2', 'Standard_DS3_v2',
'Standard_DS4_v2', 'Standard_DS5_v2', 'Standard_DS11_v2',
'Standard_DS12_v2', 'Standard_DS13_v2', 'Standard_DS14_v2',
'Standard_DS15_v2', 'Standard_G1', 'Standard_G2', 'Standard_G3',
'Standard_G4', 'Standard_G5', 'Standard_GS1', 'Standard_GS2',
'Standard_GS3', 'Standard_GS4', 'Standard_GS5'
:type vm_size: str or :class:`VirtualMachineSizeTypes
<azure.mgmt.compute.compute.v2016_03_30.models.VirtualMachineSizeTypes>`
"""
_attribute_map = {
'vm_size': {'key': 'vmSize', 'type': 'str'},
}
def __init__(self, vm_size=None):
self.vm_size = vm_size
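# Hedged usage sketch (illustrative only, not part of the generated client):
#     profile = HardwareProfile(vm_size='Standard_DS1_v2')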
| mit | 1,034,907,025,080,892,800 | 46 | 79 | 0.609284 | false |
danielballan/scikit-xray | skbeam/core/speckle.py | 7 | 12322 | # ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Developed at the NSLS-II, Brookhaven National Laboratory #
# Developed by Sameera K. Abeykoon and Yugang Zhang, June 2015 #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
"""
X-ray speckle visibility spectroscopy(XSVS) - Dynamic information of
the speckle patterns are obtained by analyzing the speckle statistics
and calculating the speckle contrast in single scattering patterns.
This module will provide XSVS analysis tools
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
import time
from . import roi
from .utils import bin_edges_to_centers, geometric_series
import logging
logger = logging.getLogger(__name__)
def xsvs(image_sets, label_array, number_of_img, timebin_num=2,
max_cts=None):
"""
This function will provide the probability density of detecting photons
for different integration times.
The experimental probability density P(K) of detecting photons K is
obtained by histogramming the speckle counts over an ensemble of
equivalent pixels and over a number of speckle patterns recorded
with the same integration time T under the same condition.
Bad images need to be represented as an array filled with np.nan.
Using bad_to_nan function in mask.py the bad images can be converted
into np.nan arrays.
Parameters
----------
image_sets : array
sets of images
label_array : array
labeled array; 0 is background.
Each ROI is represented by a distinct label (i.e., integer).
number_of_img : int
        number of images (how far to go with integration times when finding
        the time_bin, using the skbeam.core.utils.geometric_series function)
timebin_num : int, optional
integration time; default is 2
max_cts : int, optional
the brightest pixel in any ROI in any image in the image set.
defaults to using skbeam.core.roi.roi_max_counts to determine
the brightest pixel in any of the ROIs
Returns
-------
prob_k_all : array
probability density of detecting photons
prob_k_std_dev : array
standard deviation of probability density of detecting photons
Notes
-----
    This implementation is based on the following references
    References: [1]_, [2]_
.. [1] L. Li, P. Kwasniewski, D. Oris, L Wiegart, L. Cristofolini,
C. Carona and A. Fluerasu , "Photon statistics and speckle visibility
spectroscopy with partially coherent x-rays" J. Synchrotron Rad.,
vol 21, p 1288-1295, 2014.
.. [2] R. Bandyopadhyay, A. S. Gittings, S. S. Suh, P.K. Dixon and
D.J. Durian "Speckle-visibilty Spectroscopy: A tool to study
time-varying dynamics" Rev. Sci. Instrum. vol 76, p 093110, 2005.
There is an example in https://github.com/scikit-beam/scikit-beam-examples
It will demonstrate the use of these functions in this module for
experimental data.
"""
if max_cts is None:
max_cts = roi.roi_max_counts(image_sets, label_array)
# find the label's and pixel indices for ROI's
labels, indices = roi.extract_label_indices(label_array)
# number of ROI's
u_labels = list(np.unique(labels))
num_roi = len(u_labels)
# create integration times
time_bin = geometric_series(timebin_num, number_of_img)
# number of times in the time bin
num_times = len(time_bin)
# probability density of detecting photons
prob_k_all = np.zeros([num_times, num_roi], dtype=np.object)
# square of probability density of detecting photons
prob_k_pow_all = np.zeros_like(prob_k_all)
# standard deviation of probability density of detecting photons
prob_k_std_dev = np.zeros_like(prob_k_all)
# get the bin edges for each time bin for each ROI
bin_edges = np.zeros(prob_k_all.shape[0], dtype=prob_k_all.dtype)
for i in range(num_times):
bin_edges[i] = np.arange(max_cts*2**i)
start_time = time.time() # used to log the computation time (optionally)
for i, images in enumerate(image_sets):
# Ring buffer, a buffer with periodic boundary conditions.
# Images must be keep for up to maximum delay in buf.
buf = np.zeros([num_times, timebin_num],
dtype=np.object) # matrix of buffers
# to track processing each time level
track_level = np.zeros(num_times)
# to track bad images in each time level
track_bad = np.zeros(num_times)
# bad images, represented as an array filled with np.nan
# (using bad_to_nan function in mask.py all the bad
# images are converted into np.nan arrays)
# to increment buffer
cur = np.full(num_times, timebin_num)
# to track how many images processed in each level
img_per_level = np.zeros(num_times, dtype=np.int64)
prob_k = np.zeros_like(prob_k_all)
prob_k_pow = np.zeros_like(prob_k_all)
for n, img in enumerate(images):
cur[0] = (1 + cur[0]) % timebin_num
# read each frame
# Put the image into the ring buffer.
buf[0, cur[0] - 1] = (np.ravel(img))[indices]
_process(num_roi, 0, cur[0] - 1, buf, img_per_level, labels,
max_cts, bin_edges[0], prob_k, prob_k_pow, track_bad)
# check whether the number of levels is one, otherwise
# continue processing the next level
level = 1
while level < num_times:
if not track_level[level]:
track_level[level] = 1
else:
prev = 1 + (cur[level - 1] - 2) % timebin_num
cur[level] = 1 + cur[level] % timebin_num
buf[level, cur[level]-1] = (buf[level-1,
prev-1] +
buf[level-1,
cur[level - 1] - 1])
track_level[level] = 0
_process(num_roi, level, cur[level]-1, buf, img_per_level,
labels, max_cts, bin_edges[level], prob_k,
prob_k_pow, track_bad)
level += 1
prob_k_all += (prob_k - prob_k_all)/(i + 1)
prob_k_pow_all += (prob_k_pow - prob_k_pow_all)/(i + 1)
prob_k_std_dev = np.power((prob_k_pow_all -
np.power(prob_k_all, 2)), .5)
logger.info("Processing time for XSVS took %s seconds."
"", (time.time() - start_time))
return prob_k_all, prob_k_std_dev
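# Hedged usage sketch (added for illustration): the synthetic frames and the
# single square ROI below are assumptions, not data shipped with this module.
def _example_xsvs_usage():
    """Minimal example of the xsvs() calling convention on synthetic data."""
    rng = np.random.RandomState(0)
    label_array = np.zeros((16, 16), dtype=int)
    label_array[2:6, 2:6] = 1                     # one ROI, labelled 1
    images = rng.poisson(2, size=(8, 16, 16))     # 8 synthetic speckle frames
    # image_sets is an iterable of image stacks; a single stack is used here.
    return xsvs((images,), label_array, number_of_img=8, timebin_num=2)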
def _process(num_roi, level, buf_no, buf, img_per_level, labels,
max_cts, bin_edges, prob_k, prob_k_pow, track_bad):
"""
Internal helper function. This modifies inputs in place.
    This helper function calculates the probability of detecting photons for
    each integration time.
.. warning :: This function mutates the input values.
Parameters
----------
num_roi : int
number of ROI's
level : int
current time level(integration time)
buf_no : int
current buffer number
buf : array
image data array to use for XSVS
img_per_level : int
to track how many images processed in each level
labels : array
labels of the required region of interests(ROI's)
max_cts: int
maximum pixel count
bin_edges : array
bin edges for each integration times and each ROI
prob_k : array
probability density of detecting photons
prob_k_pow : array
squares of probability density of detecting photons
track_bad : array
to track bad images in each level
"""
img_per_level[level] += 1
u_labels = list(np.unique(labels))
# Check if there are any bad images, represented as an array filled
# with np.nan (using bad_to_nan function in mask.py all the bad
# images are converted into np.nan arrays)
if np.isnan(buf[level, buf_no]).any():
track_bad[level] += 1
return
for j, label in enumerate(u_labels):
roi_data = buf[level, buf_no][labels == label]
spe_hist, bin_edges = np.histogram(roi_data, bins=bin_edges,
density=True)
spe_hist = np.nan_to_num(spe_hist)
prob_k[level, j] += ((spe_hist - prob_k[level, j]) /
(img_per_level[level] - track_bad[level]))
prob_k_pow[level, j] += ((np.power(spe_hist, 2) -
prob_k_pow[level, j]) /
(img_per_level[level] -
track_bad[level]))
def normalize_bin_edges(num_times, num_rois, mean_roi, max_cts):
"""
This will provide the normalized bin edges and bin centers for each
integration time.
Parameters
----------
num_times : int
number of integration times for XSVS
num_rois : int
number of ROI's
mean_roi : array
mean intensity of each ROI
shape (number of ROI's)
max_cts : int
maximum pixel counts
Returns
-------
norm_bin_edges : array
normalized speckle count bin edges
shape (num_times, num_rois)
norm_bin_centers :array
normalized speckle count bin centers
shape (num_times, num_rois)
"""
norm_bin_edges = np.zeros((num_times, num_rois), dtype=object)
norm_bin_centers = np.zeros_like(norm_bin_edges)
for i in range(num_times):
for j in range(num_rois):
norm_bin_edges[i, j] = np.arange(max_cts*2**i)/(mean_roi[j]*2**i)
norm_bin_centers[i, j] = bin_edges_to_centers(norm_bin_edges[i, j])
return norm_bin_edges, norm_bin_centers
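# Hedged worked example: with num_times=2, num_rois=1, mean_roi=[2.0] and
# max_cts=4, norm_bin_edges[0, 0] == [0, 0.5, 1.0, 1.5] (arange(4) / 2.0) and
# norm_bin_edges[1, 0] is the 8-element analogue divided by 4.0.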
| bsd-3-clause | 4,274,667,901,027,297,000 | 39.4 | 79 | 0.581967 | false |
harymitchell/mscs-ml | MLWorker/worker.py | 1 | 9607 | import os
import pprint
import pandas
import numpy as np
from pymongo import MongoClient
import gridfs
from bson import ObjectId
import Queue
from keras_evaluator import KerasEvaluator
from keras.models import load_model
from sklearn.externals import joblib
from evaluation_service import evaluation_service
from model_service import model_service
from dataset_service import dataset_service
from settings import MONGO_HOST, MONGO_PORT, MONGO_USERNAME, MONGO_PASSWORD, MONGO_DBNAME, WORKER_ID, SLEEP_TIME, DEPLOY_DIRECTORY
import time, sys, os, traceback
from sklearn.pipeline import Pipeline
class Worker (object):
"""Object which processes OPEN evaluations from the DB and writes back results"""
def __init__(self, mongo_uri=None, db=None, worker_id=None, client=None):
self.serviceURL = os.environ.get('SERVICE_URL', None)
self.worker_id = worker_id
self.mongo_uri = mongo_uri
if client:
self.client = client
else:
self.client = MongoClient(mongo_uri)
self.evaluation_service = evaluation_service(client=self.client, db=db, worker_id=worker_id)
self.model_service = model_service(db=db, client=self.evaluation_service.client)
self.dataset_service = dataset_service(db=db, client=self.evaluation_service.client)
# gridFS setup
self.db = self.client[db]
self.fs = gridfs.GridFS(self.db)
def run(self, in_q=None, out_q=None):
"""Run application"""
print ("starting worker node")
while True:
if in_q:
try:
got = in_q.get(block=False)
if got and len(got) == 3:
modelID, input_data, input_columns = got
prediction = self.predictFromModel(modelID, input_data, input_columns)
out_q.put({'prediction': prediction})
except Queue.Empty:
pass
except Exception as e:
traceback.print_exc()
out_q.put({'error': e})
self.run_once()
time.sleep(SLEEP_TIME)
def run_once(self):
"""Attempt to retrieve a single open evaluation"""
self.evaluation = self.evaluation_service.retrieveOpenEvaluation()
if self.evaluation:
self.process_current_evaluation()
def process_current_evaluation(self):
"""Process the current evaluation"""
try:
print ("Processing evaluation: {}".format(self.evaluation['_id']))
self.model = self.evaluation['model_ref']
self.dataset = self.dataset_service.getDatasetByID(self.model['dataset'])
self.keras_evaluator = KerasEvaluator(self.dataset, self.model, self.evaluation)#, gridfs=self.fs, model_service=self.model_service)
evaluated_model = self.keras_evaluator.build_and_evaluate_new_model()
print 'model evaluated'
self.saveModel(evaluated_model)
if len(self.keras_evaluator.errors) > 0:
self.handle_errored_evaluation(self.keras_evaluator.errors)
else:
self.handle_successful_evaluation()
except Exception as e:
type_, value_, traceback_ = sys.exc_info()
ex = traceback.format_exception(type_, value_, traceback_)
print (ex)
self.handle_errored_evaluation(ex)
def saveModel(self, evaluated_model):
"""write back the h5 file to the DB"""
print 'saving model'
if not os.path.exists(DEPLOY_DIRECTORY):
print 'creating deploy directory'
os.makedirs(DEPLOY_DIRECTORY)
model_file_name = str(self.model.get('_id'))+'.h5'
model_full_path = os.path.join(DEPLOY_DIRECTORY, model_file_name)
print 'saving to file '+ model_full_path
evaluated_model.save(model_full_path)
try:
# save weights to gridfs
f = open(model_full_path, 'r')
fileId = self.fs.put(f)
print self.model
print self.model['_id']
print {'$set': {'serviceURL': self.serviceURL, 'pathToHDF5': model_full_path, 'deployID': fileId}}
res = self.model_service.updateModel(self.model, {'$set': {'serviceURL': self.serviceURL, 'pathToHDF5': model_full_path, 'deployID': fileId}})
print 'model updated'
print res.raw_result
except Exception as e:
print 'error saving file'
print e
finally:
f.close()
def savePipeline(self, pipeline):
# Save the Keras model first:
pipeline.named_steps['keras_model'].model.save('deploys/keras_model.h5')
# This hack allows us to save the sklearn pipeline:
pipeline.named_steps['keras_model'].model = None
# Finally, save the pipeline:
joblib.dump(pipeline, 'deploys/sklearn_pipeline.pkl')
def saveWeightsJson(self, evaluated_model):
###
## write back the h5 file and json separately
###
if not os.path.exists(DEPLOY_DIRECTORY):
os.makedirs(DEPLOY_DIRECTORY)
model_file_name = str(self.model.get('_id'))+'.h5'
model_full_path = os.path.join(DEPLOY_DIRECTORY, model_file_name)
json_file_name = str(self.model.get('_id'))+'.json'
json_full_path = os.path.join(DEPLOY_DIRECTORY, json_file_name)
# evaluated_model.save(model_full_path)
# save architecture
model_json = evaluated_model.to_json()
with open(json_full_path, "w") as json_file:
json_file.write(model_json)
# save weights
evaluated_model.save_weights(model_full_path)
try:
# save weights to gridfs
f = open(model_full_path, 'r')
fileId = self.fs.put(f)
# save architecture to gridfs
f_json = open(json_full_path, 'r')
fileId_json = self.fs.put(f_json)
self.model_service.updateModel(self.model, {'$set': {'serviceURL': self.serviceURL, 'pathToHDF5': model_full_path, 'deployID': fileId, 'jsonFileID': fileId_json}})
finally:
f.close()
def handle_successful_evaluation(self):
"""Handles successful evaluation by writing to DB with DONE status and
metrics"""
self.evaluation_service.updateEvaluation(self.evaluation, {
'$set': {
'status': 'DONE',
'metrics_names': self.keras_evaluator.model.metrics_names,
'scores': self.keras_evaluator.scores,
'model_ref': self.model
}
})
def handle_errored_evaluation(self, errors):
"""Handles failure in processing
write evaluation to DB with FAILED status and errors"""
self.evaluation_service.updateEvaluation(self.evaluation, {
'$set': {
'status': 'FAILED',
'errors': errors
}
})
def predictFromModel(self, modelID, input_data, input_columns):
"""Return a prediction for modelID"""
print modelID, input_data, input_columns
# setup input data
if not isinstance(input_data, list):
input_data = [input_data]
df = pandas.DataFrame(input_data)[input_columns]
X = df.as_matrix().astype(np.float)
if not os.path.exists(DEPLOY_DIRECTORY):
os.makedirs(DEPLOY_DIRECTORY)
model_file_name = str(modelID)+'.h5'
model_full_path = os.path.join(DEPLOY_DIRECTORY, model_file_name)
# json_file_name = str(modelID)+'.json'
# json_full_path = os.path.join(DEPLOY_DIRECTORY, json_file_name)
if not os.path.isfile(model_full_path):
print 'loading model from gridfs'
model_ref = self.model_service.getModelByID(ObjectId(modelID))
# load and save weights
grid_out = self.fs.get(model_ref.get('deployID'))
f = open(model_full_path, 'w')
f.write(grid_out.read())
f.close()
# load and save json
# grid_out = self.fs.get(model_ref.get('jsonFileID'))
# f = open(json_full_path, 'w')
# f.write(grid_out.read())
f.close()
else:
print 'loading model from file'
# load json and create model
# json_file = open(json_full_path, 'r')
# loaded_model_json = json_file.read()
# json_file.close()
# model = model_from_json(loaded_model_json)
# # load weights into new model
# model.load_weights(model_full_path)
model = load_model(model_full_path)
# model._make_predict_function()
predictions = model.predict(X)
return predictions
if __name__ == '__main__':
mongo_uri = os.environ.get('MONGOLAB_URI', "mongodb://{username}:{password}@{host}:{port}/{database}".format(
username=MONGO_USERNAME, password=MONGO_PASSWORD, host=MONGO_HOST, port=MONGO_PORT, database=MONGO_DBNAME))
print ("starting against "+mongo_uri)
worker = Worker(mongo_uri=mongo_uri, db=os.environ.get('MONGO_DBNAME', MONGO_DBNAME), worker_id=WORKER_ID)
worker.run_once() | mit | -6,986,750,958,680,871,000 | 41.089686 | 175 | 0.577183 | false |
rgardler/acs-cli | tests/commands/test_demo.py | 2 | 1547 | """Tests for `acs demo` subcommand."""
import pytest
import time
import urllib.error
import urllib.request
class TestDemo():
slow = pytest.mark.skipif(
not pytest.config.getoption("--runslow"),
reason="need --runslow option to run"
)
def test_lbweb(self, demo, service):
"""Tests the creation of the lbweb demo. This version of the test will fail if the test cluster dows not already exist.
"""
assert(service.exists())
demo.args = {'<command>': 'lbweb',
"--remove": False}
try:
result = demo.lbweb()
assert("Application deployed" in result)
assert(self.isSimpleWebUp(service))
except RuntimeWarning as e:
demo.logger.warning("The application was already installed so the test was not as thorough as it could have been")
        # remove the application
demo.args["--remove"] = True
result = demo.lbweb()
assert("Application removed" in result)
def isSimpleWebUp(self, service):
isConnected = False
attempts = 0
while not isConnected and attempts < 50:
req = urllib.request.Request("http://" + service.getAgentEndpoint())
try:
with urllib.request.urlopen(req) as response:
                    html = response.read().decode("utf-8", errors="replace")
if "Real Visit Results" in html:
isConnected = True
except urllib.error.URLError as e:
isConnected = False
attempts = attempts + 1
time.sleep(0.1)
| apache-2.0 | -8,817,229,131,061,007,000 | 32.630435 | 127 | 0.581125 | false |
prometheanfire/openstack-guest-agents-unix | install_modules.py | 4 | 5003 | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import os
import shutil
import re
import sys
import zipfile
import commands.command_list
# Other modules here that get lazy loaded.. :-/
import bz2
import gzip
import httplib
import zlib
# Make sure we get at least one of these
try:
import anyjson
except Exception:
pass
try:
import json
except Exception:
pass
try:
import simplejson
except Exception:
pass
def install_modules(system_paths, installdir):
c = commands.init(testmode=True)
to_install = set()
def copy_tree(srcdir, destdir):
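        """Recursively copy everything except .py sources (compiled modules, shared objects, data files) from srcdir into destdir."""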
if not os.path.exists(destdir):
os.mkdir(destdir)
for root, dirs, files in os.walk(srcdir):
for d in dirs:
if not os.path.exists(os.path.join(destdir, d)):
os.mkdir(os.path.join(destdir, d))
d = destdir + root[len(srcdir):]
if not os.path.exists(d):
os.mkdir(d)
for f in files:
                # Only install compiled/binary artifacts (.pyc, .so, data files), never .py sources
if not f.endswith('.py'):
fname = os.path.join(d, f)
shutil.copy2(os.path.join(root, f), fname)
def _do_install(src, destdir, subdirs_only=False):
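        """Copy a single module into destdir: a plain file, a package directory, or (with subdirs_only) the contents of an unpacked egg."""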
print "Installing %s" % src
if os.path.isdir(src):
if not subdirs_only:
subdir = src.rsplit('/', 1)[1]
copy_tree(src, os.path.join(destdir, subdir))
return
for d in os.listdir(src):
if d == "EGG-INFO":
continue
path = os.path.join(src, d)
if os.path.isdir(path):
copy_tree(path, os.path.join(destdir, d))
else:
shutil.copy2(path, destdir)
else:
shutil.copy2(src, destdir)
for modname in sys.modules:
if modname == "__main__":
continue
try:
mod_fn = sys.modules[modname].__file__
except:
continue
mod_fn = os.path.normpath(mod_fn)
base_dir = ''
for p in system_paths:
p_len = len(p)
if mod_fn.startswith(p) and p > len(base_dir):
base_dir = p
# Only install modules that are in the system paths. We install
# our command modules separately.
if base_dir:
# Turn /usr/lib/python2.6/Crypto/Cipher/AES into:
# /usr/lib/python2.6/Crypto
rest_dir = mod_fn[len(base_dir) + 1:]
if '/' in rest_dir:
rest_dir = rest_dir.split('/', 1)[0]
if base_dir.endswith('site-packages'):
idir = installdir + '/site-packages'
else:
idir = installdir
if re.match('.*\.egg', rest_dir):
full_srcdir = os.path.join(base_dir, rest_dir)
if os.path.isdir(full_srcdir):
_do_install(os.path.join(base_dir, rest_dir),
idir, True)
else:
z = zipfile.ZipFile(full_srcdir)
files = z.infolist()
for f in files:
if f.filename == "EGG-INFO" or \
f.filename.startswith("EGG-INFO/"):
continue
z.extract(f, idir)
z.close()
else:
_do_install(os.path.join(base_dir, rest_dir),
idir)
if __name__ == "__main__":
prog_name = sys.argv[0]
if len(sys.argv) != 2:
print "Usage: %s <install_dir>" % prog_name
sys.exit(1)
installdir = sys.argv[1]
sys_paths = sys.path
# Pop off the first directory, which is the directory of this script.
# We do this so we can ignore *our* modules, which are installed
# separately
sys_paths.pop(0)
if not os.path.exists(installdir):
os.makedirs(installdir)
if not os.path.exists(installdir + '/site-packages'):
os.mkdir(installdir + '/site-packages')
if not os.path.isdir(installdir + '/site-packages'):
print "Error: '%s/site-packages' exists and is not a directory" % \
installdir
sys.exit(1)
install_modules(sys_paths, installdir)
| apache-2.0 | -6,336,809,742,655,173,000 | 29.882716 | 79 | 0.538277 | false |
gabstopper/smc-python | smc-monitoring/smc_monitoring/monitors/connections.py | 1 | 5154 | """
A connection query returns all currently connected sessions on the
given target.
Create a query to obtain all connections for a given engine::
query = ConnectionQuery('sg_vm')
Add a timezone to the query::
query.format.timezone('CST')
Add a filter to only get connections if the source address is 172.18.1.252::
query.add_in_filter(FieldValue(LogField.SRC), [IPValue('172.18.1.252')])
Only connections that match a specific service::
query.add_in_filter(FieldValue(LogField.SERVICE), [ServiceValue('TCP/443', 'UDP/53')])
Execute query and return raw results::
for records in query.fetch_raw():
...
Execute query and return as an :class:`.Connection` element::
for records in query.fetch_as_element():
...
Retrieving live streaming results::
for records in query.fetch_live():
...
.. seealso:: :class:`smc_monitoring.models.filters` for more information on creating filters
"""
from smc_monitoring.models.query import Query
from smc_monitoring.models.constants import LogField
class ConnectionQuery(Query):
"""
Show all current connections on the specified target.
:ivar list field_ids: field IDs are the default fields for this entry type
and are constants found in :class:`smc_monitoring.models.constants.LogField`
:param str target: name of target engine/cluster
"""
location = '/monitoring/session/socket'
field_ids = [
LogField.TIMESTAMP,
LogField.NODEID,
LogField.SRC,
LogField.SPORT,
LogField.SRCZONE,
LogField.DST,
LogField.DPORT,
LogField.DSTZONE,
LogField.SERVICE,
LogField.IPSAPPID,
LogField.PROTOCOL,
LogField.STATE]
def __init__(self, target, **kw):
super(ConnectionQuery, self).__init__('CONNECTIONS', target, **kw)
def fetch_as_element(self, **kw):
"""
Fetch the results and return as a Connection element. The original
query is not modified.
:return: generator of elements
:rtype: :class:`.Connection`
"""
clone = self.copy()
clone.format.field_format('id')
for custom_field in ['field_ids', 'field_names']:
clone.format.data.pop(custom_field, None)
for list_of_results in clone.fetch_raw(**kw):
for entry in list_of_results:
yield Connection(**entry)
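# Hedged usage sketch (illustrative only; 'sg_vm' is an example engine name):
#     query = ConnectionQuery('sg_vm')
#     for conn in query.fetch_as_element():
#         print(conn.source_addr, conn.dest_addr, conn.service, conn.state)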
class Connection(object):
"""
Connection represents a state table entry. This is the result of
making a :class:`~ConnectionQuery` and using
:meth:`~ConnectionQuery.fetch_as_element`.
"""
def __init__(self, **data):
self.cxn = data
@property
def timestamp(self):
"""
Timestamp of this connection. It is recommended to set the timezone
        on the query to view this timestamp in the system's local time.
For example::
query.format.timezone('CST')
:return: timestamp in string format
:rtype: str
"""
return self.cxn.get(str(LogField.TIMESTAMP))
@property
def engine(self):
"""
The engine/cluster for this state table entry
:return: engine or cluster for this entry
:rtype: str
"""
return self.cxn.get(str(LogField.NODEID))
@property
def source_addr(self):
"""
Source address for this entry
:rtype: str
"""
return self.cxn.get(str(LogField.SRC))
@property
def dest_addr(self):
"""
Destination address for this entry
:rtype: str
"""
return self.cxn.get(str(LogField.DST))
@property
def service(self):
"""
Service for this entry
:return: service (HTTP/HTTPS, etc)
:rtype: str
"""
return self.cxn.get(str(LogField.SERVICE))
@property
def protocol(self):
"""
Protocol for this entry
:return: protocol (UDP/TCP/ICMP, etc)
:rtype: str
"""
return self.cxn.get(str(LogField.PROTOCOL), 'ANY')
@property
def source_port(self):
"""
Source port for the entry.
:rtype: int
"""
return int(self.cxn.get(str(LogField.SPORT), 0))
@property
def dest_port(self):
"""
Destination port for the entry.
:rtype: int
"""
        return int(self.cxn.get(str(LogField.DPORT), 0))
@property
def state(self):
"""
State of the connection.
:return: state, i.e. UDP established, TCP established, etc.
:rtype: str
"""
return self.cxn.get(str(LogField.STATE))
def __str__(self):
return '{}(src={},dst={},proto={},dst_port={},state={})'.format(
self.__class__.__name__,
self.source_addr, self.dest_addr, self.protocol,
self.dest_port, self.state)
def __repr__(self):
return str(self) | apache-2.0 | -1,224,112,347,497,343,500 | 25.435897 | 92 | 0.574699 | false |
sposs/DIRAC | Core/scripts/dirac-stop-component.py | 10 | 1357 | #!/usr/bin/env python
# $HeadURL: svn+ssh://svn.cern.ch/reps/dirac/DIRAC/trunk/DIRAC/Core/scripts/dirac-install.py $
"""
  Stop the given DIRAC component (or all components) using the runsvctrl utility
"""
__RCSID__ = "$Id: dirac-install.py 26844 2010-07-16 08:44:22Z rgracian $"
#
from DIRAC.Core.Base import Script
Script.disableCS()
Script.setUsageMessage( '\n'.join( ['Stop DIRAC component using runsvctrl utility',
'Usage:',
' %s [option|cfgfile] ... [system [service|agent]]' % Script.scriptName,
'Arguments:',
' system: Name of the system for the component (default *: all)',
' service|agent: Name of the particular component (default *: all)' ] ) )
Script.parseCommandLine()
args = Script.getPositionalArgs()
if len( args ) > 2:
Script.showHelp()
exit( -1 )
system = '*'
component = '*'
if len( args ) > 0:
system = args[0]
if system != '*':
if len( args ) > 1:
component = args[1]
#
from DIRAC.Core.Utilities import InstallTools
#
InstallTools.exitOnError = True
#
result = InstallTools.runsvctrlComponent( system, component, 'd' )
if not result['OK']:
print 'ERROR:', result['Message']
exit( -1 )
InstallTools.printStartupStatus( result['Value'] )
| gpl-3.0 | 5,934,246,798,086,425,000 | 33.794872 | 110 | 0.596905 | false |
spring01/libPSI | lib/python/qmmm.py | 1 | 5432 | #
#@BEGIN LICENSE
#
# PSI4: an ab initio quantum chemistry software package
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#@END LICENSE
#
"""Module with classes to integrate MM charges into
a QM calculation.
"""
import psi4
import re
import os
import math
import p4const
from molutil import *
from driver import *
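# A rough usage sketch based on the classes below (the molecule object, basis
# set names and charge values are illustrative; handing the populated
# ExternalPotential to the SCF code is outside this module):
#
#   diffuse = Diffuse(molecule, "cc-pvdz", "cc-pvdz-ri")
#   diffuse.fitScf()
#   qmmm = QMMM()
#   qmmm.addDiffuse(diffuse)
#   qmmm.addChargeAngstrom(-0.8, 1.5, 0.0, 0.0)
#   qmmm.populateExtern()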
class Diffuse(object):
def __init__(self, molecule, basisname, ribasisname):
self.molecule = molecule
self.basisname = basisname
self.ribasisname = ribasisname
self.basis = None
self.ribasis = None
self.da = None
self.Da = None
self.wfn = None
def __str__(self):
s = ' => Diffuse <=\n\n'
s = s + ' ' + str(self.molecule) + '\n'
s = s + ' ' + self.basisname + '\n'
s = s + ' ' + self.ribasisname + '\n'
s = s + '\n'
return s
def fitScf(self):
"""Function to run scf and fit a system of diffuse charges to
resulting density.
"""
basisChanged = psi4.has_option_changed("BASIS")
ribasisChanged = psi4.has_option_changed("DF_BASIS_SCF")
scftypeChanged = psi4.has_option_changed("SCF_TYPE")
basis = psi4.get_option("BASIS")
ribasis = psi4.get_option("DF_BASIS_SCF")
scftype = psi4.get_option("SCF_TYPE")
psi4.print_out(" => Diffuse SCF (Determines Da) <=\n\n")
activate(self.molecule)
psi4.set_global_option("BASIS", self.basisname)
psi4.set_global_option("DF_BASIS_SCF", self.ribasisname)
psi4.set_global_option("SCF_TYPE", "DF")
energy('scf')
psi4.print_out("\n")
self.fitGeneral()
psi4.clean()
psi4.set_global_option("BASIS", basis)
psi4.set_global_option("DF_BASIS_SCF", ribasis)
psi4.set_global_option("SCF_TYPE", scftype)
if not basisChanged:
psi4.revoke_option_changed("BASIS")
if not ribasisChanged:
psi4.revoke_option_changed("DF_BASIS_SCF")
if not scftypeChanged:
psi4.revoke_option_changed("SCF_TYPE")
def fitGeneral(self):
"""Function to perform a general fit of diffuse charges
to wavefunction density.
"""
psi4.print_out(" => Diffuse Charge Fitting (Determines da) <=\n\n")
self.wfn = psi4.wavefunction()
self.Da = self.wfn.Da()
self.basis = self.wfn.basisset()
parser = psi4.Gaussian94BasisSetParser()
self.ribasis = psi4.BasisSet.construct(parser, self.molecule, "DF_BASIS_SCF")
fitter = psi4.DFChargeFitter()
fitter.setPrimary(self.basis)
fitter.setAuxiliary(self.ribasis)
fitter.setD(self.Da)
self.da = fitter.fit()
self.da.scale(2.0)
def populateExtern(self, extern):
# Electronic Part
extern.addBasis(self.ribasis, self.da)
# Nuclear Part
for A in range(0, self.molecule.natom()):
extern.addCharge(self.molecule.Z(A), self.molecule.x(A), self.molecule.y(A), self.molecule.z(A))
class QMMM(object):
def __init__(self):
self.charges = []
self.diffuses = []
self.extern = psi4.ExternalPotential()
def addDiffuse(self, diffuse):
"""Function to add a diffuse charge field *diffuse*."""
self.diffuses.append(diffuse)
def addChargeBohr(self, Q, x, y, z):
"""Function to add a point charge of magnitude *Q* at
position (*x*, *y*, *z*) Bohr.
"""
self.charges.append([Q, x, y, z])
def addChargeAngstrom(self, Q, x, y, z):
"""Function to add a point charge of magnitude *Q* at
position (*x*, *y*, *z*) Angstroms.
"""
self.charges.append([Q, x / p4const.psi_bohr2angstroms, y / p4const.psi_bohr2angstroms, z / p4const.psi_bohr2angstroms])
def __str__(self):
s = ' ==> QMMM <==\n\n'
s = s + ' => Charges (a.u.) <=\n\n'
s = s + ' %11s %11s %11s %11s\n' % ('Z', 'x', 'y', 'z')
for k in range(0, len(self.charges)):
s = s + ' %11.7f %11.3E %11.3E %11.3E\n' % (self.charges[k][0], self.charges[k][1], self.charges[k][2], self.charges[k][3])
s = s + '\n'
s = s + ' => Diffuses <=\n\n'
for k in range(0, len(self.diffuses)):
s = s + str(self.diffuses[k])
return s
def populateExtern(self):
"""Function to define a charge field external to the
molecule through point and diffuse charges.
"""
# Charges
for charge in self.charges:
self.extern.addCharge(charge[0], charge[1], charge[2], charge[3])
# Diffuses
for diffuse in self.diffuses:
diffuse.populateExtern(self.extern)
| gpl-2.0 | 3,787,478,945,669,114,000 | 30.04 | 138 | 0.594993 | false |
BNUCNL/FreeROI | doc/conf.py | 5 | 8241 | # -*- coding: utf-8 -*-
#
# FreeROI documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 6 09:54:19 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.pngmath', 'sphinx.ext.mathjax', 'sphinxcontrib.googleanalytics']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'FreeROI'
copyright = u'2012-2014, Neuroinformatic Team in LiuLab from Beijing Normal University'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2.2'
# The full version, including alpha/beta/rc tags.
release = '0.2.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
html_logo = '_static/logo_200.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/logo.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'FreeROIdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'FreeROI.tex', u'FreeROI Documentation',
u'FreeROI Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'freeroi', u'FreeROI Documentation',
[u'FreeROI Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'FreeROI', u'FreeROI Documentation',
u'FreeROI Team', 'FreeROI', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# google analytics extension config
googleanalytics_id = 'UA-51205611-1'
googleanalytics_enable = True
| bsd-3-clause | 8,521,173,106,514,029,000 | 31.573123 | 160 | 0.70756 | false |
MoritzS/django | django/core/management/utils.py | 44 | 3490 | import os
from subprocess import PIPE, Popen
from django.apps import apps as installed_apps
from django.utils.crypto import get_random_string
from django.utils.encoding import DEFAULT_LOCALE_ENCODING, force_text
from .base import CommandError
def popen_wrapper(args, os_err_exc_type=CommandError, stdout_encoding='utf-8'):
"""
Friendly wrapper around Popen.
Return stdout output, stderr output, and OS status code.
"""
try:
p = Popen(args, shell=False, stdout=PIPE, stderr=PIPE, close_fds=os.name != 'nt')
except OSError as err:
raise os_err_exc_type('Error executing %s' % args[0]) from err
output, errors = p.communicate()
return (
force_text(output, stdout_encoding, strings_only=True, errors='strict'),
force_text(errors, DEFAULT_LOCALE_ENCODING, strings_only=True, errors='replace'),
p.returncode
)
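# Example (illustrative): capture an external tool's output and exit status.
#   out, err, status = popen_wrapper(['xgettext', '--version'])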
def handle_extensions(extensions):
"""
Organize multiple extensions that are separated with commas or passed by
using --extension/-e multiple times.
For example: running 'django-admin makemessages -e js,txt -e xhtml -a'
would result in an extension list: ['.js', '.txt', '.xhtml']
>>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py'])
{'.html', '.js', '.py'}
>>> handle_extensions(['.html, txt,.tpl'])
{'.html', '.tpl', '.txt'}
"""
ext_list = []
for ext in extensions:
ext_list.extend(ext.replace(' ', '').split(','))
for i, ext in enumerate(ext_list):
if not ext.startswith('.'):
ext_list[i] = '.%s' % ext_list[i]
return set(ext_list)
def find_command(cmd, path=None, pathext=None):
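    """
    Return the full path to an executable named ``cmd``, searching the
    directories in ``path`` (default: the PATH environment variable) and,
    Windows-style, trying each extension in ``pathext`` when ``cmd`` does not
    already end with one. Return None if no match is found.
    """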
if path is None:
path = os.environ.get('PATH', '').split(os.pathsep)
if isinstance(path, str):
path = [path]
# check if there are funny path extensions for executables, e.g. Windows
if pathext is None:
pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD').split(os.pathsep)
# don't use extensions if the command ends with one of them
for ext in pathext:
if cmd.endswith(ext):
pathext = ['']
break
# check if we find the command on PATH
for p in path:
f = os.path.join(p, cmd)
if os.path.isfile(f):
return f
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
return fext
return None
def get_random_secret_key():
"""
Return a 50 character random string usable as a SECRET_KEY setting value.
"""
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
return get_random_string(50, chars)
def parse_apps_and_model_labels(labels):
"""
Parse a list of "app_label.ModelName" or "app_label" strings into actual
objects and return a two-element tuple:
(set of model classes, set of app_configs).
Raise a CommandError if some specified models or apps don't exist.
"""
apps = set()
models = set()
for label in labels:
if '.' in label:
try:
model = installed_apps.get_model(label)
except LookupError:
raise CommandError('Unknown model: %s' % label)
models.add(model)
else:
try:
app_config = installed_apps.get_app_config(label)
except LookupError as e:
raise CommandError(str(e))
apps.add(app_config)
return models, apps
| bsd-3-clause | -892,076,316,083,853,700 | 31.314815 | 89 | 0.605444 | false |
popazerty/beyonwiz-sh4 | lib/python/Components/Harddisk.py | 1 | 31986 | import os
import time
from Tools.CList import CList
from SystemInfo import SystemInfo
from Components.Console import Console
import Task
from boxbranding import getMachineName
def readFile(filename):
file = open(filename)
data = file.read().strip()
file.close()
return data
def getPartitionNames():
partitions = []
try:
f = open('/proc/partitions', 'r')
for line in f.readlines():
parts = line.strip().split()
if not parts:
continue
device = parts[3]
if device in partitions or not device[-1].isdigit():
continue
partitions.append(device)
except IOError, ex:
print "[Harddisk] Failed to open /proc/partitions", ex
return partitions
def getProcMounts():
try:
mounts = open("/proc/mounts", 'r')
result = []
tmp = [line.strip().split(' ') for line in mounts]
mounts.close()
for item in tmp:
# Spaces are encoded as \040 in mounts
item[1] = item[1].replace('\\040', ' ')
result.append(item)
return result
except IOError, ex:
print "[Harddisk] Failed to open /proc/mounts", ex
return []
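# Each entry returned by getProcMounts() is one /proc/mounts line split on
# spaces, e.g. (illustrative): ['/dev/sda1', '/media/hdd', 'ext4', 'rw,relatime', '0', '0']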
def isFileSystemSupported(filesystem):
try:
file = open('/proc/filesystems', 'r')
for fs in file:
if fs.strip().endswith(filesystem):
file.close()
return True
file.close()
return False
except Exception, ex:
print "[Harddisk] Failed to read /proc/filesystems:", ex
def findMountPoint(path):
""" Example: findMountPoint("/media/hdd/some/file") returns "/media/hdd\" """
path = os.path.abspath(path)
while not os.path.ismount(path):
path = os.path.dirname(path)
return path
DEVTYPE_UDEV = 0
DEVTYPE_DEVFS = 1
class Harddisk:
def __init__(self, device, removable = False):
self.device = device
if os.access("/dev/.udev", 0):
self.type = DEVTYPE_UDEV
elif os.access("/dev/.devfsd", 0):
self.type = DEVTYPE_DEVFS
else:
print "[Harddisk] Unable to determine structure of /dev"
self.max_idle_time = 0
self.idle_running = False
self.last_access = time.time()
self.last_stat = 0
self.timer = None
self.is_sleeping = False
self.dev_path = ''
self.disk_path = ''
self.mount_path = None
self.mount_device = None
self.phys_path = os.path.realpath(self.sysfsPath('device'))
if self.type == DEVTYPE_UDEV:
self.dev_path = '/dev/' + self.device
self.disk_path = self.dev_path
elif self.type == DEVTYPE_DEVFS:
tmp = readFile(self.sysfsPath('dev')).split(':')
s_major = int(tmp[0])
s_minor = int(tmp[1])
for disc in os.listdir("/dev/discs"):
dev_path = os.path.realpath('/dev/discs/' + disc)
disk_path = dev_path + '/disc'
try:
rdev = os.stat(disk_path).st_rdev
except OSError:
continue
if s_major == os.major(rdev) and s_minor == os.minor(rdev):
self.dev_path = dev_path
self.disk_path = disk_path
break
print "[Harddisk] new Harddisk", self.device, '->', self.dev_path, '->', self.disk_path
if not removable:
self.startIdle()
def __lt__(self, ob):
return self.device < ob.device
def partitionPath(self, n):
if self.type == DEVTYPE_UDEV:
return self.dev_path + n
elif self.type == DEVTYPE_DEVFS:
return self.dev_path + '/part' + n
def sysfsPath(self, filename):
return os.path.join('/sys/block/', self.device, filename)
def stop(self):
if self.timer:
self.timer.stop()
self.timer.callback.remove(self.runIdle)
def bus(self):
ret = _("External")
# SD/MMC(F1 specific)
if self.type == DEVTYPE_UDEV:
card = "sdhci" in self.phys_path
type_name = " (SD/MMC)"
# CF(7025 specific)
elif self.type == DEVTYPE_DEVFS:
card = self.device[:2] == "hd" and "host0" not in self.dev_path
type_name = " (CF)"
		internal = "pci" in self.phys_path or "ahci" in self.phys_path
if card:
ret += type_name
elif internal:
ret = _("Internal")
return ret
def diskSize(self):
cap = 0
try:
line = readFile(self.sysfsPath('size'))
cap = int(line)
except:
dev = self.findMount()
if dev:
stat = os.statvfs(dev)
cap = int(stat.f_blocks * stat.f_bsize)
return cap / 1000 / 1000
else:
return cap
return cap / 1000 * 512 / 1000
def capacity(self):
cap = self.diskSize()
if cap == 0:
return ""
if cap < 1000:
return "%03d MB" % cap
return "%d.%03d GB" % (cap/1000, cap%1000)
def model(self):
try:
if self.device[:2] == "hd":
return readFile('/proc/ide/' + self.device + '/model')
elif self.device[:2] == "sd":
vendor = readFile(self.phys_path + '/vendor')
model = readFile(self.phys_path + '/model')
return vendor + '(' + model + ')'
elif self.device.startswith('mmcblk0'):
return readFile(self.sysfsPath('device/name'))
else:
raise Exception, "no hdX or sdX or mmcX"
except Exception, e:
print "[Harddisk] Failed to get model:", e
return "-?-"
def free(self):
dev = self.findMount()
if dev:
if not os.path.exists(dev):
os.mkdir(dev)
stat = os.statvfs(dev)
return int((stat.f_bfree/1000) * (stat.f_bsize/1000))
return -1
def numPartitions(self):
numPart = -1
if self.type == DEVTYPE_UDEV:
try:
devdir = os.listdir('/dev')
except OSError:
return -1
for filename in devdir:
if filename.startswith(self.device):
numPart += 1
elif self.type == DEVTYPE_DEVFS:
try:
idedir = os.listdir(self.dev_path)
except OSError:
return -1
for filename in idedir:
if filename.startswith("disc"):
numPart += 1
if filename.startswith("part"):
numPart += 1
return numPart
def mountDevice(self):
for parts in getProcMounts():
if os.path.realpath(parts[0]).startswith(self.dev_path):
self.mount_device = parts[0]
self.mount_path = parts[1]
return parts[1]
def enumMountDevices(self):
for parts in getProcMounts():
if os.path.realpath(parts[0]).startswith(self.dev_path):
yield parts[1]
def findMount(self):
if self.mount_path is None:
return self.mountDevice()
return self.mount_path
def unmount(self):
dev = self.mountDevice()
if dev is None:
# not mounted, return OK
return 0
cmd = 'umount ' + dev
print "[Harddisk]", cmd
res = os.system(cmd)
return res >> 8
def createPartition(self):
cmd = 'printf "8,\n;0,0\n;0,0\n;0,0\ny\n" | sfdisk -f -uS ' + self.disk_path
res = os.system(cmd)
return res >> 8
def mkfs(self):
# No longer supported, use createInitializeJob instead
return 1
def mount(self):
# try mounting through fstab first
if self.mount_device is None:
dev = self.partitionPath("1")
else:
# if previously mounted, use the same spot
dev = self.mount_device
try:
fstab = open("/etc/fstab")
lines = fstab.readlines()
fstab.close()
except IOError:
return -1
for line in lines:
parts = line.strip().split(" ")
fspath = os.path.realpath(parts[0])
if fspath == dev:
print "[Harddisk] mounting:", fspath
cmd = "mount -t auto " + fspath
res = os.system(cmd)
return res >> 8
# device is not in fstab
res = -1
if self.type == DEVTYPE_UDEV:
# we can let udev do the job, re-read the partition table
res = os.system('sfdisk -R ' + self.disk_path)
# give udev some time to make the mount, which it will do asynchronously
from time import sleep
sleep(3)
return res >> 8
def fsck(self):
# No longer supported, use createCheckJob instead
return 1
def killPartitionTable(self):
zero = 512 * '\0'
h = open(self.dev_path, 'wb')
# delete first 9 sectors, which will likely kill the first partition too
for i in range(9):
h.write(zero)
h.close()
def killPartition(self, n):
zero = 512 * '\0'
part = self.partitionPath(n)
h = open(part, 'wb')
for i in range(3):
h.write(zero)
h.close()
def createInitializeJob(self):
job = Task.Job(_("Initializing storage device..."))
size = self.diskSize()
print "[Harddisk] size: %s MB" % size
task = UnmountTask(job, self)
task = Task.PythonTask(job, _("Removing partition table"))
task.work = self.killPartitionTable
task.weighting = 1
task = Task.LoggingTask(job, _("Rereading partition table"))
task.weighting = 1
task.setTool('sfdisk')
task.args.append('-R')
task.args.append(self.disk_path)
task = Task.ConditionTask(job, _("Waiting for partition"), timeoutCount=20)
task.check = lambda: not os.path.exists(self.partitionPath("1"))
task.weighting = 1
if os.path.exists('/usr/sbin/parted'):
use_parted = True
else:
if size > 2097151:
addInstallTask(job, 'parted')
use_parted = True
else:
use_parted = False
task = Task.LoggingTask(job, _("Creating partition"))
task.weighting = 5
if use_parted:
task.setTool('parted')
if size < 1024:
# On very small devices, align to block only
alignment = 'min'
else:
# Prefer optimal alignment for performance
alignment = 'opt'
task.args += ['-a', alignment, '-s', self.disk_path, 'mklabel', 'gpt', 'mkpart', 'primary', '0%', '100%']
else:
task.setTool('sfdisk')
task.args.append('-f')
task.args.append('-uS')
task.args.append(self.disk_path)
if size > 128000:
# Start at sector 8 to better support 4k aligned disks
print "[Harddisk] Detected >128GB disk, using 4k alignment"
task.initial_input = "8,\n;0,0\n;0,0\n;0,0\ny\n"
else:
# Smaller disks (CF cards, sticks etc) don't need that
task.initial_input = "0,\n;\n;\n;\ny\n"
task = Task.ConditionTask(job, _("Waiting for partition"))
task.check = lambda: os.path.exists(self.partitionPath("1"))
task.weighting = 1
task = MkfsTask(job, _("Creating file system"))
big_o_options = ["dir_index", "filetype"]
if isFileSystemSupported("ext4"):
task.setTool("mkfs.ext4")
big_o_options +=["extent", "flex_bg", "uninit_bg"]
else:
task.setTool("mkfs.ext3")
if size > 250000:
# No more than 256k i-nodes (prevent problems with fsck memory requirements)
task.args += ["-T", "largefile", "-N", "262144"]
big_o_options.append("sparse_super")
elif size > 16384:
# between 16GB and 250GB: 1 i-node per megabyte
task.args += ["-T", "largefile"]
big_o_options.append("sparse_super")
elif size > 2048:
# Over 2GB: 32 i-nodes per megabyte
task.args += ["-T", "largefile", "-N", str(size * 32)]
task.args += ["-L", getMachineName(), "-m0", "-O", ",".join(big_o_options), self.partitionPath("1")]
task = MountTask(job, self)
task.weighting = 3
task = Task.ConditionTask(job, _("Waiting for mount"), timeoutCount=20)
task.check = self.mountDevice
task.weighting = 1
return job
def initialize(self):
# no longer supported
return -5
def check(self):
# no longer supported
return -5
def createCheckJob(self):
job = Task.Job(_("Checking file system..."))
if self.findMount():
# Create unmount task if it was not mounted
UnmountTask(job, self)
dev = self.mount_device
else:
# otherwise, assume there is one partition
dev = self.partitionPath("1")
task = Task.LoggingTask(job, "fsck")
if isFileSystemSupported("ext4"):
task.setTool("fsck.ext4")
else:
task.setTool('fsck.ext3')
# fsck.ext? return codes less than 4 are not real errors
class FsckReturncodePostCondition(Task.ReturncodePostcondition):
def check(self, task):
return task.returncode < 4
task.postconditions = [FsckReturncodePostCondition()]
task.args += ["-D", "-f", "-p", dev]
MountTask(job, self)
task = Task.ConditionTask(job, _("Waiting for mount"))
task.check = self.mountDevice
return job
def createExt4ConversionJob(self):
if not isFileSystemSupported('ext4'):
			raise Exception, _("Your system does not support ext4")
job = Task.Job(_("Converting ext3 to ext4..."))
if not os.path.exists('/sbin/tune2fs'):
addInstallTask(job, 'e2fsprogs-tune2fs')
if self.findMount():
# Create unmount task if it was not mounted
UnmountTask(job, self)
dev = self.mount_device
else:
# otherwise, assume there is one partition
dev = self.partitionPath("1")
task = Task.LoggingTask(job, "fsck")
task.setTool('fsck.ext3')
task.args.append('-p')
task.args.append(dev)
task = Task.LoggingTask(job, "tune2fs")
task.setTool('tune2fs')
task.args.append('-O')
task.args.append('extent,flex_bg,uninit_bg,dir_index,filetype')
task.args.append('-o')
task.args.append('journal_data_writeback')
task.args.append(dev)
task = Task.LoggingTask(job, "fsck")
task.setTool('fsck.ext4')
task.postconditions = [] # ignore result, it will always "fail"
task.args.append('-f')
task.args.append('-p')
task.args.append('-D')
task.args.append(dev)
MountTask(job, self)
task = Task.ConditionTask(job, _("Waiting for mount"))
task.check = self.mountDevice
return job
def getDeviceDir(self):
return self.dev_path
def getDeviceName(self):
return self.disk_path
def getDevicePhysicalName(self):
return self.phys_path
# the HDD idle poll daemon.
# as some harddrives have a buggy standby timer, we are doing this by hand here.
# first, we disable the hardware timer. then, we check every now and then if
	# any access has been made to the disc. If there has been no access over a specified time,
# we set the hdd into standby.
def readStats(self):
if os.path.exists("/sys/block/%s/stat" % self.device):
f = open("/sys/block/%s/stat" % self.device)
l = f.read()
f.close()
data = l.split(None,5)
return int(data[0]), int(data[4])
else:
return -1,-1
def startIdle(self):
from enigma import eTimer
# disable HDD standby timer
if self.bus() == _("External"):
Console().ePopen(("sdparm", "sdparm", "--set=SCT=0", self.disk_path))
else:
Console().ePopen(("hdparm", "hdparm", "-S0", self.disk_path))
self.timer = eTimer()
self.timer.callback.append(self.runIdle)
self.idle_running = True
self.setIdleTime(self.max_idle_time) # kick the idle polling loop
def runIdle(self):
if not self.max_idle_time:
return
t = time.time()
idle_time = t - self.last_access
stats = self.readStats()
l = sum(stats)
if l != self.last_stat and l >= 0: # access
self.last_stat = l
self.last_access = t
idle_time = 0
self.is_sleeping = False
if idle_time >= self.max_idle_time and not self.is_sleeping:
self.setSleep()
self.is_sleeping = True
def setSleep(self):
if self.bus() == _("External"):
Console().ePopen(("sdparm", "sdparm", "--flexible", "--readonly", "--command=stop", self.disk_path))
else:
Console().ePopen(("hdparm", "hdparm", "-y", self.disk_path))
def setIdleTime(self, idle):
self.max_idle_time = idle
if self.idle_running:
if not idle:
self.timer.stop()
else:
self.timer.start(idle * 100, False) # poll 10 times per period.
def isSleeping(self):
return self.is_sleeping
class Partition:
# for backward compatibility, force_mounted actually means "hotplug"
def __init__(self, mountpoint, device = None, description = "", shortdescription="", force_mounted = False):
self.mountpoint = mountpoint
self.description = description
if not shortdescription:
shortdescription = description
self.shortdescription = shortdescription
self.force_mounted = mountpoint and force_mounted
self.is_hotplug = force_mounted # so far; this might change.
self.device = device
def __str__(self):
return "Partition(mountpoint=%s,description=%s,shortdescription=%s,device=%s)" % (self.mountpoint,self.description,self.shortdescription,self.device)
def stat(self):
if self.mountpoint:
return os.statvfs(self.mountpoint)
else:
raise OSError, "Device %s is not mounted" % self.device
def free(self):
try:
s = self.stat()
return s.f_bavail * s.f_bsize
except OSError:
return None
def total(self):
try:
s = self.stat()
return s.f_blocks * s.f_bsize
except OSError:
return None
def tabbedDescription(self):
if self.mountpoint.startswith('/media/net') or self.mountpoint.startswith('/media/autofs'):
# Network devices have a user defined name
return self.description
return self.description + '\t' + self.mountpoint
def tabbedShortDescription(self):
if self.mountpoint.startswith('/media/net') or self.mountpoint.startswith('/media/autofs'):
# Network devices have a user defined name
return self.shortdescription
return self.shortdescription + '\t' + self.mountpoint
def mounted(self, mounts = None):
# THANK YOU PYTHON FOR STRIPPING AWAY f_fsid.
# TODO: can os.path.ismount be used?
if self.force_mounted:
return True
if self.mountpoint:
if mounts is None:
mounts = getProcMounts()
for parts in mounts:
if self.mountpoint.startswith(parts[1]): # use startswith so a mount not ending with '/' is also detected.
return True
return False
def filesystem(self, mounts = None):
if self.mountpoint:
if mounts is None:
mounts = getProcMounts()
for fields in mounts:
if self.mountpoint.endswith('/') and not self.mountpoint == '/':
if fields[1] + '/' == self.mountpoint:
return fields[2]
else:
if fields[1] == self.mountpoint:
return fields[2]
return ''
DEVICEDB = {
"dm8000":
{
# dm8000:
"/devices/platform/brcm-ehci.0/usb1/1-1/1-1.1/1-1.1:1.0": "Front USB Slot",
"/devices/platform/brcm-ehci.0/usb1/1-1/1-1.2/1-1.2:1.0": "Back, upper USB Slot",
"/devices/platform/brcm-ehci.0/usb1/1-1/1-1.3/1-1.3:1.0": "Back, lower USB Slot",
"/devices/platform/brcm-ehci-1.1/usb2/2-1/2-1:1.0/host1/target1:0:0/1:0:0:0": "DVD Drive",
},
"dm800":
{
# dm800:
"/devices/platform/brcm-ehci.0/usb1/1-2/1-2:1.0": "Upper USB Slot",
"/devices/platform/brcm-ehci.0/usb1/1-1/1-1:1.0": "Lower USB Slot",
},
"dm800se":
{
# USB-1
"/devices/platform/ohci-brcm.1/usb4/4-1/": "Front USB Slot",
"/devices/platform/ohci-brcm.0/usb3/3-2/": "Back, upper USB Slot",
"/devices/platform/ohci-brcm.0/usb3/3-1/": "Back, lower USB Slot",
# USB-2
"/devices/platform/ehci-brcm.1/usb2/2-1/": "Front USB Slot",
"/devices/platform/ehci-brcm.0/usb1/1-2/": "Back, upper USB Slot",
"/devices/platform/ehci-brcm.0/usb1/1-1/": "Back, lower USB Slot",
"/devices/pci0000:01/0000:01:00.0/ata1/": "Internal HDD",
"/devices/pci0000:01/0000:01:00.0/ata2/": "eSATA HDD",
},
"dm7025":
{
# dm7025:
"/devices/pci0000:00/0000:00:14.1/ide1/1.0": "CF Card Slot", #hdc
"/devices/pci0000:00/0000:00:14.1/ide0/0.0": "Internal Harddisk",
},
}
def addInstallTask(job, package):
task = Task.LoggingTask(job, "update packages")
task.setTool('opkg')
task.args.append('update')
task = Task.LoggingTask(job, "Install " + package)
task.setTool('opkg')
task.args.append('install')
task.args.append(package)
class VolumeLabels:
def __init__(self):
self.stale = True
self.volume_labels = {}
def fetchVolumeLabels(self):
import subprocess
		self.volume_labels = {}
		lines = []
try:
lines = subprocess.check_output(["blkid", "-s", "LABEL"]).split("\n")
except Exception, e:
print "[HarddiskManager] fetchVolumeLabels", str(e)
for l in lines:
if l:
l = l.strip()
l = l.replace('"', "")
l = l.replace("LABEL=", "").replace("/dev/", "")
d = l.split()
if len(d) == 2 and d[0][-1] == ':':
d[0] = d[0][:-1]
self.volume_labels[d[0]] = d[1]
print "[Harddisk] volume labels:", self.volume_labels
self.stale = False
def getVolumeLabel(self, device):
if self.stale:
self.fetchVolumeLabels()
if device in self.volume_labels:
return self.volume_labels[device]
return None
def makeStale(self):
self.stale = True
class HarddiskManager:
def __init__(self):
self.hdd = []
self.cd = ""
# Partitions should always have a trailing /
self.partitions = [ ]
self.volume_labels = VolumeLabels()
self.devices_scanned_on_init = [ ]
self.on_partition_list_change = CList()
self.enumerateBlockDevices()
# Find stuff not detected by the enumeration
self.enumerateNetworkMounts()
# Find stuff not detected by the enumeration
p = [("/", _("Internal Flash")),("/media/upnp/", _("DLNA")),]
self.partitions.extend([ Partition(mountpoint = x[0], description = x[1], shortdescription=x[1]) for x in p ])
def getBlockDevInfo(self, blockdev):
devpath = "/sys/block/" + blockdev
error = False
removable = False
blacklisted = False
is_cdrom = False
partitions = []
try:
if os.path.exists(devpath + "/removable"):
removable = bool(int(readFile(devpath + "/removable")))
if os.path.exists(devpath + "/dev"):
dev = int(readFile(devpath + "/dev").split(':')[0])
else:
dev = None
if dev in (1, 7, 31, 253): # ram, loop, mtdblock, romblock
blacklisted = True
if blockdev[0:2] == 'sr':
is_cdrom = True
if blockdev[0:2] == 'hd':
try:
media = readFile("/proc/ide/%s/media" % blockdev)
if "cdrom" in media:
is_cdrom = True
except IOError:
error = True
# check for partitions
if not is_cdrom and os.path.exists(devpath):
for partition in os.listdir(devpath):
if partition[0:len(blockdev)] != blockdev:
continue
partitions.append(partition)
else:
self.cd = blockdev
except IOError:
error = True
# check for medium
medium_found = True
try:
if os.path.exists("/dev/" + blockdev):
open("/dev/" + blockdev).close()
except IOError, err:
if err.errno == 159: # no medium present
medium_found = False
return error, blacklisted, removable, is_cdrom, partitions, medium_found
def enumerateBlockDevices(self):
print "[Harddisk] enumerating block devices..."
self.volume_labels.makeStale()
for blockdev in os.listdir("/sys/block"):
error, blacklisted, removable, is_cdrom, partitions, medium_found = self.addHotplugPartition(blockdev, makestale=False)
if not error and not blacklisted and medium_found:
for part in partitions:
self.addHotplugPartition(part, makestale=False)
self.devices_scanned_on_init.append((blockdev, removable, is_cdrom, medium_found))
def enumerateNetworkMounts(self):
print "[Harddisk] enumerating network mounts..."
netmount = (os.path.exists('/media/net') and os.listdir('/media/net')) or ""
if len(netmount) > 0:
for fil in netmount:
if os.path.ismount('/media/net/' + fil):
print "[Harddisk] new Network Mount", fil, '->', os.path.join('/media/net/',fil)
self.partitions.append(Partition(mountpoint = os.path.join('/media/net/',fil + '/'), description = fil, shortdescription = fil))
autofsmount = (os.path.exists('/media/autofs') and os.listdir('/media/autofs')) or ""
if len(autofsmount) > 0:
for fil in autofsmount:
if os.path.ismount('/media/autofs/' + fil) or os.path.exists('/media/autofs/' + fil):
print "[Harddisk] new Network Mount", fil, '->', os.path.join('/media/autofs/',fil)
self.partitions.append(Partition(mountpoint = os.path.join('/media/autofs/',fil + '/'), description = fil, shortdescription = fil))
if os.path.ismount('/media/hdd') and '/media/hdd/' not in [p.mountpoint for p in self.partitions]:
print "[Harddisk] new Network Mount being used as HDD replacement -> /media/hdd/"
self.partitions.append(Partition(mountpoint = '/media/hdd/', description = '/media/hdd', shortdescription = '/media/hdd'))
def getAutofsMountpoint(self, device):
return "/autofs/%s" % device
def getMountpoint(self, device):
dev = "/dev/%s" % device
for item in getProcMounts():
if item[0] == dev:
return item[1] + '/'
return None
def addHotplugPartition(self, device, physdev = None, makestale=True):
# device is the device name, without /dev
# physdev is the physical device path, which we (might) use to determine the userfriendly name
if not physdev:
dev, part = self.splitDeviceName(device)
try:
physdev = os.path.realpath('/sys/block/' + dev + '/device')[4:]
except OSError:
physdev = dev
print "[Harddisk] couldn't determine physdev for device", device
else:
physdev = os.path.realpath('/sys' + physdev)[4:]
error, blacklisted, removable, is_cdrom, partitions, medium_found = self.getBlockDevInfo(self.splitDeviceName(device)[0])
if not blacklisted and medium_found:
if makestale:
self.volume_labels.makeStale()
(description, shortdescription) = self._getUserfriendlyDeviceName(device, physdev)
p = Partition(mountpoint = self.getMountpoint(device), description = description, shortdescription = shortdescription, force_mounted = True, device = device)
self.partitions.append(p)
if p.mountpoint: # Plugins won't expect unmounted devices
self.on_partition_list_change("add", p)
# see if this is a harddrive
l = len(device)
if l and (not device[l-1].isdigit() or device == 'mmcblk0'):
self.hdd.append(Harddisk(device, removable))
self.hdd.sort()
SystemInfo["Harddisk"] = True
return error, blacklisted, removable, is_cdrom, partitions, medium_found
def removeHotplugPartition(self, device):
for x in self.partitions[:]:
if x.device == device:
self.partitions.remove(x)
if x.mountpoint: # Plugins won't expect unmounted devices
self.on_partition_list_change("remove", x)
l = len(device)
if l and not device[l-1].isdigit():
for hdd in self.hdd:
if hdd.device == device:
hdd.stop()
self.hdd.remove(hdd)
break
SystemInfo["Harddisk"] = len(self.hdd) > 0
def HDDCount(self):
return len(self.hdd)
def HDDList(self):
list = [ ]
for hd in self.hdd:
try:
hdd = self.getUserfriendlyDeviceName(hd.disk_path, os.path.realpath(hd.phys_path))
except Exception as ex:
print "[Harddisk] couldn't get friendly name for %s: %s" % (hd.phys_path, ex)
hdd = hd.model() + " - " + hd.bus()
cap = hd.capacity()
if cap != "":
hdd += " (" + cap + ")"
list.append((hdd, hd))
return list
def getCD(self):
return self.cd
def getMountedPartitions(self, onlyhotplug = False, mounts=None):
if mounts is None:
mounts = getProcMounts()
parts = [x for x in self.partitions if (x.is_hotplug or not onlyhotplug) and x.mounted(mounts)]
devs = set([x.device for x in parts])
for devname in devs.copy():
if not devname:
continue
dev, part = self.splitDeviceName(devname)
if part and dev in devs: # if this is a partition and we still have the wholedisk, remove wholedisk
devs.remove(dev)
# return all devices which are not removed due to being a wholedisk when a partition exists
return [x for x in parts if not x.device or x.device in devs]
def splitDeviceName(self, devname):
# this works for: sdaX, hdaX, sr0 (which is in fact dev="sr0", part=""). It doesn't work for other names like mtdblock3, but they are blacklisted anyway.
dev = devname[:3]
part = devname[3:]
for p in part:
if not p.isdigit():
return devname, 0
return dev, part and int(part) or 0
def getPhysicalDeviceLocation(self, phys):
from Tools.HardwareInfo import HardwareInfo
if phys.startswith("/sys"):
phys = phys[4:]
for physdevprefix, pdescription in DEVICEDB.get(HardwareInfo().device_name,{}).items():
if phys.startswith(physdevprefix):
return pdescription
return None
def _getUserfriendlyDeviceName(self, device, phys):
dev, part = self.splitDeviceName(device)
if phys.startswith("/sys"):
phys = phys[4:]
shortdescription = description = "External Storage %s" % dev
volume_label = self.volume_labels.getVolumeLabel(device)
if volume_label:
shortdescription = description = volume_label
if not volume_label:
try:
description = readFile("/sys" + phys + "/model")
except IOError, s:
print "[Harddisk] couldn't read %s: %s" % ("/sys" + phys + "/model", s)
pdescription = self.getPhysicalDeviceLocation(phys)
if pdescription is not None:
if volume_label:
description = "%s (%s)" % (description, pdescription)
else:
description = "%s (%s)" % (pdescription, description)
shortdescription = pdescription
# not wholedisk and not partition 1
if not volume_label and part and part != 1:
description += _(" (Partition %d)") % part
return (description, shortdescription)
def getUserfriendlyDeviceName(self, device, phys):
return self._getUserfriendlyDeviceName(device, phys)[0]
def getUserfriendlyDeviceShortName(self, device, phys):
return self._getUserfriendlyDeviceName(device, phys)[1]
def addMountedPartition(self, device, desc):
# Ensure we have a trailing /
if device and device[-1] != "/":
device += "/"
for x in self.partitions:
if x.mountpoint == device:
#already_mounted
return
self.partitions.append(Partition(mountpoint=device, description=desc, shortdescription=desc))
def removeMountedPartition(self, mountpoint):
		if mountpoint and mountpoint[-1] != "/":
mountpoint += "/"
for x in self.partitions[:]:
if x.mountpoint == mountpoint:
self.partitions.remove(x)
self.on_partition_list_change("remove", x)
def setDVDSpeed(self, device, speed = 0):
ioctl_flag=int(0x5322)
if not device.startswith('/'):
device = "/dev/" + device
try:
from fcntl import ioctl
cd = open(device)
ioctl(cd.fileno(), ioctl_flag, speed)
cd.close()
except Exception, ex:
print "[Harddisk] Failed to set %s speed to %s" % (device, speed), ex
class UnmountTask(Task.LoggingTask):
def __init__(self, job, hdd):
Task.LoggingTask.__init__(self, job, _("Unmount"))
self.hdd = hdd
self.mountpoints = []
def prepare(self):
try:
dev = self.hdd.disk_path.split('/')[-1]
open('/dev/nomount.%s' % dev, "wb").close()
except Exception, e:
print "[Harddisk] Failed to create /dev/nomount.%s:" % dev, e
self.setTool('umount')
self.args.append('-f')
for dev in self.hdd.enumMountDevices():
self.args.append(dev)
self.postconditions.append(Task.ReturncodePostcondition())
self.mountpoints.append(dev)
if not self.mountpoints:
print "[Harddisk] UnmountTask: No mountpoints found?"
self.cmd = 'true'
self.args = [self.cmd]
def afterRun(self):
for path in self.mountpoints:
try:
os.rmdir(path)
except Exception, ex:
print "[Harddisk] Failed to remove path '%s':" % path, ex
class MountTask(Task.LoggingTask):
def __init__(self, job, hdd):
Task.LoggingTask.__init__(self, job, _("Mount"))
self.hdd = hdd
def prepare(self):
try:
dev = self.hdd.disk_path.split('/')[-1]
os.unlink('/dev/nomount.%s' % dev)
except Exception, e:
print "[Harddisk] Failed to remove /dev/nomount.%s:" % dev, e
# try mounting through fstab first
if self.hdd.mount_device is None:
dev = self.hdd.partitionPath("1")
else:
# if previously mounted, use the same spot
dev = self.hdd.mount_device
fstab = open("/etc/fstab")
lines = fstab.readlines()
fstab.close()
for line in lines:
parts = line.strip().split(" ")
fspath = os.path.realpath(parts[0])
if os.path.realpath(fspath) == dev:
self.setCmdline("mount -t auto " + fspath)
self.postconditions.append(Task.ReturncodePostcondition())
return
# device is not in fstab
if self.hdd.type == DEVTYPE_UDEV:
# we can let udev do the job, re-read the partition table
# Sorry for the sleep 2 hack...
self.setCmdline('sleep 2; sfdisk -R ' + self.hdd.disk_path)
self.postconditions.append(Task.ReturncodePostcondition())
class MkfsTask(Task.LoggingTask):
def prepare(self):
self.fsck_state = None
def processOutput(self, data):
print "[Harddisk] mkfs", data
if 'Writing inode tables:' in data:
self.fsck_state = 'inode'
elif 'Creating journal' in data:
self.fsck_state = 'journal'
self.setProgress(80)
elif 'Writing superblocks ' in data:
self.setProgress(95)
elif self.fsck_state == 'inode':
if '/' in data:
try:
d = data.strip(' \x08\r\n').split('/',1)
if '\x08' in d[1]:
d[1] = d[1].split('\x08',1)[0]
self.setProgress(80*int(d[0])/int(d[1]))
except Exception, e:
print "[Harddisk] mkfs E:", e
				return # don't log the progress
self.log.append(data)
def internalHDDNotSleeping():
if harddiskmanager.HDDCount():
for hdd in harddiskmanager.HDDList():
if ("pci" in hdd[1].phys_path or "ahci" in hdd[1].phys_path) and hdd[1].max_idle_time and not hdd[1].isSleeping():
return True
return False
harddiskmanager = HarddiskManager()
SystemInfo["ext4"] = isFileSystemSupported("ext4")
| gpl-2.0 | 277,355,841,702,303,140 | 29.462857 | 160 | 0.664853 | false |
bswartz/manila | manila/policies/share_snapshot.py | 1 | 3967 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from manila.policies import base
BASE_POLICY_NAME = 'share_snapshot:%s'
share_snapshot_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'get_snapshot',
check_str=base.RULE_DEFAULT,
description="Get share snapshot.",
operations=[
{
'method': 'GET',
'path': '/snapshots/{snapshot_id}'
}
]),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'get_all_snapshots',
check_str=base.RULE_DEFAULT,
description="Get all share snapshots.",
operations=[
{
'method': 'GET',
'path': '/snapshots'
},
{
'method': 'GET',
'path': '/snapshots/detail'
},
{
'method': 'GET',
'path': '/snapshots?{query}'
},
{
'method': 'GET',
'path': '/snapshots/detail?{query}'
}
]),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'force_delete',
check_str=base.RULE_ADMIN_API,
description="Force Delete a share snapshot.",
operations=[
{
'method': 'DELETE',
'path': '/snapshots/{snapshot_id}'
}
]),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'manage_snapshot',
check_str=base.RULE_ADMIN_API,
description="Manage share snapshot.",
operations=[
{
'method': 'POST',
'path': '/snapshots/manage'
}
]),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'unmanage_snapshot',
check_str=base.RULE_ADMIN_API,
description="Unmanage share snapshot.",
operations=[
{
'method': 'POST',
'path': '/snapshots/{snapshot_id}/action'
}
]),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'reset_status',
check_str=base.RULE_ADMIN_API,
description="Reset status.",
operations=[
{
'method': 'POST',
'path': '/snapshots/{snapshot_id}/action',
}
]),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'access_list',
check_str=base.RULE_DEFAULT,
description="List access rules of a share snapshot.",
operations=[
{
'method': 'GET',
'path': '/snapshots/{snapshot_id}/access-list'
}
]),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'allow_access',
check_str=base.RULE_DEFAULT,
description="Allow access to a share snapshot.",
operations=[
{
'method': 'POST',
'path': '/snapshots/{snapshot_id}/action'
}
]),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'deny_access',
check_str=base.RULE_DEFAULT,
description="Deny access to a share snapshot.",
operations=[
{
'method': 'POST',
'path': '/snapshots/{snapshot_id}/action'
}
]),
]
def list_rules():
return share_snapshot_policies
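# Note: list_rules() is the hook through which oslo.policy collects these
# defaults (typically registered via an "oslo.policy.policies" entry point;
# the exact wiring lives outside this module).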
| apache-2.0 | 215,645,173,443,318,100 | 29.992188 | 78 | 0.522309 | false |
sujoykroy/motion-picture | editor/MotionPicture/shapes/curve_shape.py | 1 | 50789 | from ..commons import *
from .shape import Shape
from .shape_list import ShapeList
from .curves_form import CurvesForm
from .curve_point_group_shape import CurvePointGroupShape
from xml.etree.ElementTree import Element as XmlElement
from .mirror import *
REL_ABS_ANCHOR_AT = "rel_abs_anchor_at"
class CurveShape(Shape, Mirror):
TYPE_NAME = "curve_shape"
def __init__(self, anchor_at=None,
border_color="000000", border_width=1,
fill_color=None, width=1., height=1.):
if anchor_at is None:
anchor_at = Point(width*.5, height*.5)
Shape.__init__(self, anchor_at, border_color, border_width, fill_color, width, height)
Mirror.__init__(self)
self.curves = []
self.forms = dict()
self.show_points = True
self.point_group_shapes = ShapeList()
self.baked_points = None
self.form_pixbufs = dict()
self.curve_point_map = dict()
self.exposure = 1.
def get_form_pixbuf(self, form_name):
if form_name not in self.form_pixbufs:
curve_shape = self.copy()
curve_shape.set_form_raw(self.get_form_by_name(form_name))
curve_shape.reset_transformations()
curve_shape.parent_shape = None
pixbuf = curve_shape.get_pixbuf(64, 64)
self.form_pixbufs[form_name] = pixbuf
return self.form_pixbufs[form_name]
def delete_form_pixbuf(self, form_name):
if form_name in self.form_pixbufs:
del self.form_pixbufs[form_name]
def get_interior_shapes(self):
return self.point_group_shapes
def has_poses(self):
return True
@classmethod
def get_pose_prop_names(cls):
prop_names = super(CurveShape, cls).get_pose_prop_names()
prop_names.extend(["form_raw"])
return prop_names
def replace_curves(self, curves):
del self.curves[:]
self.forms.clear()
self.show_points = True
self.point_group_shapes.clear()
self.curves.extend(curves)
def add_new_point_group_shape(self, point_group):
point_group_shape = CurvePointGroupShape(curve_shape=self, curve_point_group=point_group)
point_group_shape.build()
self.point_group_shapes.add(point_group_shape)
self.rebuild_curve_point_map()
return point_group_shape
def delete_point_group_shape(self, point_group_shape):
for curve_point in point_group_shape.curve_point_group.points.values():
self.delete_curve_point(curve_point)
self.point_group_shapes.remove(point_group_shape)
self.rebuild_curve_point_map()
return True
def add_curve_point(self, curve_point, shape):
self.curve_point_map[curve_point.get_key()] = shape
def delete_curve_point(self, curve_point):
if curve_point.get_key() in self.curve_point_map:
curve_point_shape = self.curve_point_map[curve_point.get_key()]
if curve_point_shape != self:
location = self.get_point_location(curve_point)
self.curve_point_map[curve_point.get_key()] = self
self.set_point_location(curve_point, location)
self.rebuild_curve_point_map()
def is_curve_point_owned(self, curve_point):
if not self.curve_point_map:
return True
owner_shape = self.curve_point_map.get(curve_point.get_key())
return owner_shape == self
def rebuild_curve_point_map(self):
self.curve_point_map.clear()
if not self.point_group_shapes:
return
for i in range(len(self.curves)):
curve = self.curves[i]
self.add_curve_point(
CurvePoint(i, -1, CurvePoint.POINT_TYPE_ORIGIN), self)
for j in range(len(curve.bezier_points)):
self.add_curve_point(
CurvePoint(i, j, CurvePoint.POINT_TYPE_CONTROL_1), self)
self.add_curve_point(
CurvePoint(i, j, CurvePoint.POINT_TYPE_CONTROL_2), self)
self.add_curve_point(
CurvePoint(i, j, CurvePoint.POINT_TYPE_DEST), self)
for point_group_shape in self.point_group_shapes:
point_group = point_group_shape.curve_point_group
for curve_point in point_group.points.values():
self.add_curve_point(curve_point, point_group_shape)
def rename_shape(self, shape, name):
old_name = shape.get_name()
if self.point_group_shapes.rename(old_name, name):
for form in self.forms.values():
if not form.shapes_props:
continue
if old_name in form.shapes_props:
form.shapes_props[name] = form.shapes_props[old_name]
del form.shapes_props[old_name]
return True
def get_point_group_shapes_model(self):
model = [["", None]]
for shape in self.point_group_shapes:
model.append([shape.get_name(), shape])
return model
def copy_data_from_linked(self, build_lock=True):
super(CurveShape, self).copy_data_from_linked(build_lock)
if not self.linked_to: return
self.forms = copy_value(self.linked_to.forms)
self.form_pixbufs.clear()
del self.curves[:]
if self.linked_to.point_group_shapes:
abs_anchor_at = self.get_abs_anchor_at()
self.anchor_at.copy_from(self.linked_to.anchor_at)
for curve in self.linked_to.curves:
self.curves.append(curve.copy())
fresh_pgs_list = []
lock_list = []
for pgs in self.linked_to.point_group_shapes:
pgs = pgs.copy(copy_name=True, deep_copy=True)
pgs.set_curve_shape(self)
exist_pgs = self.point_group_shapes.get_item_by_name(pgs.get_name())
pre_lock = None
if exist_pgs:
if exist_pgs.locked_to_shape:
pre_lock = exist_pgs.get_locked_to()
if exist_pgs.locked_shapes:
for locked_shape in exist_pgs.locked_shapes:
if self.point_group_shapes.contain(locked_shape):
                                continue  # ignore the sibling locking
locked_shape.set_locked_to(None)
lock_list.append((locked_shape, pgs))
fresh_pgs_list.append((pgs, pre_lock))
self.point_group_shapes.clear(destroy_items=True)
for pgs, pre_lock in fresh_pgs_list:
pgs.set_pre_locked_to(pre_lock)
self.point_group_shapes.add(pgs)
if build_lock:
self.build_locked_to(up=-1000000)
for locked_shape, locked_to_shape in lock_list:
locked_shape.set_locked_to(locked_to_shape)
self.move_to(abs_anchor_at.x, abs_anchor_at.y)
else:
linked_to_anchor_at = self.linked_to.anchor_at.copy()
linked_to_anchor_at.scale(1./self.linked_to.width, 1./self.linked_to.height)
self_anchor_at = self.anchor_at.copy()
self_anchor_at.scale(1./self.width, 1./self.height)
diff_x = self_anchor_at.x-linked_to_anchor_at.x
diff_y = self_anchor_at.y-linked_to_anchor_at.y
for curve in self.linked_to.curves:
curve = curve.copy()
curve.translate(diff_x, diff_y)
self.curves.append(curve)
self.fit_size_to_include_all()
self.rebuild_curve_point_map()
def get_form_by_name(self, form):
if form in self.forms:
return self.forms[form]
return None
def get_form_raw(self):
curves = []
anchor_at = self.anchor_at.copy()
anchor_at.scale(1./self.width, 1./self.height)
for curve in self.curves:
curve = curve.copy()
curve.translate(-anchor_at.x, -anchor_at.y)
curves.append(curve)
if self.point_group_shapes:
shapes_props = dict()
for point_group_shape in self.point_group_shapes:
prop_dict = point_group_shape.get_pose_prop_dict()
shapes_props[point_group_shape.get_name()] = prop_dict
if not point_group_shape.locked_to_shape:
rel_abs_anchor_at = point_group_shape.get_abs_anchor_at()
rel_abs_anchor_at.translate(-self.anchor_at.x, -self.anchor_at.y)
prop_dict[REL_ABS_ANCHOR_AT] = rel_abs_anchor_at
else:
shapes_props = None
form = CurvesForm(width=self.width, height=self.height, curves=curves, shapes_props=shapes_props)
return form
def save_form(self, form_name):
if form_name is None:
i = len(self.forms)
while True:
i += 1
form_name = "Form_{0}".format(i)
if form_name not in self.forms:
break
form = self.get_form_raw()
form.set_name(form_name)
self.forms[form_name] = form
self.delete_form_pixbuf(form_name)
return form_name
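    # Example (illustrative): snapshot the current geometry as a named form
    # and re-apply it later:
    #   name = curve_shape.save_form(None)   # auto-generates "Form_<n>"
    #   curve_shape.set_form(name)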
def delete_form(self, form_name):
if form_name in self.forms:
del self.forms[form_name]
self.delete_form_pixbuf(form_name)
def set_form_raw(self, form):
diff_width = form.width - self.width
diff_height = form.height - self.height
abs_anchor_at = self.get_abs_anchor_at()
self.width = form.width
self.height = form.height
form_curves = form.curves
anchor_at = self.anchor_at.copy()
anchor_at.scale(1./self.width, 1./self.height)
for i in range(min(len(form_curves), len(self.curves))):
self_curve = self.curves[i]
form_curve = form_curves[i]
self_curve.copy_from(form_curve)
self_curve.translate(anchor_at.x, anchor_at.y)
self_curve.adjust_origin()
if form.shapes_props:
for point_group_shape in self.point_group_shapes:
shape_name = point_group_shape.get_name()
prop_dict = form.shapes_props.get(shape_name)
if prop_dict is None:
continue
point_group_shape.set_pose_prop_from_dict(prop_dict)
if not point_group_shape.locked_to_shape:
if REL_ABS_ANCHOR_AT in prop_dict:
abs_anchor_at = prop_dict[REL_ABS_ANCHOR_AT].copy()
abs_anchor_at.translate(self.anchor_at.x, self.anchor_at.y)
point_group_shape.move_to(abs_anchor_at.x, abs_anchor_at.y)
self.fit_size_to_include_all()
#self.move_to(abs_anchor_at.x, abs_anchor_at.y)
def set_form(self, form_name):
if form_name not in self.forms:
return
form = self.forms[form_name]
self.set_form_raw(form)
#wrapper around set_form
def set_pose(self, pose_name):
self.set_form(pose_name)
def set_form_name(self, form_name):
self.set_form(form_name)
def get_form_list(self):
forms = []
for form_name in sorted(self.forms.keys()):
forms.append([self.get_form_pixbuf(form_name), form_name])
return forms
#wrapper around get_form_list
def get_pose_list(self, interior_shape=None):
return self.get_form_list()
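    #propagate a point group shape's changed anchor/size into every saved form that references it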
def update_forms_for_point_group(self, point_group_shape, old_translation, old_anchor_at):
translation_shift = point_group_shape.translation.diff(old_translation)
anchor_at_shift = point_group_shape.anchor_at.diff(old_anchor_at)
shape_name = point_group_shape.get_name()
for form in self.forms.values():
if not form.shapes_props:
continue
prop_dict = form.shapes_props.get(shape_name)
if not prop_dict:
continue
#prop_dict["translation"].translate(translation_shift.x, translation_shift.y)
prop_dict["anchor_at"].translate(anchor_at_shift.x, anchor_at_shift.y)
prop_dict["width"] = point_group_shape.get_width()
prop_dict["height"] = point_group_shape.get_height()
#wrapper around form transition
def set_pose_transition(self, start_pose, end_pose, value):
prop_data = dict(start_form=start_pose, end_form=end_pose)
self.set_prop_value("internal", value, prop_data)
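    #"internal" prop interpolates curves and point-group poses between two forms; other props go to Shape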
def set_prop_value(self, prop_name, value, prop_data=None):
if prop_name == "internal":
if "start_form" in prop_data:
start_form_name = prop_data["start_form"]
end_form_name = prop_data.get("end_form")
if end_form_name is None or end_form_name not in self.forms:
self.set_form(start_form_name)
return
start_form = self.forms[start_form_name]
end_form = self.forms[end_form_name]
else:
start_form = prop_data["start_form_raw"]
end_form = prop_data.get("end_form_raw")
new_width = start_form.width + (end_form.width-start_form.width)*value
new_height = start_form.height + (end_form.height-start_form.height)*value
diff_width = new_width - self.width
diff_height = new_height - self.height
abs_anchor_at = self.get_abs_anchor_at()
self.width = new_width
self.height = new_height
start_form_curves = start_form.curves
end_form_curves = end_form.curves
anchor_at = self.anchor_at.copy()
anchor_at.scale(1./self.width, 1./self.height)
minc = min(len(start_form_curves), len(end_form_curves), len(self.curves))
i = 0
start_curves = []
end_curves = []
while i<minc:
self_curve = self.curves[i]
start_form_curve = start_form_curves[i]
end_form_curve = end_form_curves[i]
i += 1
self_curve.set_inbetween(
start_form_curve, (start_form.width, start_form.height),
end_form_curve, (end_form.width, end_form.height),
value, (self.width, self.height))
self_curve.translate(anchor_at.x, anchor_at.y)
if start_form.shapes_props and end_form.shapes_props:
start_shapes_props = start_form.shapes_props
end_shapes_props = end_form.shapes_props
for point_group_shape in self.point_group_shapes:
shape_name = point_group_shape.get_name()
start_prop_dict = start_form.shapes_props.get(shape_name)
end_prop_dict = end_form.shapes_props.get(shape_name)
if not start_prop_dict or not end_prop_dict:
continue
point_group_shape.set_transition_pose_prop_from_dict(
start_prop_dict, end_prop_dict, frac=value)
if not point_group_shape.locked_to_shape and \
REL_ABS_ANCHOR_AT in start_prop_dict and \
REL_ABS_ANCHOR_AT in end_prop_dict:
start_rel_abs_anchor_at = start_prop_dict[REL_ABS_ANCHOR_AT].copy()
end_rel_abs_anchor_at = end_prop_dict[REL_ABS_ANCHOR_AT].copy()
abs_anchor_at = Point(0, 0)
abs_anchor_at.set_inbetween(start_rel_abs_anchor_at, end_rel_abs_anchor_at, value)
abs_anchor_at.translate(self.anchor_at.x, self.anchor_at.y)
point_group_shape.move_to(abs_anchor_at.x, abs_anchor_at.y)
self.fit_size_to_include_all()
else:
Shape.set_prop_value(self, prop_name, value, prop_data)
def rename_form(self, old_form, new_form):
if new_form in self.forms: return False
self.forms[new_form] = self.forms[old_form]
self.forms[new_form].set_name(new_form)
del self.forms[old_form]
return True
def get_xml_element(self):
elm = Shape.get_xml_element(self)
for curve in self.curves:
elm.append(curve.get_xml_element())
if not self.show_points:
elm.attrib["show_points"] = "False"
for form_name, form in self.forms.items():
elm.append(form.get_xml_element())
for point_group_shape in self.point_group_shapes:
elm.append(point_group_shape.get_xml_element())
return elm
@classmethod
def create_from_xml_element(cls, elm):
arr = Shape.get_params_array_from_xml_element(elm)
shape = cls(*arr)
shape.show_points = (elm.attrib.get("show_points", "True") != "False")
default_point = Point(0,0)
for curve_elm in elm.findall(Curve.TAG_NAME):
curve = Curve.create_from_xml_element(curve_elm)
shape.curves.append(curve)
for form_elm in elm.findall(CurvesForm.TAG_NAME):
form = CurvesForm.create_from_xml_element(form_elm)
shape.forms[form.name] = form
for point_group_elm in elm.findall(CurvePointGroupShape.TAG_NAME):
point_group_shape = CurvePointGroupShape.create_from_xml_element(point_group_elm, shape)
if point_group_shape:
shape.point_group_shapes.add(point_group_shape)
shape.assign_params_from_xml_element(elm)
shape.rebuild_curve_point_map()
return shape
def build_locked_to(self, up=0):
super(CurveShape, self).build_locked_to(up)
self.build_interior_locked_to(up+1)
def build_interior_locked_to(self, up=0):
if self.point_group_shapes:
for point_group_shape in self.point_group_shapes:
point_group_shape.build_locked_to(up)
def copy(self, copy_name=False, deep_copy=False, form=None):
newob = CurveShape(self.anchor_at.copy(), copy_value(self.border_color), self.border_width,
copy_value(self.fill_color), self.width, self.height)
self.copy_into(newob, copy_name)
for curve in self.curves:
newob.curves.append(curve.copy())
if deep_copy:
newob.forms = copy_value(self.forms)
newob.show_points = self.show_points
for point_group_shape in self.point_group_shapes:
point_group_shape = point_group_shape.copy(copy_name=True, deep_copy=True)
point_group_shape.set_curve_shape(newob)
newob.point_group_shapes.add(point_group_shape)
newob.build_interior_locked_to()
newob.rebuild_curve_point_map()
return newob
def is_empty(self):
return len(self.curves) == 0
def add_curve(self, curve):
self.curves.append(curve)
self.fit_size_to_include_all()
def get_curve_point_location(self, curve_point):
point = curve_point.get_point(self.curves)
if not point:
return Point(0., 0.)
point = point.copy()
point.scale(self.width, self.height)
return point
def set_curve_point_location(self, curve_point, location):
point = curve_point.get_point(self.curves)
location = location.copy()
location.scale(1./self.width, 1./self.height)
point.copy_from(location)
def get_point_location(self, curve_point):
if self.curve_point_map:
curve_point_shape = self.curve_point_map[curve_point.get_key()]
location = curve_point_shape.get_curve_point_location(curve_point)
if curve_point_shape != self:
location = self.transform_locked_shape_point(
location, root_shape=curve_point_shape, exclude_last=False)
return location
return self.get_curve_point_location(curve_point)
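    #wrap every curve point in its own single-point group shape so it can be manipulated independently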
def break_points_into_point_shapes(self):
curve_points = []
for i in range(len(self.curves)):
curve = self.curves[i]
curve_points.append(CurvePoint(i, -1, CurvePoint.POINT_TYPE_ORIGIN))
for j in range(len(curve.bezier_points)):
curve_points.append(CurvePoint(i, j, CurvePoint.POINT_TYPE_CONTROL_1))
curve_points.append(CurvePoint(i, j, CurvePoint.POINT_TYPE_CONTROL_2))
curve_points.append(CurvePoint(i, j, CurvePoint.POINT_TYPE_DEST))
for curve_point in curve_points:
if self.curve_point_map:
curve_point_shape = self.curve_point_map[curve_point.get_key()]
if curve_point_shape != self and \
len(curve_point_shape.curve_point_group.points)==1:
continue
else:
curve_point_shape = None
curve_point_group = CurvePointGroup()
curve_point_group.add_point(curve_point)
new_point_group_shape = self.add_new_point_group_shape(curve_point_group)
new_point_group_shape.set_locked_to(curve_point_shape)
attempt = 0
while True:
name = curve_point.get_formatted_name()
if attempt>0:
name = u"{0}_{1}".formatted(name, attempt)
if not self.point_group_shapes.contain(name):
break
                attempt += 1
self.point_group_shapes.rename(new_point_group_shape.get_name(), name)
def set_point_location(self, curve_point, location):
if self.curve_point_map:
curve_point_shape = self.curve_point_map[curve_point.get_key()]
location = curve_point_shape.transform_locked_shape_point(
location, root_shape=self, exclude_last=False)
curve_point_shape.set_curve_point_location(curve_point, location)
return
self.set_curve_point_location(curve_point, location)
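    #for closed curves, snap the origin onto the last destination point so the loop stays joined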
def adjust_origins(self):
for i in range(len(self.curves)):
curve = self.curves[i]
if not curve.closed:
continue
origin = CurvePoint(i, -1, CurvePoint.POINT_TYPE_ORIGIN)
last_dest = CurvePoint(i, len(curve.bezier_points)-1,
CurvePoint.POINT_TYPE_DEST)
location = self.get_point_location(last_dest)
self.set_point_location(origin, location)
def get_shape_of_curve_point(self, curve_point):
shape = self.curve_point_map.get(curve_point.get_key())
if shape is None:
shape = self
return shape
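    #draw one curve on the context, honoring point-group overrides and the mirror scale/rotation options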
def draw_curve(self, ctx, curve_index, scale=None, angle=None,
new_path=True, reverse=False, line_to=False):
ctx.save()
if angle is not None:
ctx.translate(self.anchor_at.x, self.anchor_at.y)
ctx.rotate(angle*RAD_PER_DEG)
ctx.translate(-self.anchor_at.x, -self.anchor_at.y)
        if curve_index>=len(self.curves):
            ctx.restore()#balance the ctx.save() above before returning
            return
curve = self.curves[curve_index]
if self.point_group_shapes:
#ctx.scale(1./self.width, 1./self.height)
origin_curve_point = CurvePoint(curve_index, -1, CurvePoint.POINT_TYPE_ORIGIN)
origin_shape = self.get_shape_of_curve_point(origin_curve_point)
origin = origin_shape.get_curve_point_location(origin_curve_point)
origin = self.transform_locked_shape_point(origin, root_shape=origin_shape, exclude_last=False)
if reverse:
dest_curve_point = CurvePoint(
curve_index, len(curve.bezier_points)-1, CurvePoint.POINT_TYPE_DEST)
dest_shape = self.get_shape_of_curve_point(dest_curve_point)
dest = dest_shape.get_curve_point_location(dest_curve_point)
dest = self.transform_locked_shape_point(
dest, root_shape=dest_shape, exclude_last=False)
start_point = dest
else:
start_point = origin
if new_path:
ctx.new_path()
if line_to:
ctx.line_to(start_point.x, start_point.y)
else:
ctx.move_to(start_point.x, start_point.y)
if reverse:
range_object = range(len(curve.bezier_points)-1, -2, -1)
else:
range_object = range(len(curve.bezier_points))
for point_index in range_object:
if reverse and point_index==-1:
dest = origin
else:
dest_curve_point = CurvePoint(curve_index, point_index, CurvePoint.POINT_TYPE_DEST)
dest_shape = self.get_shape_of_curve_point(dest_curve_point)
dest = dest_shape.get_curve_point_location(dest_curve_point)
dest = self.transform_locked_shape_point(
dest, root_shape=dest_shape, exclude_last=False)
if reverse:
if point_index<len(curve.bezier_points)-1:
ctx.curve_to( c2.x, c2.y, c1.x, c1.y, dest.x, dest.y)
if point_index==-1:
break
c1_curve_point = CurvePoint(curve_index, point_index, CurvePoint.POINT_TYPE_CONTROL_1)
c1_shape = self.get_shape_of_curve_point(c1_curve_point)
c1 = c1_shape.get_curve_point_location(c1_curve_point)
c1 = self.transform_locked_shape_point(c1, root_shape=c1_shape, exclude_last=False)
c2_curve_point = CurvePoint(curve_index, point_index, CurvePoint.POINT_TYPE_CONTROL_2)
c2_shape = self.get_shape_of_curve_point(c2_curve_point)
c2 = c2_shape.get_curve_point_location(c2_curve_point)
c2 = self.transform_locked_shape_point(c2, root_shape=c2_shape, exclude_last=False)
if not reverse:
ctx.curve_to( c1.x, c1.y, c2.x, c2.y, dest.x, dest.y)
if new_path and curve.closed:
ctx.close_path()
else:
ctx.scale(self.width, self.height)
if scale:
if scale[0] == -1 and scale[1] == 1:
ctx.translate(2*self.anchor_at.x/self.width, 0)
elif scale[0] == 1 and scale[1] == -1:
ctx.translate(0, 2*self.anchor_at.y/self.height)
elif scale[0] == -1 and scale[1] == -1:
ctx.translate(2*self.anchor_at.x/self.width, 2*self.anchor_at.y/self.height)
ctx.scale(*scale)
if reverse:
curve.reverse_draw_path(ctx, new_path=new_path, line_to=line_to)
else:
if self.exposure<1.0:
self.draw_through_baked_points(ctx, curve_index)
else:
curve.draw_path(ctx, new_path=new_path, line_to=line_to)
ctx.restore()
def draw_through_baked_points(self, ctx, curve_index):
self.build_baked_points(curve_index)
baked_points = self.baked_points[curve_index]
count = int(round(baked_points.shape[0]*self.exposure))
for i in range(count):
x = baked_points[i][0]
y = baked_points[i][1]
if i == 0:
ctx.move_to(x, y)
else:
ctx.line_to(x, y)
def draw_path(self, ctx, for_fill=False):
if for_fill and not self.fill_color:
return
if not for_fill and not self.border_color:
return
paths = []
for curve_index in range(len(self.curves)):
self.draw_curve(ctx, curve_index)
paths.append(ctx.copy_path())
if self.mirror != 0:
scales, rotations = self.get_scales_n_rotations()
for scale in scales:
for curve_index in range(len(self.curves)):
curve = self.curves[curve_index]
if not for_fill or (for_fill and curve.closed):
self.draw_curve(ctx, curve_index, scale=scale)
paths.append(ctx.copy_path())
for angle in rotations:
for curve_index in range(len(self.curves)):
curve = self.curves[curve_index]
if not for_fill or (for_fill and curve.closed):
self.draw_curve(ctx, curve_index, angle=angle)
paths.append(ctx.copy_path())
ctx.new_path()
for path in paths:
ctx.append_path(path)
def get_curve_outline(self, curve_index):
curve = self.curves[curve_index]
if self.curve_point_map:
points = CurvePoint.get_curve_points_for_curve(curve_index, self.curves)
for i in range(len(points)):
points[i] = self.get_point_location(points[i])
outline = Polygon(points=points).get_outline()
else:
outline = curve.get_outline()
if outline:
outline.scale(self.width, self.height)
return outline
def translate_curve(self, curve_index, dx, dy):
curve = self.curves[curve_index]
if self.curve_point_map:
curve_points = CurvePoint.get_curve_points_for_curve(curve_index, self.curves)
for curve_point in curve_points:
if self.curve_point_map[curve_point.get_key()] == self:
point = curve_point.get_point(self.curves)
if point:
point.translate(dx, dy)
else:
curve.translate(dx, dy)
def scale_curve(self, curve_index, sx, sy):
curve = self.curves[curve_index]
if self.curve_point_map:
curve_points = CurvePoint.get_curve_points_for_curve(curve_index, self.curves)
for curve_point in curve_points:
if self.curve_point_map[curve_point.get_key()] == self:
point = curve_point.get_point(self.curves)
if point:
point.scale(sx, sy)
else:
curve.scale(sx, sy)
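    #recompute the bounding outline of all curves, renormalize them to it and update size/anchors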
def fit_size_to_include_all(self):
self.adjust_origins()
outline = None
for curve_index in range(len(self.curves)):
if outline is None:
outline = self.get_curve_outline(curve_index)
else:
outline.expand_include(self.get_curve_outline(curve_index))
if not outline: return
abs_anchor_at = self.get_abs_anchor_at()
shift = Point(-outline.left, -outline.top)
self.anchor_at.translate(shift.x, shift.y)
self.move_to(abs_anchor_at.x, abs_anchor_at.y)
if outline.height==0:
sy = None
else:
sy = self.height/outline.height
if outline.width==0:
sx = None
else:
sx = self.width/outline.width
dx = -outline.left/self.width
dy = -outline.top/self.height
for curve_index in range(len(self.curves)):
self.translate_curve(curve_index, dx, dy)
if sx is not None and sy is not None:
self.scale_curve(curve_index, sx, sy)
for point_group_shape in self.point_group_shapes:
if point_group_shape.locked_to_shape:
continue
point_group_shape.shift_abs_anchor_at(shift)
if self.locked_shapes:
for shape in self.locked_shapes:
shape.shift_abs_anchor_at(shift)
self.set_width(outline.width, fixed_anchor=False)
self.set_height(outline.height, fixed_anchor=False)
self.baked_points = None
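    #lazily sample and cache the curve as baked points, used for partial drawing and sampling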
def build_baked_points(self, curve_index):
if self.baked_points is None:
self.baked_points = dict()
if self.baked_points.get(curve_index) is None:
self.baked_points[curve_index] = \
self.curves[curve_index].get_baked_points(self.width, self.height)
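    #return the point at fraction frac along the baked curve together with the local tangent angle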
def get_baked_point(self, frac, curve_index=0):
self.build_baked_points(curve_index)
baked_points = self.baked_points[curve_index]
if frac<0:
frac += 1
if frac>1:
frac %= 1
pos = int(baked_points.shape[0]*frac)
if pos>=baked_points.shape[0]:
pos=baked_points.shape[0]-1
x, y = list(baked_points[pos])
point = self.reverse_transform_point(Point(x*self.width, y*self.height))
if pos<baked_points.shape[0]-1:
x, y = list(baked_points[pos+1])
point2 = self.reverse_transform_point(Point(x*self.width, y*self.height))
diffp = point2.diff(point)
angle = diffp.get_angle()
else:
angle = 0.
return point, angle
def get_baked_points(self, curve_index=0):
self.build_baked_points(curve_index)
baked_points = self.baked_points[curve_index]
return baked_points*(self.width, self.height)
def find_point_location(self, point):
point = point.copy()
point.scale(1./self.width, 1./self.height)
tolerance = 5./max(self.width, self.height)
for curve_index in range(len(self.curves)):
curve = self.curves[curve_index]
found = curve.get_closest_control_point(point, self.width, self.height, tolerance)
if found:
bezier_point_index, t = found
return (curve_index, bezier_point_index, t)
return None
def insert_point_at(self, point):
found = self.find_point_location(point)
if not found: return False
curve_index, bezier_point_index, t = found
curve = self.curves[curve_index]
curve.insert_point_at(bezier_point_index, t)
for point_group_shape in self.point_group_shapes:
curve_point_group = point_group_shape.curve_point_group
curve_point_group.shift(
curve_index=curve_index,
from_point_index=bezier_point_index,
point_index_shift=1)
self.rebuild_curve_point_map()
return True
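    #break a curve at the given point: open/re-origin a closed curve, or split an open curve in two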
def insert_break_at(self, curve_index, bezier_point_index):
if curve_index>=len(self.curves): return False
prev_curve = self.curves[curve_index]
if bezier_point_index>= len(prev_curve.bezier_points): return False
if bezier_point_index == len(prev_curve.bezier_points)-1:
if prev_curve.closed:
#Just open up the closed curve
prev_curve.closed = False
return True
else:
return False
bezier_points_count = len(prev_curve.bezier_points)
if prev_curve.closed:
prev_curve.closed = False
prev_curve.add_bezier_points(prev_curve.bezier_points[:bezier_point_index+1])
prev_curve.remove_bezier_point_indices(0, bezier_point_index)
prev_curve.origin.copy_from(prev_curve.bezier_points[0].dest)
prev_curve.remove_bezier_point_index(0)
for point_group_shape in self.point_group_shapes:
curve_point_group = point_group_shape.curve_point_group
curve_point_group.shift(
curve_index=curve_index,
from_point_index=0, to_point_index=bezier_point_index+1,
point_index_shift=bezier_points_count)
curve_point_group.shift(
curve_index=curve_index,
from_point_index=0,
point_index_shift=-bezier_point_index-1)
else:
bezier_point = prev_curve.bezier_points[bezier_point_index]
new_curve = Curve(origin=bezier_point.dest.copy(),
bezier_points=prev_curve.bezier_points[bezier_point_index+1:])
prev_curve.remove_bezier_point_indices(bezier_point_index+1, len(prev_curve.bezier_points))
self.curves.insert(curve_index+1, new_curve)
for point_group_shape in self.point_group_shapes:
curve_point_group = point_group_shape.curve_point_group
curve_point_group.shift(
curve_index=curve_index,
from_point_index=bezier_point_index+1,
curve_index_shift=1,
point_index_shift=-bezier_point_index-1)
self.rebuild_curve_point_map()
return True
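    #join two curve ends: close a single curve, or merge curve_2 into curve_1 (reversing it if needed)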
def join_points(self, curve_index_1, is_start_1, curve_index_2, is_start_2):
if curve_index_1>=len(self.curves): return False
        if curve_index_2>=len(self.curves): return False
curve_1 = self.curves[curve_index_1]
curve_2 = self.curves[curve_index_2]
if curve_index_1 == curve_index_2:
if is_start_1 != is_start_2:
curve_1.closed = True
curve_1.origin.x = (curve_1.origin.x+curve_1.bezier_points[-1].dest.x)*.5
curve_1.origin.y = (curve_1.origin.y+curve_1.bezier_points[-1].dest.y)*.5
curve_1.bezier_points[-1].dest.copy_from(curve_1.origin)
return True
return False
if curve_1.closed: return False
if curve_2.closed: return False
dist_lapse = .01
if is_start_1 == is_start_2:#reverse curve_2
rev_curve = curve_2.reverse_copy()
curve_2.origin.copy_from(rev_curve.origin)
for bpi in range(len(rev_curve.bezier_points)):
curve_2.bezier_points[bpi].control_1.copy_from(rev_curve.bezier_points[bpi].control_1)
curve_2.bezier_points[bpi].control_2.copy_from(rev_curve.bezier_points[bpi].control_2)
curve_2.bezier_points[bpi].dest.copy_from(rev_curve.bezier_points[bpi].dest)
for point_group_shape in self.point_group_shapes:
point_group_shape.curve_point_group.reverse_shift(
curve_index=curve_index_2,
point_index_max=len(curve_2.bezier_points)-1)
if is_start_1:#swap curves
curve_1, curve_2 = curve_2, curve_1
curve_index_1, curve_index_2 = curve_index_2, curve_index_1
        #curve_2 gets attached at the end of curve_1
curve_1.bezier_points[-1].dest.x = (curve_1.bezier_points[-1].dest.x + curve_2.origin.x)*.5
curve_1.bezier_points[-1].dest.y = (curve_1.bezier_points[-1].dest.y + curve_2.origin.y)*.5
for point_group_shape in self.point_group_shapes:
point_group_shape.curve_point_group.shift(
curve_index=curve_index_2,
point_index_shift=len(curve_1.bezier_points))
for point_group_shape in self.point_group_shapes:
point_group_shape.curve_point_group.shift(
curve_index=curve_index_2,
curve_index_shift=curve_index_1-curve_index_2)
curve_1.add_bezier_points(curve_2.bezier_points)
del self.curves[curve_index_2]
return True
def extend_point(self, curve_index, is_start, point_index):
if curve_index>=len(self.curves): return False
curve = self.curves[curve_index]
#if curve.closed: return False
if is_start:
curve.insert_point_at(0, t=0.0)
else:
curve.insert_point_at(point_index, t=1.0)
return True
def delete_point_group_curve(self, curve_index):
for point_group_shape in self.point_group_shapes:
point_group_shape.curve_point_group.delete_curve(curve_index)
self.cleanup_point_groups()
def delete_point_group_point(self, curve_index, point_index):
for point_group_shape in self.point_group_shapes:
point_group_shape.curve_point_group.delete_point_index(curve_index, point_index)
self.cleanup_point_groups()
def cleanup_point_groups(self):
i = 0
while i <len(self.point_group_shapes):
point_group_shape = self.point_group_shapes.get_at_index(i)
point_group = point_group_shape.curve_point_group
if len(point_group.points)<1:
self.point_group_shapes.remove(point_group_shape)
else:
i += 1
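    #remove one bezier point (optionally splitting the curve there) and drop curves that become degenerate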
def delete_point_at(self, curve_index, bezier_point_index, break_allowed=False):
if curve_index>=len(self.curves): return False
curve = self.curves[curve_index]
if bezier_point_index>=len(curve.bezier_points): return False
if bezier_point_index<-1: return False
if len(curve.bezier_points)>1:
if bezier_point_index == -1:
curve.origin.copy_from(curve.bezier_points[0].dest)
curve.update_origin()
curve.remove_bezier_point_index(0)
self.delete_point_group_point(curve_index, 0)
if curve.closed:
curve.bezier_points[-1].dest.copy_from(curve.origin)
                curve.update_bezier_point_index(-1)
elif bezier_point_index == len(curve.bezier_points)-1:
if curve.closed and curve.bezier_points:
curve.origin.copy_from(curve.bezier_points[0].dest)
curve.bezier_points[-1].dest.copy_from(curve.origin)
                curve.update_bezier_point_index(-1)
curve.remove_bezier_point_index(0)
self.delete_point_group_point(curve_index, 0)
else:
curve.remove_bezier_point_index(-1)
self.delete_point_group_point(curve_index, len(curve.bezier_points)-1)
else:
if break_allowed:
new_curve = Curve(origin=curve.bezier_points[bezier_point_index].dest.copy())
new_curve.add_bezier_points(curve.bezier_points[bezier_point_index+1:])
curve.remove_bezier_point_indices(
bezier_point_index+1, len(curve.bezier_points))
self.curves.insert(curve_index+1, new_curve)
curve.remove_bezier_point_index(bezier_point_index)
self.delete_point_group_point(curve_index, bezier_point_index)
if len(curve.bezier_points)<3:
curve.closed = False
if len(self.curves)>1:
if (len(curve.bezier_points)<=1 and curve.closed) or len(curve.bezier_points)==0:
del self.curves[curve_index]
self.delete_point_group_curve(curve_index)
elif len(self.curves)>1:
del self.curves[curve_index]
self.delete_point_group_curve(curve_index)
self.rebuild_curve_point_map()
return True
def delete_dest_points_inside_rect(self, center, radius):
center = self.transform_point(center)
radius /= (self.width+self.height)*.5
center.scale(1./self.width, 1./self.height)
curve_point_indices = dict()
for curve_index in range(len(self.curves)):
curve = self.curves[curve_index]
curve_point_indices[curve_index] = curve.get_indices_within(center, radius)
#for bezier_point_index in range(-1, len(curve.bezier_points)):
# if bezier_point_index == -1:
# point = curve.origin.copy()
# else:
# point = curve.bezier_points[bezier_point_index].dest.copy()
# if point.distance(center)<radius:
# if curve_index not in curve_point_indices:
# curve_point_indices[curve_index] = []
# curve_point_indices[curve_index].append(bezier_point_index)
delete_count = 0
for curve_index in reversed(sorted(curve_point_indices.keys())):
for bezier_point_index in reversed(sorted(curve_point_indices[curve_index])):
if self.delete_point_at(curve_index, bezier_point_index, break_allowed=True):
delete_count += 1
return delete_count>0
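    #build an equivalent CurveShape from a rounded-corner rectangle, one bezier arc per corner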
@staticmethod
def create_from_rectangle_shape(rectangle_shape):
if rectangle_shape.corner_radius==0: return None
curve_shape = CurveShape(Point(0,0), None, None, None, None, None)
crsx = rectangle_shape.corner_radius/rectangle_shape.width
crsy = rectangle_shape.corner_radius/rectangle_shape.height
        k = .5522847498*.5#kappa: cubic bezier quarter-circle constant, scaled for radius 0.5
#crsx = crsy = .5
curved_points = [
BezierPoint(control_1=Point(.5+k, 0), control_2=Point(1., .5-k), dest=Point(1., .5)),
BezierPoint(control_1=Point(1., .5+k), control_2=Point(.5+k, 1.), dest=Point(.5, 1.)),
BezierPoint(control_1=Point(.5-k, 1.), control_2=Point(0, .5+k), dest=Point(0., .5)),
BezierPoint(control_1=Point(0., .5-k), control_2=Point(0.5-k, 0.), dest=Point(.5, 0.))
]
curved_points[0].scale(2*crsx, 2*crsy).translate(1.-2*crsx, 0)
curved_points[1].scale(2*crsx, 2*crsy).translate(1.-2*crsx, 1-2*crsy)
curved_points[2].scale(2*crsx, 2*crsy).translate(0, 1-2*crsy)
curved_points[3].scale(2*crsx, 2*crsy).translate(0, 0)
p1 = Point(1., 1-crsy)
p2 = Point(crsx, 1.)
p3 = Point(0., crsy)
p4 = Point(1.-crsx, 0)
final_points= [
curved_points[0],
BezierPoint(control_1=p1.copy(), control_2=p1.copy(), dest=p1.copy()),
curved_points[1],
BezierPoint(control_1=p2.copy(), control_2=p2.copy(), dest=p2.copy()),
curved_points[2],
BezierPoint(control_1=p3.copy(), control_2=p3.copy(), dest=p3.copy()),
curved_points[3],
BezierPoint(control_1=p4.copy(), control_2=p4.copy(), dest=p4.copy()),
]
final_points[1].align_straight_with(final_points[0].dest)
final_points[3].align_straight_with(final_points[2].dest)
final_points[5].align_straight_with(final_points[4].dest)
final_points[7].align_straight_with(final_points[6].dest)
curve_shape.curves.append(Curve(
origin=Point(1.-crsx, 0),
bezier_points=final_points, closed=True))
rectangle_shape.copy_into(curve_shape, all_fields=True, copy_name=False)
curve_shape.fit_size_to_include_all()
return curve_shape
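    #build an equivalent CurveShape from an oval, using a bezier circle/arc with the oval's sweep angle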
@staticmethod
def create_from_oval_shape(oval_shape):
curve_shape = CurveShape(Point(0,0), None, None, None, None, None)
        k = .5522847498*.5#kappa: cubic bezier quarter-circle constant, scaled for radius 0.5
bezier_points = [
BezierPoint(control_1=Point(.5+k, 0), control_2=Point(1., .5-k), dest=Point(1., .5)),
BezierPoint(control_1=Point(1., .5+k), control_2=Point(.5+k, 1.), dest=Point(.5, 1.)),
BezierPoint(control_1=Point(.5-k, 1.), control_2=Point(0, .5+k), dest=Point(0., .5)),
BezierPoint(control_1=Point(0., .5-k), control_2=Point(0.5-k, 0.), dest=Point(.5, 0.))
]
#curve_shape.curves.append(Curve(origin=Point(.5, 0.), bezier_points=bezier_points, closed=True))
curve_shape.curves.append(Curve.create_circle(sweep_angle=oval_shape.sweep_angle))
oval_shape.copy_into(curve_shape, all_fields=True, copy_name=False)
curve_shape.fit_size_to_include_all()
return curve_shape
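    #build an equivalent CurveShape from a polygon, one straight bezier segment per polygon edge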
@staticmethod
def create_from_polygon_shape(polygon_shape):
curve_shape = CurveShape(Point(0,0), None, None, None, None, None)
for polygon in polygon_shape.polygons:
curve = None
for i in range(len(polygon.points)):
point = polygon.points[i]
if i == 0:
curve = Curve(origin=point.copy())
else:
bzp = BezierPoint(
control_1 = point.copy(), control_2 = point.copy(), dest = point.copy())
curve.add_bezier_point(bzp)
bzp.align_straight_with(polygon.points[i-1])
curve.closed = polygon.closed
if polygon.closed:
point = polygon.points[0]
bzp = BezierPoint(
control_1 = point.copy(), control_2 = point.copy(), dest = point.copy())
curve.add_bezier_point(bzp)
bzp.align_straight_with(polygon.points[-1])
curve_shape.curves.append(curve)
polygon_shape.copy_into(curve_shape, all_fields=True, copy_name=False)
curve_shape.fit_size_to_include_all()
return curve_shape
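    #mirror every curve point about the anchor along the x or y axis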
def flip(self, direction):
percent_anchor_at = self.anchor_at.copy()
percent_anchor_at.scale(1./self.width, 1./self.height)
for curve in self.curves:
if direction == "x":
curve.origin.x = 2*percent_anchor_at.x-curve.origin.x
elif direction == "y":
curve.origin.y = 2*percent_anchor_at.y-curve.origin.y
for bezier_point in curve.bezier_points:
if direction == "x":
bezier_point.control_1.x = 2*percent_anchor_at.x-bezier_point.control_1.x
bezier_point.control_2.x = 2*percent_anchor_at.x-bezier_point.control_2.x
bezier_point.dest.x = 2*percent_anchor_at.x-bezier_point.dest.x
elif direction == "y":
bezier_point.control_1.y = 2*percent_anchor_at.y-bezier_point.control_1.y
bezier_point.control_2.y = 2*percent_anchor_at.y-bezier_point.control_2.y
bezier_point.dest.y = 2*percent_anchor_at.y-bezier_point.dest.y
self.fit_size_to_include_all()
def _transform_point_from_shape(self, shape, point):
point.scale(shape.width, shape.height)
point = shape.reverse_transform_point(point)
point = self.transform_point(point)
point.scale(1./self.width, 1./self.height)
return point
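    #merge another CurveShape's curves into this one, converted into this shape's coordinate space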
def include_inside(self, shape):
if not isinstance(shape, CurveShape): return False
for curve in shape.curves:
curve = curve.copy()
curve.origin.copy_from(self._transform_point_from_shape(shape, curve.origin))
for i in range(len(curve.bezier_points)):
bp = curve.bezier_points[i]
bp.control_1.copy_from(self._transform_point_from_shape(shape, bp.control_1))
bp.control_2.copy_from(self._transform_point_from_shape(shape, bp.control_2))
bp.dest.copy_from(self._transform_point_from_shape(shape, bp.dest))
self.curves.append(curve)
return True
| gpl-3.0 | -5,911,548,169,782,067,000 | 42.595708 | 107 | 0.572151 | false |
0x00ach/zer0m0n | signatures/recon_systeminfo.py | 6 | 1252 | # Copyright (C) 2012 Claudio "nex" Guarnieri (@botherder)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class SystemInfo(Signature):
name = "recon_systeminfo"
description = "Collects information on the system (ipconfig, netstat, systeminfo)"
severity = 3
categories = ["recon"]
authors = ["nex"]
minimum = "1.0"
evented = True
def on_call(self, call, process):
return self.check_argument_call(
            call, pattern="(^cmd\.exe).*((systeminfo)|(ipconfig)|(netstat))",
name="CommandLine",
category="process",
regex=True
)
| gpl-3.0 | 1,236,607,287,380,904,400 | 36.939394 | 86 | 0.691693 | false |
FreekingDean/home-assistant | tests/components/test_configurator.py | 29 | 4435 | """The tests for the Configurator component."""
# pylint: disable=protected-access
import unittest
import homeassistant.components.configurator as configurator
from homeassistant.const import EVENT_TIME_CHANGED, ATTR_FRIENDLY_NAME
from tests.common import get_test_home_assistant
class TestConfigurator(unittest.TestCase):
"""Test the Configurator component."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
# pylint: disable=invalid-name
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_request_least_info(self):
"""Test request config with least amount of data."""
request_id = configurator.request_config(
self.hass, "Test Request", lambda _: None)
self.assertEqual(
1, len(self.hass.services.services.get(configurator.DOMAIN, [])),
"No new service registered")
states = self.hass.states.all()
self.assertEqual(1, len(states), "Expected a new state registered")
state = states[0]
self.assertEqual(configurator.STATE_CONFIGURE, state.state)
self.assertEqual(
request_id, state.attributes.get(configurator.ATTR_CONFIGURE_ID))
def test_request_all_info(self):
"""Test request config with all possible info."""
exp_attr = {
ATTR_FRIENDLY_NAME: "Test Request",
configurator.ATTR_DESCRIPTION: "config description",
configurator.ATTR_DESCRIPTION_IMAGE: "config image url",
configurator.ATTR_SUBMIT_CAPTION: "config submit caption",
configurator.ATTR_FIELDS: [],
configurator.ATTR_LINK_NAME: "link name",
configurator.ATTR_LINK_URL: "link url",
configurator.ATTR_ENTITY_PICTURE: "config entity picture",
configurator.ATTR_CONFIGURE_ID: configurator.request_config(
self.hass,
name="Test Request",
callback=lambda _: None,
description="config description",
description_image="config image url",
submit_caption="config submit caption",
fields=None,
link_name="link name",
link_url="link url",
entity_picture="config entity picture",
)
}
states = self.hass.states.all()
self.assertEqual(1, len(states))
state = states[0]
self.assertEqual(configurator.STATE_CONFIGURE, state.state)
assert exp_attr == dict(state.attributes)
def test_callback_called_on_configure(self):
"""Test if our callback gets called when configure service called."""
calls = []
request_id = configurator.request_config(
self.hass, "Test Request", lambda _: calls.append(1))
self.hass.services.call(
configurator.DOMAIN, configurator.SERVICE_CONFIGURE,
{configurator.ATTR_CONFIGURE_ID: request_id})
self.hass.block_till_done()
self.assertEqual(1, len(calls), "Callback not called")
def test_state_change_on_notify_errors(self):
"""Test state change on notify errors."""
request_id = configurator.request_config(
self.hass, "Test Request", lambda _: None)
error = "Oh no bad bad bad"
configurator.notify_errors(request_id, error)
state = self.hass.states.all()[0]
self.assertEqual(error, state.attributes.get(configurator.ATTR_ERRORS))
def test_notify_errors_fail_silently_on_bad_request_id(self):
"""Test if notify errors fails silently with a bad request id."""
configurator.notify_errors(2015, "Try this error")
def test_request_done_works(self):
"""Test if calling request done works."""
request_id = configurator.request_config(
self.hass, "Test Request", lambda _: None)
configurator.request_done(request_id)
self.assertEqual(1, len(self.hass.states.all()))
self.hass.bus.fire(EVENT_TIME_CHANGED)
self.hass.block_till_done()
self.assertEqual(0, len(self.hass.states.all()))
def test_request_done_fail_silently_on_bad_request_id(self):
"""Test that request_done fails silently with a bad request id."""
configurator.request_done(2016)
| mit | -5,437,386,868,304,473,000 | 37.565217 | 79 | 0.629989 | false |
PaloAltoNetworks-BD/SplunkforPaloAltoNetworks | Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py2/solnlib/packages/schematics/contrib/enum_type.py | 3 | 2419 | """Type supporting native Python3 enum. It depends either on Py3.4+ or e.g. enum34.
"""
from __future__ import unicode_literals, absolute_import
try:
from enum import Enum
except ImportError:
pass
from ..exceptions import ConversionError
from ..translator import _
from ..types import BaseType
from ..compat import string_type
class EnumType(BaseType):
"""A field type allowing to use native enums as values.
Restricts values to enum members and (optionally) enum values.
`use_values` - if set to True allows do assign enumerated values to the field.
>>> import enum
>>> class E(enum.Enum):
... A = 1
... B = 2
>>> from schematics import Model
>>> class AModel(Model):
... foo = EnumType(E)
>>> a = AModel()
>>> a.foo = E.A
    >>> a.foo.value == 1
    True
"""
MESSAGES = {
'convert': _("Couldn't interpret '{0}' as member of {1}."),
}
def __init__(self, enum, use_values=False, **kwargs):
"""
:param enum: Enum class to which restrict values assigned to the field.
:param use_values: If true, also values of the enum (right-hand side) can be assigned here.
Other args are passed to superclass.
"""
self._enum_class = enum
self._use_values = use_values
super(EnumType, self).__init__(**kwargs)
def to_native(self, value, context=None):
if isinstance(value, self._enum_class):
return value
else:
by_name = self._find_by_name(value)
if by_name:
return by_name
by_value = self._find_by_value(value)
if by_value:
return by_value
raise ConversionError(self.messages['convert'].format(value, self._enum_class))
def _find_by_name(self, value):
if isinstance(value, string_type):
try:
return self._enum_class[value]
except KeyError:
pass
def _find_by_value(self, value):
if not self._use_values:
return
for member in self._enum_class:
if member.value == value:
return member
def to_primitive(self, value, context=None):
if isinstance(value, Enum):
if self._use_values:
return value.value
else:
return value.name
else:
return str(value)
| isc | -4,532,396,976,938,578,400 | 29.620253 | 99 | 0.57131 | false |
AndreasMadsen/tensorflow | tensorflow/contrib/learn/python/learn/utils/saved_model_export_utils.py | 4 | 11253 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities supporting export to SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import re
import time
from tensorflow.contrib.learn.python.learn import export_strategy
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.utils import gc
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.python.platform import gfile
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.util import compat
# A key for use in the input_alternatives dict indicating the default input.
# This is the input that will be expected when a serving request does not
# specify a specific signature.
# The default input alternative specifies placeholders that the input_fn
# requires to be fed (in the typical case, a single placeholder for a
# serialized tf.Example).
DEFAULT_INPUT_ALTERNATIVE_KEY = 'default_input_alternative'
# A key for use in the input_alternatives dict indicating the features input.
# The features inputs alternative specifies the feature Tensors provided as
# input to the model_fn, i.e. the outputs of the input_fn.
FEATURES_INPUT_ALTERNATIVE_KEY = 'features_input_alternative'
# A key for use in the output_alternatives dict indicating the default output.
# This is the output that will be provided when a serving request does not
# specify a specific signature.
# In a single-headed model, the single output is automatically the default.
# In a multi-headed model, the name of the desired default head should be
# provided to get_output_alternatives.
DEFAULT_OUTPUT_ALTERNATIVE_KEY = 'default_output_alternative'
def build_standardized_signature_def(
input_tensors, output_tensors, problem_type):
"""Build a SignatureDef using problem type and input and output Tensors.
Note that this delegates the actual creation of the signatures to methods in
//third_party/tensorflow/python/saved_model/signature_def_utils.py, which may
assign names to the input and output tensors (depending on the problem type)
that are standardized in the context of SavedModel.
Args:
input_tensors: a dict of string key to `Tensor`
output_tensors: a dict of string key to `Tensor`
problem_type: an instance of constants.ProblemType, specifying
classification, regression, etc.
Returns:
A SignatureDef using SavedModel standard keys where possible.
Raises:
ValueError: if input_tensors or output_tensors is None or empty.
"""
if not input_tensors:
raise ValueError('input_tensors must be provided.')
if not output_tensors:
raise ValueError('output_tensors must be provided.')
# Per-method signature_def functions will standardize the keys if possible
if _is_classification_problem(problem_type, input_tensors, output_tensors):
(_, examples), = input_tensors.items()
classes = output_tensors.get(prediction_key.PredictionKey.CLASSES)
scores = output_tensors.get(prediction_key.PredictionKey.SCORES)
if not (classes or scores):
(_, classes), = output_tensors.items()
return signature_def_utils.classification_signature_def(
examples, classes, scores)
elif _is_regression_problem(problem_type, input_tensors, output_tensors):
(_, examples), = input_tensors.items()
(_, predictions), = output_tensors.items()
return signature_def_utils.regression_signature_def(examples, predictions)
else:
return signature_def_utils.predict_signature_def(
input_tensors, output_tensors)
def _is_classification_problem(problem_type, input_tensors, output_tensors):
classes = output_tensors.get(prediction_key.PredictionKey.CLASSES)
scores = output_tensors.get(prediction_key.PredictionKey.SCORES)
return ((problem_type == constants.ProblemType.CLASSIFICATION or
problem_type == constants.ProblemType.LOGISTIC_REGRESSION)
and len(input_tensors) == 1
and (classes or scores or len(output_tensors) == 1))
def _is_regression_problem(problem_type, input_tensors, output_tensors):
return (problem_type == constants.ProblemType.LINEAR_REGRESSION
and len(input_tensors) == 1
and len(output_tensors) == 1)
def get_input_alternatives(input_ops):
"""Obtain all input alternatives using the input_fn output and heuristics."""
input_alternatives = {}
if isinstance(input_ops, input_fn_utils.InputFnOps):
features, unused_labels, default_inputs = input_ops
input_alternatives[DEFAULT_INPUT_ALTERNATIVE_KEY] = default_inputs
else:
features, unused_labels = input_ops
if not features:
raise ValueError('Features must be defined.')
# Add the "features" input_signature in any case.
# Note defensive copy because model_fns alter the features dict.
input_alternatives[FEATURES_INPUT_ALTERNATIVE_KEY] = (
copy.copy(features))
return input_alternatives, features
def get_output_alternatives(
model_fn_ops,
default_output_alternative_key=DEFAULT_OUTPUT_ALTERNATIVE_KEY):
"""Obtain all output alternatives using the model_fn output and heuristics."""
output_alternatives = model_fn_ops.output_alternatives
# Identify the default outputs, creating them if needed.
if (output_alternatives
and default_output_alternative_key not in output_alternatives):
raise ValueError('default_output_alternative_key not in '
'output_alternatives: %s' % default_output_alternative_key)
if (output_alternatives
and default_output_alternative_key in output_alternatives):
# If a default head is provided, use it.
actual_default_output_alternative_key = default_output_alternative_key
return output_alternatives, actual_default_output_alternative_key
if output_alternatives and len(output_alternatives) == 1:
# If there is only one head, use it as the default.
(actual_default_output_alternative_key, _), = output_alternatives.items()
return output_alternatives, actual_default_output_alternative_key
# Lacking provided output alternatives, the best we can do is to
# interpret the model as single-headed of unknown type.
default_problem_type = constants.ProblemType.UNSPECIFIED
default_outputs = model_fn_ops.predictions
actual_default_output_alternative_key = DEFAULT_OUTPUT_ALTERNATIVE_KEY
output_alternatives = {actual_default_output_alternative_key:
(default_problem_type, default_outputs)}
return output_alternatives, actual_default_output_alternative_key
def build_all_signature_defs(input_alternatives, output_alternatives,
actual_default_output_alternative_key):
"""Build `SignatureDef`s from all pairs of input and output alternatives."""
signature_def_map = {
('%s:%s' % (input_key, output_key or 'None')):
build_standardized_signature_def(
inputs, outputs, problem_type)
for input_key, inputs in input_alternatives.items()
for output_key, (problem_type, outputs)
in output_alternatives.items()}
# Add the default SignatureDef
default_inputs = input_alternatives[DEFAULT_INPUT_ALTERNATIVE_KEY]
if not default_inputs:
default_inputs = input_alternatives[FEATURES_INPUT_ALTERNATIVE_KEY]
# default outputs are guaranteed to exist above
(default_problem_type, default_outputs) = (
output_alternatives[actual_default_output_alternative_key])
signature_def_map[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = (
build_standardized_signature_def(
default_inputs, default_outputs, default_problem_type))
return signature_def_map
def get_timestamped_export_dir(export_dir_base):
"""Builds a path to a new subdirectory within the base directory.
Each export is written into a new subdirectory named using the
current time. This guarantees monotonically increasing version
numbers even across multiple runs of the pipeline.
The timestamp used is the number of milliseconds since epoch UTC.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
Returns:
The full path of the new subdirectory (which is not actually created yet).
"""
export_timestamp = int(time.time() * 1e3)
export_dir = os.path.join(
compat.as_bytes(export_dir_base),
compat.as_bytes(str(export_timestamp)))
return export_dir
def garbage_collect_exports(export_dir_base, exports_to_keep):
"""Deletes older exports, retaining only a given number of the most recent.
Export subdirectories are assumed to be named with monotonically increasing
integers; the most recent are taken to be those with the largest values.
Args:
export_dir_base: the base directory under which each export is in a
versioned subdirectory.
exports_to_keep: the number of recent exports to retain.
"""
if exports_to_keep is None:
return
keep_filter = gc.largest_export_versions(exports_to_keep)
delete_filter = gc.negation(keep_filter)
# Export dir must not end with / or it will break the re match below.
if export_dir_base.endswith('/'):
export_dir_base = export_dir_base[:-1]
# create a simple parser that pulls the export_version from the directory.
def parser(path):
match = re.match('^' + export_dir_base + '/(\\d{13})$', path.path)
if not match:
return None
return path._replace(export_version=int(match.group(1)))
for p in delete_filter(gc.get_paths(export_dir_base, parser=parser)):
gfile.DeleteRecursively(p.path)
def make_export_strategy(export_input_fn,
default_output_alternative_key='default',
assets_extra=None,
export_as_text=False,
exports_to_keep=None):
"""Create an ExportStrategy for use with Experiment."""
def export_fn(estimator, export_dir_base):
"""Exports the given Estimator as a SavedModel."""
export_result = estimator.export_savedmodel(
export_dir_base,
export_input_fn,
default_output_alternative_key=default_output_alternative_key,
assets_extra=assets_extra,
export_as_text=export_as_text,
exports_to_keep=exports_to_keep)
garbage_collect_exports(export_dir_base, exports_to_keep)
return export_result
return export_strategy.ExportStrategy('Servo', export_fn)
| apache-2.0 | -7,599,987,610,211,274,000 | 40.371324 | 80 | 0.730916 | false |
salfab/CouchPotatoServer | couchpotato/core/providers/torrent/yify/__init__.py | 6 | 1482 | from main import Yify
def start():
return Yify()
config = [{
'name': 'yify',
'groups': [
{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'Yify',
'description': 'Free provider, less accurate. Small HD movies, encoded by <a href="https://yify-torrents.com/">Yify</a>.',
'wizard': False,
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': 0
},
{
'name': 'seed_ratio',
'label': 'Seed ratio',
'type': 'float',
'default': 1,
'description': 'Will not be (re)moved until this seed ratio is met.',
},
{
'name': 'seed_time',
'label': 'Seed time',
'type': 'int',
'default': 40,
'description': 'Will not be (re)moved until this seed time (in hours) is met.',
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'type': 'int',
'default': 0,
'description': 'Starting score for each release found via this provider.',
}
],
}
]
}]
| gpl-3.0 | 3,132,631,932,905,831,000 | 31.217391 | 134 | 0.360999 | false |
SUSE/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/compute/v2015_06_15/models/virtual_machine_scale_set_vm_extensions_summary.py | 2 | 1407 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineScaleSetVMExtensionsSummary(Model):
"""Extensions summary for virtual machines of a virtual machine scale set.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar name: The extension name.
:vartype name: str
:ivar statuses_summary: The extensions information.
:vartype statuses_summary: list of :class:`VirtualMachineStatusCodeCount
<azure.mgmt.compute.compute.v2015_06_15.models.VirtualMachineStatusCodeCount>`
"""
_validation = {
'name': {'readonly': True},
'statuses_summary': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'statuses_summary': {'key': 'statusesSummary', 'type': '[VirtualMachineStatusCodeCount]'},
}
def __init__(self):
self.name = None
self.statuses_summary = None
| mit | -8,594,863,412,857,004,000 | 34.175 | 98 | 0.616205 | false |
fintech-circle/edx-platform | common/djangoapps/third_party_auth/settings.py | 2 | 4345 | """Settings for the third-party auth module.
The flow for settings registration is:
The base settings file contains a boolean, ENABLE_THIRD_PARTY_AUTH, indicating
whether this module is enabled. startup.py probes the ENABLE_THIRD_PARTY_AUTH.
If true, it:
a) loads this module.
b) calls apply_settings(), passing in the Django settings
"""
from openedx.features.enterprise_support.api import insert_enterprise_pipeline_elements
_FIELDS_STORED_IN_SESSION = ['auth_entry', 'next']
_MIDDLEWARE_CLASSES = (
'third_party_auth.middleware.ExceptionMiddleware',
'third_party_auth.middleware.PipelineQuarantineMiddleware',
)
_SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/dashboard'
def apply_settings(django_settings):
"""Set provider-independent settings."""
    # Whitelisted URL query parameters retained in the pipeline session.
# Params not in this whitelist will be silently dropped.
django_settings.FIELDS_STORED_IN_SESSION = _FIELDS_STORED_IN_SESSION
# Inject exception middleware to make redirects fire.
django_settings.MIDDLEWARE_CLASSES += _MIDDLEWARE_CLASSES
# Where to send the user if there's an error during social authentication
# and we cannot send them to a more specific URL
# (see middleware.ExceptionMiddleware).
django_settings.SOCIAL_AUTH_LOGIN_ERROR_URL = '/'
# Where to send the user once social authentication is successful.
django_settings.SOCIAL_AUTH_LOGIN_REDIRECT_URL = _SOCIAL_AUTH_LOGIN_REDIRECT_URL
# Inject our customized auth pipeline. All auth backends must work with
# this pipeline.
django_settings.SOCIAL_AUTH_PIPELINE = [
'third_party_auth.pipeline.parse_query_params',
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
'third_party_auth.pipeline.associate_by_email_if_login_api',
'social.pipeline.user.get_username',
'third_party_auth.pipeline.set_pipeline_timeout',
'third_party_auth.pipeline.ensure_user_information',
'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details',
'third_party_auth.pipeline.set_logged_in_cookies',
'third_party_auth.pipeline.login_analytics',
]
# Add enterprise pipeline elements if the enterprise app is installed
insert_enterprise_pipeline_elements(django_settings.SOCIAL_AUTH_PIPELINE)
# Required so that we can use unmodified PSA OAuth2 backends:
django_settings.SOCIAL_AUTH_STRATEGY = 'third_party_auth.strategy.ConfigurationModelStrategy'
# We let the user specify their email address during signup.
django_settings.SOCIAL_AUTH_PROTECTED_USER_FIELDS = ['email']
# Disable exceptions by default for prod so you get redirect behavior
# instead of a Django error page. During development you may want to
# enable this when you want to get stack traces rather than redirections.
django_settings.SOCIAL_AUTH_RAISE_EXCEPTIONS = False
# Allow users to login using social auth even if their account is not verified yet
# This is required since we [ab]use django's 'is_active' flag to indicate verified
# accounts; without this set to True, python-social-auth won't allow us to link the
# user's account to the third party account during registration (since the user is
# not verified at that point).
# We also generally allow unverified third party auth users to login (see the logic
# in ensure_user_information in pipeline.py) because otherwise users who use social
# auth to register with an invalid email address can become "stuck".
# TODO: Remove the following if/when email validation is separated from the is_active flag.
django_settings.SOCIAL_AUTH_INACTIVE_USER_LOGIN = True
django_settings.SOCIAL_AUTH_INACTIVE_USER_URL = '/auth/inactive'
# Context processors required under Django.
django_settings.SOCIAL_AUTH_UUID_LENGTH = 4
django_settings.DEFAULT_TEMPLATE_ENGINE['OPTIONS']['context_processors'] += (
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
)
| agpl-3.0 | 6,532,993,894,143,038,000 | 46.228261 | 97 | 0.733257 | false |
jzaremba/sima | runtests.py | 3 | 13217 | #!/usr/bin/env python
"""
runtests.py [OPTIONS] [-- ARGS]
Run tests, building the project first.
Examples::
$ python runtests.py
$ python runtests.py -s {SAMPLE_SUBMODULE}
$ python runtests.py -t {SAMPLE_TEST}
$ python runtests.py --ipython
$ python runtests.py --python somescript.py
$ python runtests.py --bench
Run a debugger:
$ gdb --args python runtests.py [...other args...]
Generate C code coverage listing under build/lcov/:
(requires http://ltp.sourceforge.net/coverage/lcov.php)
$ python runtests.py --gcov [...other args...]
$ python runtests.py --lcov-html
"""
import sys
import os
import shutil
import subprocess
import time
import imp
from argparse import ArgumentParser, REMAINDER
#
# This is a generic test runner script for projects using Numpy's test
# framework. Change the following values to adapt to your project:
#
PROJECT_MODULE = "sima"
PROJECT_ROOT_FILES = ['sima', 'license.txt', 'setup.py']
SAMPLE_TEST = "sima/tests/test_imaging.py:test_ImagingDataset_2d"
SAMPLE_SUBMODULE = "motion"
EXTRA_PATH = ['/usr/lib/ccache', '/usr/lib/f90cache',
'/usr/local/lib/ccache', '/usr/local/lib/f90cache']
# ---------------------------------------------------------------------
if __doc__ is None:
__doc__ = "Run without -OO if you want usage info"
else:
__doc__ = __doc__.format(**globals())
# In case we are run from the source directory, we don't want to import the
# project from there:
sys.path.pop(0)
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("--verbose", "-v", action="count", default=1,
help="more verbosity")
parser.add_argument(
"--no-build",
"-n",
action="store_true",
default=False,
help="do not build the project (use system installed version)")
parser.add_argument(
"--build-only",
"-b",
action="store_true",
default=False,
help="just build, do not run any tests")
parser.add_argument("--doctests", action="store_true", default=False,
help="Run doctests in module")
parser.add_argument(
"--coverage",
action="store_true",
default=False,
help=(
"report coverage of project code. HTML output goes "
"under build/coverage"))
parser.add_argument(
"--gcov",
action="store_true",
default=False,
help=(
"enable C code coverage via gcov (requires GCC). "
"gcov output goes to build/**/*.gc*"))
parser.add_argument("--lcov-html", action="store_true", default=False,
help=("produce HTML for C code coverage information "
"from a previous run with --gcov. "
"HTML output goes to build/lcov/"))
parser.add_argument("--mode", "-m", default="fast",
help="'fast', 'full', or something that could be "
"passed to nosetests -A [default: fast]")
parser.add_argument(
"--submodule",
"-s",
default=None,
help="Submodule whose tests to run (cluster, constants, ...)")
parser.add_argument("--pythonpath", "-p", default=None,
help="Paths to prepend to PYTHONPATH")
parser.add_argument("--tests", "-t", action='append',
help="Specify tests to run")
parser.add_argument("--python", action="store_true",
help="Start a Python shell with PYTHONPATH set")
parser.add_argument("--ipython", "-i", action="store_true",
help="Start IPython shell with PYTHONPATH set")
parser.add_argument("--shell", action="store_true",
help="Start Unix shell with PYTHONPATH set")
parser.add_argument("--debug", "-g", action="store_true",
help="Debug build")
parser.add_argument("--show-build-log", action="store_true",
help="Show build output rather than using a log file")
parser.add_argument("--bench", action="store_true",
help="Run benchmark suite instead of test suite")
parser.add_argument("args", metavar="ARGS", default=[], nargs=REMAINDER,
help="Arguments to pass to Nose, Python or shell")
args = parser.parse_args(argv)
if args.lcov_html:
# generate C code coverage output
lcov_generate()
sys.exit(0)
if args.pythonpath:
for p in reversed(args.pythonpath.split(os.pathsep)):
sys.path.insert(0, p)
if args.gcov:
gcov_reset_counters()
if not args.no_build:
site_dir = build_project(args)
sys.path.insert(0, site_dir)
os.environ['PYTHONPATH'] = site_dir
extra_argv = args.args[:]
if extra_argv and extra_argv[0] == '--':
extra_argv = extra_argv[1:]
if args.python:
if extra_argv:
# Don't use subprocess, since we don't want to include the
# current path in PYTHONPATH.
sys.argv = extra_argv
with open(extra_argv[0], 'r') as f:
script = f.read()
sys.modules['__main__'] = imp.new_module('__main__')
ns = dict(__name__='__main__',
__file__=extra_argv[0])
exec_(script, ns)
sys.exit(0)
else:
import code
code.interact()
sys.exit(0)
if args.ipython:
import IPython
IPython.embed(user_ns={})
sys.exit(0)
if args.shell:
shell = os.environ.get('SHELL', 'sh')
print("Spawning a Unix shell...")
os.execv(shell, [shell] + extra_argv)
sys.exit(1)
if args.coverage:
dst_dir = os.path.join(ROOT_DIR, 'build', 'coverage')
fn = os.path.join(dst_dir, 'coverage_html.js')
if os.path.isdir(dst_dir) and os.path.isfile(fn):
shutil.rmtree(dst_dir)
extra_argv += ['--cover-html',
'--cover-html-dir=' + dst_dir]
test_dir = os.path.join(ROOT_DIR, 'build', 'test')
if args.build_only:
sys.exit(0)
elif args.submodule:
modname = PROJECT_MODULE + '.' + args.submodule
try:
__import__(modname)
if args.bench:
test = sys.modules[modname].bench
else:
test = sys.modules[modname].test
except (ImportError, KeyError, AttributeError) as e:
print("Cannot run tests for %s (%s)" % (modname, e))
sys.exit(2)
elif args.tests:
def fix_test_path(x):
# fix up test path
p = x.split(':')
p[0] = os.path.relpath(os.path.abspath(p[0]),
test_dir)
return ':'.join(p)
tests = [fix_test_path(x) for x in args.tests]
def test(*a, **kw):
extra_argv = kw.pop('extra_argv', ())
extra_argv = extra_argv + tests[1:]
kw['extra_argv'] = extra_argv
from numpy.testing import Tester
if args.bench:
return Tester(tests[0]).bench(*a, **kw)
else:
return Tester(tests[0]).test(*a, **kw)
else:
__import__(PROJECT_MODULE)
if args.bench:
test = sys.modules[PROJECT_MODULE].bench
else:
test = sys.modules[PROJECT_MODULE].test
# Run the tests under build/test
try:
shutil.rmtree(test_dir)
except OSError:
pass
try:
os.makedirs(test_dir)
except OSError:
pass
cwd = os.getcwd()
try:
os.chdir(test_dir)
if args.bench:
result = test(args.mode,
verbose=args.verbose,
extra_argv=extra_argv)
else:
result = test(args.mode,
verbose=args.verbose,
extra_argv=extra_argv,
doctests=args.doctests,
coverage=args.coverage)
finally:
os.chdir(cwd)
if isinstance(result, bool):
sys.exit(0 if result else 1)
elif result.wasSuccessful():
sys.exit(0)
else:
sys.exit(1)
def build_project(args):
"""
Build a dev version of the project.
Returns
-------
site_dir
site-packages directory where it was installed
"""
root_ok = [os.path.exists(os.path.join(ROOT_DIR, fn))
for fn in PROJECT_ROOT_FILES]
if not all(root_ok):
print("To build the project, run runtests.py in "
"git checkout or unpacked source")
sys.exit(1)
dst_dir = os.path.join(ROOT_DIR, 'build', 'testenv')
env = dict(os.environ)
cmd = [sys.executable, 'setup.py']
# Always use ccache, if installed
env['PATH'] = os.pathsep.join(
EXTRA_PATH + env.get('PATH', '').split(os.pathsep))
if args.debug or args.gcov:
# assume everyone uses gcc/gfortran
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
if args.gcov:
import distutils.sysconfig
cvars = distutils.sysconfig.get_config_vars()
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
env['CC'] = cvars['CC'] + ' --coverage'
env['CXX'] = cvars['CXX'] + ' --coverage'
env['F77'] = 'gfortran --coverage '
env['F90'] = 'gfortran --coverage '
env['LDSHARED'] = cvars['LDSHARED'] + ' --coverage'
env['LDFLAGS'] = " ".join(
cvars['LDSHARED'].split()[1:]) + ' --coverage'
cmd += ["build"]
cmd += ['install', '--prefix=' + dst_dir]
log_filename = os.path.join(ROOT_DIR, 'build.log')
if args.show_build_log:
ret = subprocess.call(cmd, env=env, cwd=ROOT_DIR)
else:
log_filename = os.path.join(ROOT_DIR, 'build.log')
print("Building, see build.log...")
with open(log_filename, 'w') as log:
p = subprocess.Popen(cmd, env=env, stdout=log, stderr=log,
cwd=ROOT_DIR)
# Wait for it to finish, and print something to indicate the
# process is alive, but only if the log file has grown (to
            # allow continuous integration environments to kill a hanging
# process accurately if it produces no output)
last_blip = time.time()
last_log_size = os.stat(log_filename).st_size
while p.poll() is None:
time.sleep(0.5)
if time.time() - last_blip > 60:
log_size = os.stat(log_filename).st_size
if log_size > last_log_size:
print(" ... build in progress")
last_blip = time.time()
last_log_size = log_size
ret = p.wait()
if ret == 0:
print("Build OK")
else:
if not args.show_build_log:
with open(log_filename, 'r') as f:
print(f.read())
print("Build failed!")
sys.exit(1)
from distutils.sysconfig import get_python_lib
site_dir = get_python_lib(prefix=dst_dir, plat_specific=True)
return site_dir
#
# GCOV support
#
def gcov_reset_counters():
print("Removing previous GCOV .gcda files...")
build_dir = os.path.join(ROOT_DIR, 'build')
for dirpath, dirnames, filenames in os.walk(build_dir):
for fn in filenames:
if fn.endswith('.gcda') or fn.endswith('.da'):
pth = os.path.join(dirpath, fn)
os.unlink(pth)
#
# LCOV support
#
LCOV_OUTPUT_FILE = os.path.join(ROOT_DIR, 'build', 'lcov.out')
LCOV_HTML_DIR = os.path.join(ROOT_DIR, 'build', 'lcov')
def lcov_generate():
try:
os.unlink(LCOV_OUTPUT_FILE)
except OSError:
pass
try:
shutil.rmtree(LCOV_HTML_DIR)
except OSError:
pass
print("Capturing lcov info...")
subprocess.call(['lcov', '-q', '-c',
'-d', os.path.join(ROOT_DIR, 'build'),
'-b', ROOT_DIR,
'--output-file', LCOV_OUTPUT_FILE])
print("Generating lcov HTML output...")
ret = subprocess.call(['genhtml', '-q', LCOV_OUTPUT_FILE,
'--output-directory', LCOV_HTML_DIR,
'--legend', '--highlight'])
if ret != 0:
print("genhtml failed!")
else:
print("HTML output generated under build/lcov/")
#
# Python 3 support
#
if sys.version_info[0] >= 3:
import builtins
exec_ = getattr(builtins, "exec")
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
if __name__ == "__main__":
main(argv=sys.argv[1:])
| gpl-2.0 | -3,302,051,674,599,771,600 | 30.544153 | 78 | 0.536355 | false |
WhittKinley/aima-python | submissions/Kinley/Actual Project/GUI.py | 2 | 6660 | from tkinter import *
import ConnectFour
from ConnectFour import C4Game
from random import randint
import games
g = C4Game
class GUI:
elementSize = 50
gridBorder = 3
gridColor = "#000000"
p1Color = "#FF0000"
p2Color = "#FFFF00"
backgroundColor = "#add8e6"
gameOn = False
def __init__(self, master):
self.master = master
master.title('Connect Four')
label = Label(master, text="Connect Four", font=("Times New Roman", 50))
label.grid(row=0,column=1)
player1label = Label(master,text="If Player 1 is Computer")
player2label = Label(master,text="If Player 2 is Computer")
player1button1 = Button(master,text="Click Here!", command=self.cpuDrop1)
player2button1 = Button(master,text="Click Here!",command=self.cpuDrop2)
player1label.grid(row=2,column=0,)
player2label.grid(row=2,column=2)
player1button1.grid(row=3,column=0,)
player2button1.grid(row=3,column=2)
button = Button(master, text="New Game!", command=self._newGameButton)
button.grid(row=3,column=1)
self.canvas = Canvas(master, width=200, height=50, background=self.backgroundColor, highlightthickness=0)
self.canvas.grid(row=5,column=1)
self.currentPlayerVar = StringVar(self.master, value="")
self.currentPlayerLabel = Label(self.master, textvariable=self.currentPlayerVar, anchor=W)
self.currentPlayerLabel.grid(row=6,column=1)
self.canvas.bind('<Button-1>', self._canvasClick)
self.newGame()
def cpuDrop1(self):
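        # Computer move for player 1: when it is player 1's turn, adrop()
        # drops a piece into a random column, then the board is redrawn and
        # the game-over state is checked.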
if(self.gameState.first_player == True):
if not self.gameOn: return
if self.gameState.game_over: return
self.adrop(self)
self.master.update()
self.drawGrid()
self.draw()
self._updateCurrentPlayer()
if self.gameState.game_over:
x = self.canvas.winfo_width() // 2
y = self.canvas.winfo_height() // 2
if self.gameState.game_over == 'draw':
t = 'DRAW!'
else:
winner = self.p1 if self.gameState.first_player else self.p2
t = winner + ' won!'
self.canvas.create_text(x, y, text=t, font=("Helvetica", 32), fill="#333")
def cpuDrop2(self):
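        # Computer move for player 2: when it is player 2's turn, bdrop()
        # chooses a column with an alpha-beta search (depth 4) from the games
        # module, then the board is redrawn and the game-over state is checked.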
if(self.gameState.first_player == False):
if not self.gameOn: return
if self.gameState.game_over: return
self.bdrop(self)
self.master.update()
self.drawGrid()
self.draw()
self._updateCurrentPlayer()
if self.gameState.game_over:
x = self.canvas.winfo_width() // 2
y = self.canvas.winfo_height() // 2
if self.gameState.game_over == 'draw':
t = 'DRAW!'
else:
winner = self.p1 if self.gameState.first_player else self.p2
t = winner + ' won!'
self.canvas.create_text(x, y, text=t, font=("Helvetica", 32), fill="#333")
def draw(self):
for c in range(self.gameState.size['c']):
for r in range(self.gameState.size['r']):
if r >= len(self.gameState.grid[c]): continue
x0 = c * self.elementSize
y0 = r * self.elementSize
x1 = (c + 1) * self.elementSize
y1 = (r + 1) * self.elementSize
fill = self.p1Color if self.gameState.grid[c][r] == self.gameState.players[True] else self.p2Color
self.canvas.create_oval(x0 + 2,
self.canvas.winfo_height() - (y0 + 2),
x1 - 2,
self.canvas.winfo_height() - (y1 - 2),
fill=fill, outline=self.gridColor)
def drawGrid(self):
x0, x1 = 0, self.canvas.winfo_width()
for r in range(1, self.gameState.size['r']):
y = r * self.elementSize
self.canvas.create_line(x0, y, x1, y, fill=self.gridColor)
y0, y1 = 0, self.canvas.winfo_height()
for c in range(1, self.gameState.size['c']):
x = c * self.elementSize
self.canvas.create_line(x, y0, x, y1, fill=self.gridColor)
def drop(self, column):
return self.gameState.drop(column)
def adrop(self,column):
if(self.gameState.first_player):
guess = randint(0,6)
return self.gameState.drop(guess)
else:
return self.gameState.drop(column)
def bdrop(self, column):
if(self.gameState.first_player):
return self.gameState.drop(column)
else:
guess = games.alphabeta_search(self.gameState, self.game, 4)
return self.gameState.drop(guess)
def newGame(self):
self.p1 = 'Player 1'
self.p2 = 'Player 2'
columns = 7
rows = 6
self.gameState = ConnectFour.ConnectFour(columns=columns, rows=rows)
self.game = ConnectFour.C4Game(self.gameState)
self.canvas.delete(ALL)
self.canvas.config(width=(self.elementSize) * self.gameState.size['c'],
height=(self.elementSize) * self.gameState.size['r'])
self.master.update()
self.drawGrid()
self.draw()
self._updateCurrentPlayer()
self.gameOn = True
def _updateCurrentPlayer(self):
p = self.p1 if self.gameState.first_player else self.p2
self.currentPlayerVar.set('Current player: ' + p)
def _canvasClick(self, event):
if not self.gameOn: return
if self.gameState.game_over: return
c = event.x // self.elementSize
if (0 <= c < self.gameState.size['c']):
self.drop(c)
self.draw()
self._updateCurrentPlayer()
if self.gameState.game_over:
x = self.canvas.winfo_width() // 2
y = self.canvas.winfo_height() // 2
if self.gameState.game_over == 'draw':
t = 'DRAW!'
else:
winner = self.p1 if self.gameState.first_player else self.p2
t = winner + ' won!'
self.canvas.create_text(175, y-120, text=t, font=("Times New Roman", 42), fill="#333")
def _newGameButton(self):
self.newGame()
def check_win(self, board):
if board[0] == 0 and board[1] == 0 and board[2] == 0:
return 1
return 0
root = Tk()
app = GUI(root)
root.wm_iconbitmap('4.ico')
root.mainloop() | mit | -5,327,300,413,814,643,000 | 34.057895 | 114 | 0.551201 | false |
naototty/pyflag | src/pyflag/Exgrep.py | 7 | 7228 | #!/usr/bin/env python
# ******************************************************
# Copyright 2004: Commonwealth of Australia.
#
# Developed by the Computer Network Vulnerability Team,
# Information Security Group.
# Department of Defence.
#
# Michael Cohen <scudette@users.sourceforge.net>
#
# ******************************************************
# Version: FLAG $Version: 0.87-pre1 Date: Thu Jun 12 00:48:38 EST 2008$
# ******************************************************
#
# * This program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU General Public License
# * as published by the Free Software Foundation; either version 2
# * of the License, or (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# ******************************************************
""" An extracting Grep implementation
This module will extract files from an image by using their magic.
"""
import re,types
import pyflag.conf
import pyflag.pyflaglog as pyflaglog
config=pyflag.conf.ConfObject()
## This initialises the cut definition stack:
definitions=[]
def add_definition(i):
i["CStartRE"]=re.compile(i["StartRE"])
try:
i["CEndRE"]=re.compile(i["EndRE"])
except: pass
definitions.append(i)
add_definition(dict(
Extension="jpg",
StartRE="\\xff\\xd8....(JFIF|Exif)",
MaxLength=1500000,
Comment="JPEG picture file type",
))
add_definition(dict(
Extension="gif",
StartRE="GIF8[79]a",
MaxLength=50000,
Comment="GIF picture file type",
))
add_definition(dict(
Extension="png",
StartRE="\\x89PNG\\x0d\\x0a\\x1a\\x0a",
EndRE="\\x45\\x4e\\x44\\xae\\x42\\x60\\x82",
MaxLength=500000,
Comment="PNG picture file type",
))
add_definition(dict(
Extension="tif",
StartRE="\\x4d\\x4d\\x00\\x2a\\x00",
MaxLength=1000000,
Comment="TIF picture file type 2",
))
add_definition(dict(
Extension="doc",
StartRE="\\xd0\\xcf\\x11\\xe0",
MaxLength=500000,
Comment="MS Word document",
))
add_definition(dict(
Extension="pdf",
StartRE="%PDF-",
EndRE=".%%EOF\\x0d",
MaxLength=1000000,
Comment="Portable Document Format",
))
add_definition(dict(
Extension="eps",
StartRE="%!PS-Adobe",
EndRE="end.%%.trailer",
MaxLength=1000000,
Comment='Encapsulated Postscript',
))
add_definition(dict(
Extension="eps",
StartRE="%!PS-Adobe",
EndRE="%%EOF.",
MaxLength=1000000,
Comment='Encapsulated Postscript',
))
add_definition(dict(
Extension="ie_hist",
StartRE="Client UrlCache",
MaxLength=300000,
Comment="Internet Explorer URL cache",
))
add_definition(dict(
Extension="url",
StartRE="URL \\x03\\x00\\x00\\x00",
MaxLength=384,
Comment="Internet Explorer URL cache",
))
add_definition(dict(
Extension="wmv",
StartRE="\\x30\\x26\\xb2\\x75\\x8e\\x66",
MaxLength=1000000,
Comment="Windows movie file",
))
add_definition(dict(
Extension="zip",
StartRE= "PK\\x03\\x04",
EndRE="PK\\x05\\x06.{18}",
MaxLength=1000000,
Comment="Zip file",
))
add_definition(dict(
Extension="pst",
StartRE ="!BDNF",
MaxLength = 10000000,
Comment = "Outlook PST File",
))
add_definition(dict(
Extension = 'gz',
StartRE='\x1F\x8B\x08[\x00\x08]',
MaxLength=10000,
Comment = "Gziped files"
))
def add_definition(i):
i["CStartRE"]=re.compile(i["StartRE"])
try:
i["CEndRE"]=re.compile(i["EndRE"])
except: pass
definitions.append(i)
import pyflag.IO as IO
def process_string(string,extension=None):
""" This is just like process except it operates on a string """
for cut in definitions:
offset=0
if extension and cut['Extension'] not in extension: continue
while 1:
match=cut['CStartRE'].search(string,offset)
if match:
offset=match.start()
length=cut['MaxLength']
## If there is an end RE, we try to read the entire length in, and then look for the end to we can adjust the length acurately. This is essential for certain file types which do not tolerate garbage at the end of the file, e.g. pdfs.
if cut.has_key('CEndRE'):
end_match=cut['CEndRE'].search(string,offset)
if end_match:
length=end_match.end()-offset
yield({'offset':offset,'length':length,'type':cut['Extension']})
offset+=1
else:
break
def process(case,subsys,extension=None):
""" A generator to produce all the recoverable files within the io object identified by identifier
@arg subsys: Either an IO object to use, or the string name of an io object that will be opened using IO.open().
@arg extension: A list of extensions we would like to see
"""
if type(subsys)==types.StringType:
io=IO.open(case,subsys)
else:
io=subsys
blocksize=1024*1024*10
windowsize=100
count=0
bytes_read=0
window=''
while(1):
## This implements a sliding window of window bytes to ensure
## we do not miss a signature that was split across blocksize:
try:
data=io.read(blocksize)
if not len(data): break
except IOError:
break
f=window+data
bytes_read+=len(data)
pyflaglog.log(pyflaglog.INFO,"Processed %u Mb" % (bytes_read/1024/1024))
for cut in definitions:
if extension and cut['Extension'] not in extension: continue
pos=0
while pos<blocksize:
match=cut['CStartRE'].search(f,pos)
if match:
offset=match.start()+count-len(window)
length=cut['MaxLength']
## If there is an end RE, we try to read the entire length in, and then look for the end to we can adjust the length acurately. This is essential for certain file types which do not tolerate garbage at the end of the file, e.g. pdfs.
if cut.has_key('CEndRE'):
tell=io.tell()
io.seek(offset)
file_data=io.read(length)
io.seek(tell)
end_match=cut['CEndRE'].search(file_data,0)
if end_match:
length=end_match.end()
yield({'offset':offset,'length':length,'type':cut['Extension']})
pos=match.start()+1
else:
pos=blocksize
window=f[-windowsize:]
count+=blocksize
io.close()
| gpl-2.0 | -3,298,805,256,687,948,000 | 29.242678 | 253 | 0.585916 | false |
KonradBreitsprecher/espresso | testsuite/observables.py | 1 | 5112 | #
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Tests particle property setters/getters
from __future__ import print_function
import unittest as ut
import espressomd
import numpy as np
from numpy.random import random
from espressomd.interactions import FeneBond
from espressomd.observables import *
class Observables(ut.TestCase):
# Error tolerance when comparing arrays/tuples...
tol = 1E-9
# Handle for espresso system
es = espressomd.System(box_l=[1.0, 1.0, 1.0])
def setUp(self):
if not len(self.es.part):
for i in range(1000):
self.es.part.add(pos=random(3), v=random(3), id=i)
if espressomd.has_features(["MASS"]):
self.es.part[i].mass = random()
if espressomd.has_features(["DIPOLES"]):
self.es.part[i].dip = random(3)
if espressomd.has_features(["ROTATION"]):
self.es.part[i].omega_lab = random(3)
def generate_test_for_pid_observable(
_obs_name, _pprop_name, _agg_type=None):
"""Generates test cases for observables working on particle id lists"""
pprop_name = _pprop_name
obs_name = _obs_name
agg_type = _agg_type
def func(self):
# This code is run at the execution of the generated function.
# It will use the state of the variables in the outer function,
# which was there, when the outer function was called
# Get data from particles
id_list = range(100, 500, 2)
part_data = getattr(self.es.part[id_list], pprop_name)
# Reshape and aggregate to linear array
if len(part_data.shape) > 1:
if (agg_type == "average"):
                    part_data = np.average(part_data, 0)
if (agg_type == "sum"):
part_data = sum(part_data, 0)
part_data = part_data.reshape(part_data.size)
# Data from observable
obs_data = obs_name(ids=id_list).calculate()
np.testing.assert_array_almost_equal(
obs_data,
part_data, err_msg="Data did not agree for observable " +
str(obs_name) +
" and particle property " +
pprop_name, decimal=9)
return func
test_pos = generate_test_for_pid_observable(ParticlePositions, "pos")
test_v = generate_test_for_pid_observable(ParticleVelocities, "v")
test_f = generate_test_for_pid_observable(ParticleForces, "f")
com_force = generate_test_for_pid_observable(ComForce, "f", "sum")
if espressomd.has_features(["DIPOLES"]):
test_mag_dip = generate_test_for_pid_observable(
MagneticDipoleMoment, "dip", "sum")
# This is disabled as it does not currently work
# if espressomd.has_features(["ROTATION"]):
# test_omega_body = generate_test_for_pid_observable(ParticleBodyVelocities,"omega_body")
def test_stress_tensor(self):
s = self.es.analysis.stress_tensor()["total"].reshape(9)
obs_data = np.array(StressTensor().calculate())
np.testing.assert_array_almost_equal(
s,
obs_data,
err_msg="Stress tensor from analysis and observable did not agree",
decimal=9)
def test_com_position(self):
if espressomd.has_features(["MASS"]):
com = sum(
(self.es.part[:].mass * self.es.part[:].pos.T).T, 0) / sum(self.es.part[:].mass)
else:
com = sum((self.es.part[:].pos.T).T, 0) / len(self.es.part)
obs_data = ComPosition(ids=range(1000)).calculate()
np.testing.assert_array_almost_equal(
com, obs_data, err_msg="Center of mass observable wrong value", decimal=9)
def test_com_velocity(self):
if espressomd.has_features(["MASS"]):
com_vel = sum(
(self.es.part[:].mass * self.es.part[:].v.T).T, 0) / sum(self.es.part[:].mass)
else:
com_vel = sum((self.es.part[:].v.T).T, 0) / len(self.es.part)
obs_data = ComVelocity(ids=range(1000)).calculate()
np.testing.assert_array_almost_equal(
com_vel,
obs_data,
err_msg="Center of mass velocity observable wrong value",
decimal=9)
if __name__ == "__main__":
#print("Features: ", espressomd.features())
ut.main()
| gpl-3.0 | -8,551,781,302,959,550,000 | 37.43609 | 96 | 0.602504 | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Ops/PyScripts/nsg.py | 1 | 3405 |
import dsz
import traceback, sys
import re
import ops.cmd
import os.path
from ops.pprint import pprint
def main():
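    # Lists live network connections (netconnections -list), merges in each
    # owning process's PPID, path and user from the process list, and prints
    # only the rows matching the optional regex given as the first argument.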
connection_list = []
proc_list = []
ppid = ''
path = ''
user = ''
if (len(sys.argv) > 1):
pattern = (('.*' + sys.argv[1]) + '.*')
else:
pattern = '.*'
print (('\nFiltering connections with regex:: ' + pattern) + '\n')
regex = re.compile(pattern, (re.I | re.UNICODE))
dsz.control.echo.Off()
cmd = ops.cmd.getDszCommand('netconnections -list')
conn_items = cmd.execute()
if cmd.success:
proc_list = getProcList()
for conn_item in conn_items.initialconnectionlistitem.connectionitem:
type = conn_item.type.encode('utf-8')
pid = str(conn_item.pid)
state = conn_item.state.encode('utf-8')
valid = conn_item.valid
remote_type = str(conn_item.remote.type)
remote_port = str(conn_item.remote.port)
remote_address = str(conn_item.remote.address)
local_type = conn_item.local.type.encode('utf-8')
local_port = str(conn_item.local.port)
local_address = str(conn_item.local.address)
print_local_address = ''
if ((len(local_address) > 0) and (local_address != 'None')):
print_local_address = ((local_address + ':') + local_port)
else:
print_local_address = '*.*'
if ((len(remote_address) > 0) and (remote_address != 'None')):
print_remote_address = ((remote_address + ':') + remote_port)
else:
print_remote_address = '*.*'
connection = [type, print_local_address, print_remote_address, state, pid, ppid, path, user]
mergeProcessInfo(connection, proc_list)
if regex:
tmp_str = ' '.join(connection)
if re.search(regex, tmp_str):
connection_list.append(connection)
    if len(connection_list) > 0:
pprint(connection_list, header=['TYPE', 'LOCAL', 'REMOTE', 'STATE', 'PID', 'PPID', 'PATH', 'USER'])
dsz.control.echo.On()
def getProcList():
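    # Runs 'processes -list' through dsz and returns a list of
    # [pid, ppid, path, name, user] string entries, or 0 on failure.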
cmd = ops.cmd.getDszCommand('processes -list')
proc_items = cmd.execute()
retval = []
if cmd.success:
for proc_item in proc_items.initialprocesslistitem.processitem:
process = [str(proc_item.id), str(proc_item.parentid), str(proc_item.path.encode('utf-8')), str(proc_item.name.encode('utf-8')), str(proc_item.user.encode('utf-8'))]
retval.append(process)
else:
dsz.ui.Echo('Could not find any processes.', dsz.ERROR)
return 0
return retval
def mergeProcessInfo(connection, proc_list):
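    # Fills in the PPID, path and user columns of a connection row by matching
    # its PID against the entries returned by getProcList(); returns 0 when no
    # process list or connection is available.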
if (proc_list == 0):
dsz.ui.Echo('Could not find any processes.', dsz.ERROR)
return 0
if (connection != None):
for process in filter((lambda x: (x[0] == connection[4])), proc_list):
connection[5] = process[1].encode('utf-8')
connection[6] = os.path.join(process[2], str(process[3]))
connection[7] = process[4]
else:
dsz.ui.Echo('Could not find any processes.', dsz.ERROR)
return 0
return connection
if (__name__ == '__main__'):
usage = 'nsg [regex]\n '
try:
main()
except RuntimeError as e:
dsz.ui.Echo(('\n RuntimeError Occured: %s' % e), dsz.ERROR) | unlicense | -2,748,614,402,388,846,600 | 38.149425 | 177 | 0.56652 | false |
pfmoore/pip | src/pip/_internal/pyproject.py | 6 | 7061 | import os
from collections import namedtuple
from typing import Any, List, Optional
from pip._vendor import toml
from pip._vendor.packaging.requirements import InvalidRequirement, Requirement
from pip._internal.exceptions import InstallationError
def _is_list_of_str(obj):
# type: (Any) -> bool
return (
isinstance(obj, list) and
all(isinstance(item, str) for item in obj)
)
def make_pyproject_path(unpacked_source_directory):
# type: (str) -> str
return os.path.join(unpacked_source_directory, 'pyproject.toml')
BuildSystemDetails = namedtuple('BuildSystemDetails', [
'requires', 'backend', 'check', 'backend_path'
])
def load_pyproject_toml(
use_pep517, # type: Optional[bool]
pyproject_toml, # type: str
setup_py, # type: str
req_name # type: str
):
# type: (...) -> Optional[BuildSystemDetails]
"""Load the pyproject.toml file.
Parameters:
use_pep517 - Has the user requested PEP 517 processing? None
means the user hasn't explicitly specified.
pyproject_toml - Location of the project's pyproject.toml file
setup_py - Location of the project's setup.py file
req_name - The name of the requirement we're processing (for
error reporting)
Returns:
None if we should use the legacy code path, otherwise a tuple
(
requirements from pyproject.toml,
name of PEP 517 backend,
requirements we should check are installed after setting
up the build environment
directory paths to import the backend from (backend-path),
relative to the project root.
)
"""
has_pyproject = os.path.isfile(pyproject_toml)
has_setup = os.path.isfile(setup_py)
if has_pyproject:
with open(pyproject_toml, encoding="utf-8") as f:
pp_toml = toml.load(f)
build_system = pp_toml.get("build-system")
else:
build_system = None
# The following cases must use PEP 517
# We check for use_pep517 being non-None and falsey because that means
# the user explicitly requested --no-use-pep517. The value 0 as
# opposed to False can occur when the value is provided via an
# environment variable or config file option (due to the quirk of
# strtobool() returning an integer in pip's configuration code).
if has_pyproject and not has_setup:
if use_pep517 is not None and not use_pep517:
raise InstallationError(
"Disabling PEP 517 processing is invalid: "
"project does not have a setup.py"
)
use_pep517 = True
elif build_system and "build-backend" in build_system:
if use_pep517 is not None and not use_pep517:
raise InstallationError(
"Disabling PEP 517 processing is invalid: "
"project specifies a build backend of {} "
"in pyproject.toml".format(
build_system["build-backend"]
)
)
use_pep517 = True
# If we haven't worked out whether to use PEP 517 yet,
# and the user hasn't explicitly stated a preference,
# we do so if the project has a pyproject.toml file.
elif use_pep517 is None:
use_pep517 = has_pyproject
# At this point, we know whether we're going to use PEP 517.
assert use_pep517 is not None
# If we're using the legacy code path, there is nothing further
# for us to do here.
if not use_pep517:
return None
if build_system is None:
# Either the user has a pyproject.toml with no build-system
# section, or the user has no pyproject.toml, but has opted in
# explicitly via --use-pep517.
# In the absence of any explicit backend specification, we
# assume the setuptools backend that most closely emulates the
# traditional direct setup.py execution, and require wheel and
# a version of setuptools that supports that backend.
build_system = {
"requires": ["setuptools>=40.8.0", "wheel"],
"build-backend": "setuptools.build_meta:__legacy__",
}
# If we're using PEP 517, we have build system information (either
# from pyproject.toml, or defaulted by the code above).
# Note that at this point, we do not know if the user has actually
# specified a backend, though.
assert build_system is not None
# Ensure that the build-system section in pyproject.toml conforms
# to PEP 518.
error_template = (
"{package} has a pyproject.toml file that does not comply "
"with PEP 518: {reason}"
)
# Specifying the build-system table but not the requires key is invalid
if "requires" not in build_system:
raise InstallationError(
error_template.format(package=req_name, reason=(
"it has a 'build-system' table but not "
"'build-system.requires' which is mandatory in the table"
))
)
# Error out if requires is not a list of strings
requires = build_system["requires"]
if not _is_list_of_str(requires):
raise InstallationError(error_template.format(
package=req_name,
reason="'build-system.requires' is not a list of strings.",
))
# Each requirement must be valid as per PEP 508
for requirement in requires:
try:
Requirement(requirement)
except InvalidRequirement:
raise InstallationError(
error_template.format(
package=req_name,
reason=(
"'build-system.requires' contains an invalid "
"requirement: {!r}".format(requirement)
),
)
)
backend = build_system.get("build-backend")
backend_path = build_system.get("backend-path", [])
check = [] # type: List[str]
if backend is None:
# If the user didn't specify a backend, we assume they want to use
# the setuptools backend. But we can't be sure they have included
# a version of setuptools which supplies the backend, or wheel
# (which is needed by the backend) in their requirements. So we
# make a note to check that those requirements are present once
# we have set up the environment.
# This is quite a lot of work to check for a very specific case. But
# the problem is, that case is potentially quite common - projects that
# adopted PEP 518 early for the ability to specify requirements to
# execute setup.py, but never considered needing to mention the build
# tools themselves. The original PEP 518 code had a similar check (but
# implemented in a different way).
backend = "setuptools.build_meta:__legacy__"
check = ["setuptools>=40.8.0", "wheel"]
return BuildSystemDetails(requires, backend, check, backend_path)
| mit | 4,901,165,454,857,345,000 | 37.584699 | 79 | 0.62739 | false |
livoras/feifanote-server | test/test_db.py | 1 | 1192 | from models.user import User
from models.notebook import Notebook
from models.page import Page
from common import db
from app import app
session = db.session
def setup():
user = User(**dict(
email="iammfw@163.com",
username="jerry",
password="123456",
active_notebook_id=1
))
notebook1 = Notebook(**dict(
user_id=1,
active_page_id=1,
name="notebook1",
index=1
))
notebook2 = Notebook(**dict(
user_id=1,
active_page_id=1,
name="notebook1",
index=2
))
page1 = Page(**dict(
notebook_id=1,
content="This is my first love",
index=1
))
page2 = Page(**dict(
notebook_id=1,
content="This is my first love",
index=2
))
session.add_all([user, notebook1, notebook2, page1, page2])
session.commit()
def test_db():
u = session.query(User).filter_by(id=1).first()
assert u.username == 'jerry'
assert len(u.notebooks) == 2
notebooks = session.query(Notebook).all()
assert len(notebooks) == 2
notebook1 = session.query(Notebook).first()
assert len(notebook1.pages) == 2
| mit | -5,159,109,710,840,907,000 | 20.285714 | 63 | 0.580537 | false |
Kvoti/ditto | fabfile.py | 1 | 3400 | import os
import smtplib
from email.mime.text import MIMEText
from fabric.api import env, cd, run, shell_env, sudo, hosts, execute, settings, local
from fabric.colors import green
env.hosts = ['134.213.147.235']
env.user = 'root'
env.key_filename = '~/.ssh/id_di'
env.forward_agent = True
def deploy(js=False):
if js:
# TODO automatically figure out if produciton build needs updated
# (we don't run webpack watch with produciton settings as that
# generates files for intermediate states. We only want to run it
# once before deployment)
local('./node_modules/.bin/webpack -p --config webpack.prod.config.js')
local('git add webpack-stats-prod.json ditto/static/dist')
# TODO if last commit isn't pushed we could --amend and avoid
# the extra commit
local('git commit -m "Update production assets"')
changes = local('git log heroku/master.. --oneline --no-color --reverse > /tmp/log; cat /tmp/log', capture=True)
local('git push origin master')
local('git push heroku master')
for line in changes.splitlines():
print green(line)
with settings(warn_only=True):
execute(email, changes)
def builddb():
with cd('/srv/venv/ditto/ditto'):
with shell_env(DJANGO_CONFIGURATION='Production', DJANGO_SETTINGS_MODULE='config.production'):
sudo("echo 'drop database app_data;create database app_data' | ../../bin/python manage.py dbshell",
user="pydev")
sudo("echo 'source /usr/lib/mongooseim//lib/ejabberd-2.1.8+mim-1.5.0/priv/mysql.sql' | ../../bin/python manage.py dbshell",
user="pydev")
# Set up data for main site
sudo(' ../../bin/python manage.py migrate',
user="pydev")
sudo(' ../../bin/python manage.py runscript setup_test_data',
user="pydev")
# Delete the mnesia database
sudo('rm -rf /usr/lib/mongooseim/Mnesia*')
# Restart chat so anything cached by the chat server is forgotten
sudo('mongooseimctl restart')
# Set up data for example network for Kvoti
#newnetwork('di')
def newnetwork(name):
# TODO this needs to create the Tenant record in the main 'database'
with cd('/srv/venv/ditto/ditto'):
with shell_env(DJANGO_CONFIGURATION='Production', DJANGO_TENANT=name):
sudo(' ../../bin/python manage.py migrate',
user="pydev")
sudo(' ../../bin/python manage.py runscript setup_test_data',
user="pydev")
sudo(' ../../bin/python manage.py runscript setup_test_form',
user="pydev")
# don't set up chat data for now while we're playing with the chat bot
# sudo(' ../../bin/python manage.py runscript setup_chat_data',
# user="pydev")
@hosts('localhost')
def email(body):
fromaddr = 'mark@kvoti.technology'
toaddrs = ['sarah@kvoti.technology', 'mark@kvoti.technology']
msg = MIMEText(body)
msg['Subject'] = '[DITTO] deployment'
msg['From'] = fromaddr
msg['To'] = ','.join(toaddrs)
username = 'mark@kvoti.technology'
password = os.environ['FAB_EMAIL_PASS']
server = smtplib.SMTP('smtp.gmail.com:587')
server.starttls()
server.login(username, password)
server.sendmail(fromaddr, toaddrs, msg.as_string())
server.quit()
| bsd-3-clause | 8,849,704,638,203,996,000 | 39 | 135 | 0.626176 | false |
edmorley/django | tests/backends/postgresql/tests.py | 17 | 6011 | import unittest
import warnings
from unittest import mock
from django.db import DatabaseError, connection
from django.test import TestCase
@unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL tests')
class Tests(TestCase):
def test_nodb_connection(self):
"""
The _nodb_connection property fallbacks to the default connection
database when access to the 'postgres' database is not granted.
"""
def mocked_connect(self):
if self.settings_dict['NAME'] is None:
raise DatabaseError()
return ''
nodb_conn = connection._nodb_connection
self.assertIsNone(nodb_conn.settings_dict['NAME'])
# Now assume the 'postgres' db isn't available
with warnings.catch_warnings(record=True) as w:
with mock.patch('django.db.backends.base.base.BaseDatabaseWrapper.connect',
side_effect=mocked_connect, autospec=True):
warnings.simplefilter('always', RuntimeWarning)
nodb_conn = connection._nodb_connection
self.assertIsNotNone(nodb_conn.settings_dict['NAME'])
self.assertEqual(nodb_conn.settings_dict['NAME'], connection.settings_dict['NAME'])
# Check a RuntimeWarning has been emitted
self.assertEqual(len(w), 1)
self.assertEqual(w[0].message.__class__, RuntimeWarning)
def test_connect_and_rollback(self):
"""
PostgreSQL shouldn't roll back SET TIME ZONE, even if the first
transaction is rolled back (#17062).
"""
new_connection = connection.copy()
try:
# Ensure the database default time zone is different than
# the time zone in new_connection.settings_dict. We can
# get the default time zone by reset & show.
cursor = new_connection.cursor()
cursor.execute("RESET TIMEZONE")
cursor.execute("SHOW TIMEZONE")
db_default_tz = cursor.fetchone()[0]
new_tz = 'Europe/Paris' if db_default_tz == 'UTC' else 'UTC'
new_connection.close()
# Invalidate timezone name cache, because the setting_changed
# handler cannot know about new_connection.
del new_connection.timezone_name
# Fetch a new connection with the new_tz as default
# time zone, run a query and rollback.
with self.settings(TIME_ZONE=new_tz):
new_connection.set_autocommit(False)
cursor = new_connection.cursor()
new_connection.rollback()
# Now let's see if the rollback rolled back the SET TIME ZONE.
cursor.execute("SHOW TIMEZONE")
tz = cursor.fetchone()[0]
self.assertEqual(new_tz, tz)
finally:
new_connection.close()
def test_connect_non_autocommit(self):
"""
The connection wrapper shouldn't believe that autocommit is enabled
after setting the time zone when AUTOCOMMIT is False (#21452).
"""
new_connection = connection.copy()
new_connection.settings_dict['AUTOCOMMIT'] = False
try:
# Open a database connection.
new_connection.cursor()
self.assertFalse(new_connection.get_autocommit())
finally:
new_connection.close()
def test_connect_isolation_level(self):
"""
The transaction level can be configured with
DATABASES ['OPTIONS']['isolation_level'].
"""
import psycopg2
from psycopg2.extensions import (
ISOLATION_LEVEL_READ_COMMITTED as read_committed,
ISOLATION_LEVEL_SERIALIZABLE as serializable,
)
# Since this is a django.test.TestCase, a transaction is in progress
# and the isolation level isn't reported as 0. This test assumes that
# PostgreSQL is configured with the default isolation level.
# Check the level on the psycopg2 connection, not the Django wrapper.
default_level = read_committed if psycopg2.__version__ < '2.7' else None
self.assertEqual(connection.connection.isolation_level, default_level)
new_connection = connection.copy()
new_connection.settings_dict['OPTIONS']['isolation_level'] = serializable
try:
# Start a transaction so the isolation level isn't reported as 0.
new_connection.set_autocommit(False)
# Check the level on the psycopg2 connection, not the Django wrapper.
self.assertEqual(new_connection.connection.isolation_level, serializable)
finally:
new_connection.close()
def _select(self, val):
with connection.cursor() as cursor:
cursor.execute('SELECT %s', (val,))
return cursor.fetchone()[0]
def test_select_ascii_array(self):
a = ['awef']
b = self._select(a)
self.assertEqual(a[0], b[0])
def test_select_unicode_array(self):
a = ['ᄲawef']
b = self._select(a)
self.assertEqual(a[0], b[0])
def test_lookup_cast(self):
from django.db.backends.postgresql.operations import DatabaseOperations
do = DatabaseOperations(connection=None)
lookups = (
'iexact', 'contains', 'icontains', 'startswith', 'istartswith',
'endswith', 'iendswith', 'regex', 'iregex',
)
for lookup in lookups:
with self.subTest(lookup=lookup):
self.assertIn('::text', do.lookup_cast(lookup))
def test_correct_extraction_psycopg2_version(self):
from django.db.backends.postgresql.base import psycopg2_version
with mock.patch('psycopg2.__version__', '4.2.1 (dt dec pq3 ext lo64)'):
self.assertEqual(psycopg2_version(), (4, 2, 1))
with mock.patch('psycopg2.__version__', '4.2b0.dev1 (dt dec pq3 ext lo64)'):
self.assertEqual(psycopg2_version(), (4, 2))
| bsd-3-clause | -883,465,429,207,508,400 | 39.877551 | 91 | 0.615244 | false |
apagac/robottelo | tests/foreman/ui/test_partitiontable.py | 2 | 6384 | # -*- encoding: utf-8 -*-
"""Test class for Partition Table UI"""
from ddt import ddt
from fauxfactory import gen_string
from robottelo.common.decorators import data, run_only_on, skip_if_bug_open
from robottelo.common.constants import PARTITION_SCRIPT_DATA_FILE
from robottelo.common.helpers import read_data_file, generate_strings_list
from robottelo.test import UITestCase
from robottelo.ui.factory import make_partitiontable
from robottelo.ui.locators import common_locators
from robottelo.ui.session import Session
@run_only_on('sat')
@ddt
class PartitionTable(UITestCase):
"""Implements the partition table tests from UI"""
@data(*generate_strings_list(len1=10))
def test_positive_create_partition_table(self, name):
"""@Test: Create a new partition table
@Feature: Partition table - Positive Create
@Assert: Partition table is created
"""
layout = read_data_file(PARTITION_SCRIPT_DATA_FILE)
os_family = "Red Hat"
with Session(self.browser) as session:
make_partitiontable(session, name=name, layout=layout,
os_family=os_family)
self.assertIsNotNone(self.partitiontable.search(name))
@data(*generate_strings_list(len1=256))
def test_negative_create_partition_table_1(self, name):
"""@Test: Create a new partition table with 256 characters in name
@Feature: Partition table - Negative Create
@Assert: Partition table is not created
"""
layout = read_data_file(PARTITION_SCRIPT_DATA_FILE)
os_family = "Red Hat"
with Session(self.browser) as session:
make_partitiontable(session, name=name, layout=layout,
os_family=os_family)
self.assertIsNotNone(self.partitiontable.wait_until_element
(common_locators["name_haserror"]))
self.assertIsNone(self.partitiontable.search(name))
@data("", " ")
def test_negative_create_partition_table_2(self, name):
"""@Test: Create partition table with blank and whitespace in name
@Feature: Partition table - Negative Create
@Assert: Partition table is not created
"""
layout = read_data_file(PARTITION_SCRIPT_DATA_FILE)
os_family = "Red Hat"
with Session(self.browser) as session:
make_partitiontable(session, name=name, layout=layout,
os_family=os_family)
self.assertIsNotNone(self.partitiontable.wait_until_element
(common_locators["name_haserror"]))
@data(*generate_strings_list(len1=10))
def test_negative_create_partition_table_3(self, name):
"""@Test: Create a new partition table with same name
@Feature: Partition table - Negative Create
@Assert: Partition table is not created
"""
layout = read_data_file(PARTITION_SCRIPT_DATA_FILE)
os_family = "Red Hat"
with Session(self.browser) as session:
make_partitiontable(session, name=name, layout=layout,
os_family=os_family)
self.assertIsNotNone(self.partitiontable.search(name))
make_partitiontable(session, name=name, layout=layout,
os_family=os_family)
self.assertIsNotNone(self.partitiontable.wait_until_element
(common_locators["name_haserror"]))
@data(*generate_strings_list(len1=10))
def test_negative_create_partition_table_4(self, name):
"""@Test: Create a new partition table with empty layout
@Feature: Partition table - Negative Create
@Assert: Partition table is not created
"""
layout = ""
os_family = "Red Hat"
with Session(self.browser) as session:
make_partitiontable(session, name=name, layout=layout,
os_family=os_family)
self.assertIsNotNone(self.partitiontable.wait_until_element
(common_locators["haserror"]))
self.assertIsNone(self.partitiontable.search(name))
@skip_if_bug_open('bugzilla', 1177591)
@data(*generate_strings_list(len1=10))
def test_remove_partition_table(self, name):
"""@Test: Delete a partition table
@Feature: Partition table - Positive Delete
@Assert: Partition table is deleted
"""
layout = "test layout"
os_family = "Red Hat"
with Session(self.browser) as session:
make_partitiontable(session, name=name, layout=layout,
os_family=os_family)
self.assertIsNotNone(self.partitiontable.search(name))
self.partitiontable.delete(name, really=True)
self.assertIsNotNone(self.partitiontable.wait_until_element
(common_locators["notif.success"]))
self.assertIsNone(self.partitiontable.search(name))
@data({u'name': gen_string('alpha'),
u'new_name': gen_string('alpha')},
{u'name': gen_string('html'),
u'new_name': gen_string('html')},
{u'name': gen_string('utf8'),
u'new_name': gen_string('utf8')},
{u'name': gen_string('alphanumeric'),
u'new_name': gen_string('alphanumeric')})
def test_update_partition_table(self, test_data):
"""@Test: Update partition table with its name, layout and OS family
@Feature: Partition table - Positive Update
@Assert: Partition table is updated
"""
layout = "test layout"
new_layout = read_data_file(PARTITION_SCRIPT_DATA_FILE)
os_family = "Debian"
new_os_family = "Red Hat"
with Session(self.browser) as session:
make_partitiontable(session, name=test_data['name'], layout=layout,
os_family=os_family)
self.assertIsNotNone(self.partitiontable.search(test_data['name']))
self.partitiontable.update(test_data['name'],
test_data['new_name'],
new_layout, new_os_family)
self.assertIsNotNone(self.partitiontable.search
(test_data['new_name']))
| gpl-3.0 | -3,273,960,328,868,190,000 | 39.66242 | 79 | 0.605576 | false |
mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/python/mxnet/symbol/symbol.py | 1 | 107410 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name, protected-access, too-many-arguments, too-many-lines
# pylint: disable=import-error, no-name-in-module
"""Symbolic configuration API of MXNet."""
from __future__ import absolute_import as _abs
try:
from __builtin__ import slice as py_slice
except ImportError:
from builtins import slice as py_slice
from array import array
import ctypes
import warnings
from numbers import Number
import numpy as _numpy
from ..attribute import AttrScope
from ..base import _LIB, numeric_types, c_array, c_array_buf, c_str, c_str_array, c_handle_array
from ..base import mx_uint, py_str, string_types, integer_types
from ..base import NDArrayHandle, ExecutorHandle, SymbolHandle
from ..base import check_call, MXNetError, NotImplementedForSymbol
from ..context import Context, current_context
from ..ndarray import NDArray, _DTYPE_NP_TO_MX, _DTYPE_MX_TO_NP, _GRAD_REQ_MAP
from ..ndarray.ndarray import _STORAGE_TYPE_STR_TO_ID
from ..ndarray import _ndarray_cls
from ..executor import Executor
from . import _internal
from . import op
from ._internal import SymbolBase, _set_symbol_class
__all__ = ["Symbol", "var", "Variable", "Group", "load", "load_json",
"pow", "maximum", "minimum", "hypot", "eye", "zeros", "ones", "full", "arange",
"histogram"]
class Symbol(SymbolBase):
"""Symbol is symbolic graph of the mxnet."""
# disable dictionary storage, also do not have parent type.
# pylint: disable=no-member
__slots__ = []
# Make numpy functions return Symbol instead of numpy object array
__array_priority__ = 1000.0
def __repr__(self):
"""Gets a string representation of the symbol."""
name = self.name
if name is None:
name = ', '.join([i.name for i in self])
return '<%s group [%s]>' % (self.__class__.__name__, name)
else:
return '<%s %s>' % (self.__class__.__name__, name)
def __iter__(self):
"""Returns a generator object of symbol.
One can loop through the returned object list to get outputs.
Example
-------
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.Variable('b')
>>> c = a+b
>>> d = mx.sym.Variable('d')
>>> e = d+c
>>> out = e.get_children()
>>> out
<Symbol Grouped>
>>> for i in out:
... i
...
<Symbol d>
<Symbol _plus0>
"""
return (self[i] for i in self.list_outputs())
def __add__(self, other):
"""x.__add__(y) <=> x+y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_add` instead. """
if isinstance(other, Symbol):
return _internal._Plus(self, other)
if isinstance(other, Number):
return _internal._PlusScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __bool__(self):
raise NotImplementedForSymbol(self.__bool__, 'bool')
__nonzero__ = __bool__
def __iadd__(self, other):
raise NotImplementedForSymbol(self.__iadd__, '+=', other, 1)
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
"""x.__sub__(y) <=> x-y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_sub` instead. """
if isinstance(other, Symbol):
return _internal._Minus(self, other)
if isinstance(other, Number):
return _internal._MinusScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __isub__(self, other):
raise NotImplementedForSymbol(self.__isub__, '-=', other)
def __rsub__(self, other):
"""x.__rsub__(y) <=> y-x
Only `NDArray` is supported for now.
Example
-------
>>> x = mx.nd.ones((2,3))*3
>>> y = mx.nd.ones((2,3))
>>> x.__rsub__(y).asnumpy()
array([[-2., -2., -2.],
[-2., -2., -2.]], dtype=float32)
"""
if isinstance(other, Number):
return _internal._RMinusScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __mul__(self, other):
"""x.__mul__(y) <=> x*y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_mul` instead. """
if isinstance(other, Symbol):
return _internal._Mul(self, other)
if isinstance(other, Number):
return _internal._MulScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __imul__(self, other):
raise NotImplementedForSymbol(self.__imul__, '*=', other)
def __rmul__(self, other):
return self.__mul__(other)
def __div__(self, other):
"""x.__div__(y) <=> x/y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_div` instead. """
if isinstance(other, Symbol):
return _internal._Div(self, other)
if isinstance(other, Number):
return _internal._DivScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __rdiv__(self, other):
"""x.__rdiv__(y) <=> y/x
Only `NDArray` is supported for now.
Example
-------
>>> x = mx.nd.ones((2,3))*3
>>> y = mx.nd.ones((2,3))
>>> x.__rdiv__(y).asnumpy()
array([[ 0.33333334, 0.33333334, 0.33333334],
[ 0.33333334, 0.33333334, 0.33333334]], dtype=float32)
"""
if isinstance(other, Number):
return _internal._RDivScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __mod__(self, other):
"""x.__mod__(y) <=> x%y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_mod` instead. """
if isinstance(other, Symbol):
return _internal._Mod(self, other)
if isinstance(other, Number):
return _internal._ModScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __rmod__(self, other):
"""x.__rmod__(y) <=> y%x
Only `NDArray` is supported for now.
Example
-------
>>> x = mx.nd.ones((2,3))*3
>>> y = mx.nd.ones((2,3))
>>> x.__rmod__(y).asnumpy()
        array([[ 1.,  1.,  1.],
               [ 1.,  1.,  1.]], dtype=float32)
"""
if isinstance(other, Number):
return _internal._RModScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __idiv__(self, other):
raise NotImplementedForSymbol(self.__idiv__, '/=', other)
def __truediv__(self, other):
return self.__div__(other)
def __rtruediv__(self, other):
return self.__rdiv__(other)
def __itruediv__(self, other):
raise NotImplementedForSymbol(self.__itruediv__, '/=', other)
def __pow__(self, other):
"""x.__pow__(y) <=> x**y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_pow` instead. """
if isinstance(other, Symbol):
return _internal._Power(self, other)
if isinstance(other, Number):
return _internal._PowerScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __rpow__(self, other):
raise NotImplementedForSymbol(self.__rpow__, 'y**x', other)
def __neg__(self):
"""x.__neg__() <=> -x
Numerical negative, element-wise.
Example
-------
>>> a = mx.sym.Variable('a')
>>> a
<Symbol a>
>>> -a
<Symbol _mulscalar0>
>>> a_neg = a.__neg__()
>>> c = a_neg*b
>>> ex = c.eval(ctx=mx.cpu(), a=mx.nd.ones([2,3]), b=mx.nd.ones([2,3]))
>>> ex[0].asnumpy()
array([[-1., -1., -1.],
[-1., -1., -1.]], dtype=float32)
"""
return self.__mul__(-1.0)
def __copy__(self):
return self.__deepcopy__(None)
def __deepcopy__(self, _):
"""Returns a deep copy of the input object.
This function returns a deep copy of the input object including the current state
of all its parameters such as weights, biases, etc.
Any changes made to the deep copy do not reflect in the original object.
Example
-------
>>> import copy
>>> data = mx.sym.Variable('data')
>>> data_1 = copy.deepcopy(data)
>>> data_1 = 2*data
>>> data_1.tojson()
>>> data_1 is data # Data got modified
False
"""
handle = SymbolHandle()
check_call(_LIB.MXSymbolCopy(self.handle,
ctypes.byref(handle)))
return Symbol(handle)
def __eq__(self, other):
"""x.__eq__(y) <=> x==y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_equal` instead. """
if isinstance(other, Symbol):
return _internal._equal(self, other)
if isinstance(other, numeric_types):
return _internal._equal_scalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __ne__(self, other):
"""x.__ne__(y) <=> x!=y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_not_equal` instead. """
if isinstance(other, Symbol):
return _internal._not_equal(self, other)
if isinstance(other, numeric_types):
return _internal._not_equal_scalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __gt__(self, other):
"""x.__gt__(y) <=> x>y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_greater` instead. """
if isinstance(other, Symbol):
return _internal._greater(self, other)
if isinstance(other, numeric_types):
return _internal._greater_scalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __ge__(self, other):
"""x.__ge__(y) <=> x>=y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_greater_equal` instead. """
if isinstance(other, Symbol):
return _internal._greater_equal(self, other)
if isinstance(other, numeric_types):
return _internal._greater_equal_scalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __lt__(self, other):
"""x.__lt__(y) <=> x<y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_lesser` instead. """
if isinstance(other, Symbol):
return _internal._lesser(self, other)
if isinstance(other, numeric_types):
return _internal._lesser_scalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __le__(self, other):
"""x.__le__(y) <=> x<=y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_lesser_equal` instead. """
if isinstance(other, Symbol):
return _internal._lesser_equal(self, other)
if isinstance(other, numeric_types):
return _internal._lesser_equal_scalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
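    # Illustrative sketch (not part of the original docstrings): the comparison operators
    # above return new Symbols rather than booleans; names are arbitrary examples.
    # >>> a = mx.sym.Variable('a')
    # >>> b = mx.sym.Variable('b')
    # >>> eq = (a == b)     # element-wise equality Symbol
    # >>> gt = (a > 1.0)    # element-wise scalar comparison Symbol
    # >>> ex = eq.eval(ctx=mx.cpu(), a=mx.nd.ones((2,2)), b=mx.nd.ones((2,2)))
    # >>> ex[0].asnumpy()   # 1.0 where the elements are equal, 0.0 otherwise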
def __getstate__(self):
handle = self.handle
if handle is not None:
return {'handle': self.tojson()}
else:
return {'handle': None}
def __setstate__(self, state):
# pylint: disable=assigning-non-slot
handle = state['handle']
if handle is not None:
json_str = handle
handle = SymbolHandle()
check_call(_LIB.MXSymbolCreateFromJSON(c_str(json_str), ctypes.byref(handle)))
self.handle = handle
else:
self.handle = None
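    # Illustrative sketch (not part of the original docstrings): __getstate__/__setstate__
    # make Symbols picklable by round-tripping through their JSON representation.
    # >>> import pickle
    # >>> a = mx.sym.Variable('a')
    # >>> b = pickle.loads(pickle.dumps(a))   # serialized via tojson(), restored from JSON
    # >>> b.list_arguments()
    # ['a']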
def __call__(self, *args, **kwargs):
"""Composes symbol using inputs.
x.__call__(y, z) <=> x(y,z)
This function internally calls `_compose` to compose the symbol and
returns the composed symbol.
Example
-------
>>> data = mx.symbol.Variable('data')
>>> net1 = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)
>>> net2 = mx.symbol.FullyConnected(name='fc3', num_hidden=10)
>>> composed = net2(fc3_data=net1, name='composed')
>>> composed
<Symbol composed>
>>> called = net2.__call__(fc3_data=net1, name='composed')
>>> called
<Symbol composed>
Parameters
----------
args:
Positional arguments.
kwargs:
Keyword arguments.
Returns
-------
The resulting symbol.
"""
s = self.__copy__()
s._compose(*args, **kwargs)
return s
def _compose(self, *args, **kwargs):
"""Composes symbol using inputs.
x._compose(y, z) <=> x(y,z)
This function mutates the current symbol.
Example
-------
>>> data = mx.symbol.Variable('data')
>>> net1 = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)
>>> net2 = mx.symbol.FullyConnected(name='fc3', num_hidden=10)
>>> net2
<Symbol fc3>
>>> net2._compose(fc3_data=net1, name='composed')
>>> net2
<Symbol composed>
Parameters
----------
args:
Positional arguments.
kwargs:
Keyword arguments.
Returns
-------
The resulting symbol.
"""
name = kwargs.pop('name', None)
if name:
name = c_str(name)
if len(args) != 0 and len(kwargs) != 0:
raise TypeError('compose only accept input Symbols \
either as positional or keyword arguments, not both')
for arg in args:
if not isinstance(arg, Symbol):
raise TypeError('Compose expect `Symbol` as arguments')
for val in kwargs.values():
if not isinstance(val, Symbol):
raise TypeError('Compose expect `Symbol` as arguments')
num_args = len(args) + len(kwargs)
if len(kwargs) != 0:
keys = c_str_array(kwargs.keys())
args = c_handle_array(kwargs.values())
else:
keys = None
args = c_handle_array(args)
check_call(_LIB.MXSymbolCompose(
self.handle, name, num_args, keys, args))
def __getitem__(self, index):
"""x.__getitem__(i) <=> x[i]
Returns a sliced view of the input symbol.
Example
-------
>>> a = mx.sym.var('a')
>>> a.__getitem__(0)
<Symbol a>
>>> a[0]
<Symbol a>
Parameters
----------
index : int or str
Indexing key
"""
output_count = len(self)
if isinstance(index, py_slice):
start = 0 if index.start is None else index.start
stop = output_count if index.stop is None else index.stop
step = 1 if index.step is None else index.step
return Group([self[i] for i in range(start, stop, step)])
if isinstance(index, string_types):
# Returning this list of names is expensive. Some symbols may have hundreds of outputs
output_names = self.list_outputs()
idx = None
for i, name in enumerate(output_names):
if name == index:
if idx is not None:
raise ValueError('There are multiple outputs with name \"%s\"' % index)
idx = i
if idx is None:
raise ValueError('Cannot find output that matches name \"%s\"' % index)
index = idx
if not isinstance(index, int):
raise TypeError('Symbol only support integer index to fetch i-th output')
if index >= output_count:
# Important, python determines the end by this exception
raise IndexError
handle = SymbolHandle()
check_call(_LIB.MXSymbolGetOutput(
self.handle, mx_uint(index), ctypes.byref(handle)))
return Symbol(handle=handle)
@property
def name(self):
"""Gets name string from the symbol, this function only works for non-grouped symbol.
Returns
-------
value : str
The name of this symbol, returns ``None`` for grouped symbol.
"""
ret = ctypes.c_char_p()
success = ctypes.c_int()
check_call(_LIB.MXSymbolGetName(
self.handle, ctypes.byref(ret), ctypes.byref(success)))
if success.value != 0:
return py_str(ret.value)
else:
return None
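    # Illustrative sketch (not part of the original docstring):
    # >>> mx.sym.Variable('data').name
    # 'data'
    # >>> print(mx.sym.Group([mx.sym.Variable('a'), mx.sym.Variable('b')]).name)  # grouped symbol
    # None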
def attr(self, key):
"""Returns the attribute string for corresponding input key from the symbol.
This function only works for non-grouped symbols.
Example
-------
>>> data = mx.sym.Variable('data', attr={'mood': 'angry'})
>>> data.attr('mood')
'angry'
Parameters
----------
key : str
The key corresponding to the desired attribute.
Returns
-------
value : str
The desired attribute value, returns ``None`` if the attribute does not exist.
"""
ret = ctypes.c_char_p()
success = ctypes.c_int()
check_call(_LIB.MXSymbolGetAttr(
self.handle, c_str(key), ctypes.byref(ret), ctypes.byref(success)))
if success.value != 0:
return py_str(ret.value)
else:
return None
def list_attr(self, recursive=False):
"""Gets all attributes from the symbol.
Example
-------
>>> data = mx.sym.Variable('data', attr={'mood': 'angry'})
>>> data.list_attr()
{'mood': 'angry'}
Returns
-------
ret : Dict of str to str
A dictionary mapping attribute keys to values.
"""
if recursive:
raise DeprecationWarning("Symbol.list_attr with recursive=True has been deprecated. "
"Please use attr_dict instead.")
size = mx_uint()
pairs = ctypes.POINTER(ctypes.c_char_p)()
f_handle = _LIB.MXSymbolListAttrShallow
check_call(f_handle(self.handle, ctypes.byref(size), ctypes.byref(pairs)))
return {py_str(pairs[i * 2]): py_str(pairs[i * 2 + 1]) for i in range(size.value)}
def attr_dict(self):
"""Recursively gets all attributes from the symbol and its children.
Example
-------
>>> a = mx.sym.Variable('a', attr={'a1':'a2'})
>>> b = mx.sym.Variable('b', attr={'b1':'b2'})
>>> c = a+b
>>> c.attr_dict()
{'a': {'a1': 'a2'}, 'b': {'b1': 'b2'}}
Returns
-------
ret : Dict of str to dict
There is a key in the returned dict for every child with non-empty attribute set.
For each symbol, the name of the symbol is its key in the dict
            and the corresponding value is that symbol's attribute list (itself a dictionary).
"""
size = mx_uint()
pairs = ctypes.POINTER(ctypes.c_char_p)()
f_handle = _LIB.MXSymbolListAttr
check_call(f_handle(self.handle, ctypes.byref(size), ctypes.byref(pairs)))
ret = {}
for i in range(size.value):
name, key = py_str(pairs[i * 2]).split('$')
val = py_str(pairs[i * 2 + 1])
if name not in ret:
ret[name] = {}
ret[name][key] = val
return ret
def _set_attr(self, **kwargs):
"""Sets an attribute of the symbol.
        For example, A._set_attr(foo="bar") adds the mapping ``"{foo: bar}"``
to the symbol's attribute dictionary.
Parameters
----------
**kwargs
The attributes to set
"""
for key, value in kwargs.items():
if not isinstance(value, string_types):
raise ValueError("Set Attr only accepts string values")
check_call(_LIB.MXSymbolSetAttr(
self.handle, c_str(key), c_str(str(value))))
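    # Illustrative sketch (not part of the original docstring): attribute values must be
    # strings; the key/value below are arbitrary examples.
    # >>> data = mx.sym.Variable('data')
    # >>> data._set_attr(mood='angry')
    # >>> data.attr('mood')
    # 'angry'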
def get_internals(self):
"""Gets a new grouped symbol `sgroup`. The output of `sgroup` is a list of
outputs of all of the internal nodes.
Consider the following code:
Example
-------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> d = c.get_internals()
>>> d
<Symbol Grouped>
>>> d.list_outputs()
['a', 'b', '_plus4_output']
Returns
-------
sgroup : Symbol
A symbol group containing all internal and leaf nodes of the computation graph
used to compute the symbol.
"""
handle = SymbolHandle()
check_call(_LIB.MXSymbolGetInternals(
self.handle, ctypes.byref(handle)))
return Symbol(handle=handle)
def get_children(self):
"""Gets a new grouped symbol whose output contains
inputs to output nodes of the original symbol.
Example
-------
>>> x = mx.sym.Variable('x')
>>> y = mx.sym.Variable('y')
>>> z = mx.sym.Variable('z')
>>> a = y+z
>>> b = x+a
>>> b.get_children()
<Symbol Grouped>
>>> b.get_children().list_outputs()
['x', '_plus10_output']
>>> b.get_children().get_children().list_outputs()
['y', 'z']
Returns
-------
sgroup : Symbol or None
The children of the head node. If the symbol has no
inputs then ``None`` will be returned.
"""
handle = SymbolHandle()
check_call(_LIB.MXSymbolGetChildren(
self.handle, ctypes.byref(handle)))
ret = Symbol(handle=handle)
if len(ret.list_outputs()) == 0:
return None
return ret
def list_arguments(self):
"""Lists all the arguments in the symbol.
Example
-------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
        >>> c.list_arguments()
['a', 'b']
Returns
-------
args : list of string
List containing the names of all the arguments required to compute the symbol.
"""
size = ctypes.c_uint()
sarr = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXSymbolListArguments(
self.handle, ctypes.byref(size), ctypes.byref(sarr)))
return [py_str(sarr[i]) for i in range(size.value)]
def list_outputs(self):
"""Lists all the outputs in the symbol.
Example
-------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> c.list_outputs()
['_plus12_output']
Returns
-------
list of str
List of all the outputs.
For most symbols, this list contains only the name of this symbol.
For symbol groups, this is a list with the names of all symbols
in the group.
"""
size = ctypes.c_uint()
sarr = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXSymbolListOutputs(
self.handle, ctypes.byref(size), ctypes.byref(sarr)))
return [py_str(sarr[i]) for i in range(size.value)]
# pylint: disable=invalid-length-returned
def __len__(self):
"""Get number of outputs for the symbol.
Example
-------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
        >>> len(c)
        1
        Returns
        -------
        int
            Number of outputs of the symbol.
"""
output_count = mx_uint()
check_call(_LIB.MXSymbolGetNumOutputs(self.handle, ctypes.byref(output_count)))
return output_count.value
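    # Illustrative sketch (not part of the original docstring): a grouped symbol reports
    # one output per member.
    # >>> g = mx.sym.Group([mx.sym.Variable('a'), mx.sym.Variable('b')])
    # >>> len(g)
    # 2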
def list_auxiliary_states(self):
"""Lists all the auxiliary states in the symbol.
Example
-------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> c.list_auxiliary_states()
[]
Example of auxiliary states in `BatchNorm`.
>>> data = mx.symbol.Variable('data')
>>> weight = mx.sym.Variable(name='fc1_weight')
>>> fc1 = mx.symbol.FullyConnected(data = data, weight=weight, name='fc1', num_hidden=128)
>>> fc2 = mx.symbol.BatchNorm(fc1, name='batchnorm0')
>>> fc2.list_auxiliary_states()
['batchnorm0_moving_mean', 'batchnorm0_moving_var']
Returns
-------
aux_states : list of str
List of the auxiliary states in input symbol.
Notes
-----
Auxiliary states are special states of symbols that do not correspond to an argument,
and are not updated by gradient descent. Common examples of auxiliary states
include the `moving_mean` and `moving_variance` in `BatchNorm`.
Most operators do not have auxiliary states.
"""
size = ctypes.c_uint()
sarr = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXSymbolListAuxiliaryStates(
self.handle, ctypes.byref(size), ctypes.byref(sarr)))
return [py_str(sarr[i]) for i in range(size.value)]
def list_inputs(self):
"""Lists all arguments and auxiliary states of this Symbol.
Returns
-------
inputs : list of str
List of all inputs.
Examples
--------
>>> bn = mx.sym.BatchNorm(name='bn')
>>> bn.list_arguments()
['bn_data', 'bn_gamma', 'bn_beta']
>>> bn.list_auxiliary_states()
['bn_moving_mean', 'bn_moving_var']
>>> bn.list_inputs()
['bn_data', 'bn_gamma', 'bn_beta', 'bn_moving_mean', 'bn_moving_var']
"""
size = ctypes.c_uint()
sarr = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.NNSymbolListInputNames(
self.handle, 0, ctypes.byref(size), ctypes.byref(sarr)))
return [py_str(sarr[i]) for i in range(size.value)]
def infer_type(self, *args, **kwargs):
"""Infers the type of all arguments and all outputs, given the known types
for some arguments.
This function takes the known types of some arguments in either positional way
or keyword argument way as input. It returns a tuple of `None` values
if there is not enough information to deduce the missing types.
Inconsistencies in the known types will cause an error to be raised.
Example
-------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> arg_types, out_types, aux_types = c.infer_type(a='float32')
>>> arg_types
[<type 'numpy.float32'>, <type 'numpy.float32'>]
>>> out_types
[<type 'numpy.float32'>]
>>> aux_types
[]
Parameters
----------
*args :
Type of known arguments in a positional way.
Unknown type can be marked as None.
**kwargs :
Keyword arguments of known types.
Returns
-------
arg_types : list of numpy.dtype or None
List of argument types.
The order is same as the order of list_arguments().
out_types : list of numpy.dtype or None
List of output types.
The order is same as the order of list_outputs().
aux_types : list of numpy.dtype or None
List of auxiliary state types.
The order is same as the order of list_auxiliary_states().
"""
# pylint: disable=too-many-locals
if len(args) != 0 and len(kwargs) != 0:
raise ValueError('Can only specify known argument \
types either by positional or kwargs way.')
sdata = []
if len(args) != 0:
keys = c_array(ctypes.c_char_p, [])
for s in args:
if s is not None:
s = _numpy.dtype(s).type
if s not in _DTYPE_NP_TO_MX:
raise TypeError('Argument need to be one of ' + str(_DTYPE_NP_TO_MX))
sdata.append(_DTYPE_NP_TO_MX[s])
else:
sdata.append(-1)
else:
str_keys = []
for k, v in kwargs.items():
v = _numpy.dtype(v).type
if v in _DTYPE_NP_TO_MX:
str_keys.append(k)
sdata.append(_DTYPE_NP_TO_MX[v])
keys = c_str_array(str_keys)
arg_type_size = mx_uint()
arg_type_data = ctypes.POINTER(ctypes.c_int)()
out_type_size = mx_uint()
out_type_data = ctypes.POINTER(ctypes.c_int)()
aux_type_size = mx_uint()
aux_type_data = ctypes.POINTER(ctypes.c_int)()
complete = ctypes.c_int()
check_call(_LIB.MXSymbolInferType(
self.handle,
mx_uint(len(sdata)),
keys,
c_array_buf(ctypes.c_int, array('i', sdata)),
ctypes.byref(arg_type_size),
ctypes.byref(arg_type_data),
ctypes.byref(out_type_size),
ctypes.byref(out_type_data),
ctypes.byref(aux_type_size),
ctypes.byref(aux_type_data),
ctypes.byref(complete)))
if complete.value != 0:
arg_types = [
_DTYPE_MX_TO_NP[arg_type_data[i]] for i in range(arg_type_size.value)]
out_types = [
_DTYPE_MX_TO_NP[out_type_data[i]] for i in range(out_type_size.value)]
aux_types = [
_DTYPE_MX_TO_NP[aux_type_data[i]] for i in range(aux_type_size.value)]
return (arg_types, out_types, aux_types)
else:
return (None, None, None)
# pylint: enable=too-many-locals
def infer_shape(self, *args, **kwargs):
"""Infers the shapes of all arguments and all outputs given the known shapes of
some arguments.
This function takes the known shapes of some arguments in either positional way
or keyword argument way as input. It returns a tuple of `None` values
if there is not enough information to deduce the missing shapes.
Example
-------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> arg_shapes, out_shapes, aux_shapes = c.infer_shape(a=(3,3))
>>> arg_shapes
[(3L, 3L), (3L, 3L)]
>>> out_shapes
[(3L, 3L)]
>>> aux_shapes
[]
>>> c.infer_shape(a=(0,3)) # 0s in shape means unknown dimensions. So, returns None.
(None, None, None)
Inconsistencies in the known shapes will cause an error to be raised.
See the following example:
>>> data = mx.sym.Variable('data')
>>> out = mx.sym.FullyConnected(data=data, name='fc1', num_hidden=1000)
>>> out = mx.sym.Activation(data=out, act_type='relu')
>>> out = mx.sym.FullyConnected(data=out, name='fc2', num_hidden=10)
>>> weight_shape= (1, 100)
>>> data_shape = (100, 100)
>>> out.infer_shape(data=data_shape, fc1_weight=weight_shape)
Error in operator fc1: Shape inconsistent, Provided=(1,100), inferred shape=(1000,100)
Parameters
----------
*args :
Shape of arguments in a positional way.
Unknown shape can be marked as None.
**kwargs :
Keyword arguments of the known shapes.
Returns
-------
arg_shapes : list of tuple or None
List of argument shapes.
The order is same as the order of list_arguments().
out_shapes : list of tuple or None
List of output shapes.
The order is same as the order of list_outputs().
aux_shapes : list of tuple or None
List of auxiliary state shapes.
The order is same as the order of list_auxiliary_states().
"""
try:
res = self._infer_shape_impl(False, *args, **kwargs)
if res[1] is None:
arg_shapes, _, _ = self._infer_shape_impl(True, *args, **kwargs)
arg_names = self.list_arguments()
unknowns = []
for name, shape in zip(arg_names, arg_shapes):
if not shape or not _numpy.prod(shape):
if len(unknowns) >= 10:
unknowns.append('...')
break
unknowns.append('%s: %s' % (name, str(shape)))
warnings.warn(
"Cannot decide shape for the following arguments " +
"(0s in shape means unknown dimensions). " +
"Consider providing them as input:\n\t" +
"\n\t".join(unknowns), stacklevel=2)
return res
except MXNetError:
print("infer_shape error. Arguments:")
for i, arg in enumerate(args):
print(" #%d: %s" % (i, arg))
for k, v in kwargs.items():
print(" %s: %s" % (k, v))
raise
def infer_shape_partial(self, *args, **kwargs):
"""Infers the shape partially.
This functions works the same way as `infer_shape`,
except that this function can return partial results.
In the following example, information about fc2 is not available. So, `infer_shape`
will return a tuple of `None` values but `infer_shape_partial` will return partial values.
Example
-------
>>> data = mx.sym.Variable('data')
>>> prev = mx.sym.Variable('prev')
>>> fc1 = mx.sym.FullyConnected(data=data, name='fc1', num_hidden=128)
>>> fc2 = mx.sym.FullyConnected(data=prev, name='fc2', num_hidden=128)
>>> out = mx.sym.Activation(data=mx.sym.elemwise_add(fc1, fc2), act_type='relu')
>>> out.list_arguments()
['data', 'fc1_weight', 'fc1_bias', 'prev', 'fc2_weight', 'fc2_bias']
>>> out.infer_shape(data=(10,64))
(None, None, None)
>>> out.infer_shape_partial(data=(10,64))
([(10L, 64L), (128L, 64L), (128L,), (), (), ()], [(10L, 128L)], [])
>>> # infers shape if you give information about fc2
>>> out.infer_shape(data=(10,64), prev=(10,128))
([(10L, 64L), (128L, 64L), (128L,), (10L, 128L), (128L, 128L), (128L,)], [(10L, 128L)], [])
Parameters
----------
*args :
Shape of arguments in a positional way.
Unknown shape can be marked as None
**kwargs :
Keyword arguments of known shapes.
Returns
-------
arg_shapes : list of tuple or None
List of argument shapes.
The order is same as the order of list_arguments().
out_shapes : list of tuple or None
List of output shapes.
The order is same as the order of list_outputs().
aux_shapes : list of tuple or None
List of auxiliary state shapes.
The order is same as the order of list_auxiliary_states().
"""
return self._infer_shape_impl(True, *args, **kwargs)
def _infer_shape_impl(self, partial, *args, **kwargs):
"""The actual implementation for calling shape inference API."""
# pylint: disable=too-many-locals
if len(args) != 0 and len(kwargs) != 0:
raise ValueError('Can only specify known argument \
shapes either by positional or kwargs way.')
sdata = []
indptr = [0]
if len(args) != 0:
keys = c_array(ctypes.c_char_p, [])
for i, s in enumerate(args):
if s is not None:
if not isinstance(s, tuple):
raise TypeError("Arguments need to be shapes (tuple), "
"but argument %d is %s." % (i, type(s)))
sdata.extend(s)
indptr.append(len(sdata))
else:
str_keys = []
for k, v in kwargs.items():
if not isinstance(v, tuple):
raise TypeError("Arguments need to be shapes (tuple), "
"but '%s' is %s." % (k, type(v)))
str_keys.append(k)
sdata.extend(v)
indptr.append(len(sdata))
keys = c_str_array(str_keys)
arg_shape_size = mx_uint()
arg_shape_ndim = ctypes.POINTER(mx_uint)()
arg_shape_data = ctypes.POINTER(ctypes.POINTER(mx_uint))()
out_shape_size = mx_uint()
out_shape_ndim = ctypes.POINTER(mx_uint)()
out_shape_data = ctypes.POINTER(ctypes.POINTER(mx_uint))()
aux_shape_size = mx_uint()
aux_shape_ndim = ctypes.POINTER(mx_uint)()
aux_shape_data = ctypes.POINTER(ctypes.POINTER(mx_uint))()
complete = ctypes.c_int()
if partial:
infer_func = _LIB.MXSymbolInferShapePartial
else:
infer_func = _LIB.MXSymbolInferShape
check_call(infer_func(
self.handle,
mx_uint(len(indptr) - 1),
keys,
c_array_buf(mx_uint, array('I', indptr)),
c_array_buf(mx_uint, array('I', sdata)),
ctypes.byref(arg_shape_size),
ctypes.byref(arg_shape_ndim),
ctypes.byref(arg_shape_data),
ctypes.byref(out_shape_size),
ctypes.byref(out_shape_ndim),
ctypes.byref(out_shape_data),
ctypes.byref(aux_shape_size),
ctypes.byref(aux_shape_ndim),
ctypes.byref(aux_shape_data),
ctypes.byref(complete)))
if complete.value != 0:
arg_shapes = [
tuple(arg_shape_data[i][:arg_shape_ndim[i]]) for i in range(arg_shape_size.value)]
out_shapes = [
tuple(out_shape_data[i][:out_shape_ndim[i]]) for i in range(out_shape_size.value)]
aux_shapes = [
tuple(aux_shape_data[i][:aux_shape_ndim[i]]) for i in range(aux_shape_size.value)]
return (arg_shapes, out_shapes, aux_shapes)
else:
return (None, None, None)
# pylint: enable=too-many-locals
def debug_str(self):
"""Gets a debug string of symbol.
It contains Symbol output, variables and operators in the computation graph
with their inputs, variables and attributes.
Returns
-------
string
Debug string of the symbol.
Examples
--------
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.sin(a)
>>> c = 2 * a + b
>>> d = mx.sym.FullyConnected(data=c, num_hidden=10)
>>> d.debug_str()
>>> print d.debug_str()
Symbol Outputs:
output[0]=fullyconnected0(0)
Variable:a
--------------------
Op:_mul_scalar, Name=_mulscalar0
Inputs:
arg[0]=a(0) version=0
Attrs:
scalar=2
--------------------
Op:sin, Name=sin0
Inputs:
arg[0]=a(0) version=0
--------------------
Op:elemwise_add, Name=_plus0
Inputs:
arg[0]=_mulscalar0(0)
arg[1]=sin0(0)
Variable:fullyconnected0_weight
Variable:fullyconnected0_bias
--------------------
Op:FullyConnected, Name=fullyconnected0
Inputs:
arg[0]=_plus0(0)
arg[1]=fullyconnected0_weight(0) version=0
arg[2]=fullyconnected0_bias(0) version=0
Attrs:
num_hidden=10
"""
debug_str = ctypes.c_char_p()
check_call(_LIB.MXSymbolPrint(
self.handle, ctypes.byref(debug_str)))
return py_str(debug_str.value)
def save(self, fname):
"""Saves symbol to a file.
You can also use pickle to do the job if you only work on python.
The advantage of `load`/`save` functions is that the file contents are language agnostic.
This means the model saved by one language binding can be loaded by a different
language binding of `MXNet`.
        You also get the benefit of being able to directly load/save from cloud storage (S3, HDFS).
Parameters
----------
fname : str
The name of the file.
- "s3://my-bucket/path/my-s3-symbol"
- "hdfs://my-bucket/path/my-hdfs-symbol"
- "/path-to/my-local-symbol"
See Also
--------
symbol.load : Used to load symbol from file.
"""
if not isinstance(fname, string_types):
raise TypeError('fname need to be string')
check_call(_LIB.MXSymbolSaveToFile(self.handle, c_str(fname)))
def tojson(self):
"""Saves symbol to a JSON string.
See Also
--------
symbol.load_json : Used to load symbol from JSON string.
"""
json_str = ctypes.c_char_p()
check_call(_LIB.MXSymbolSaveToJSON(self.handle, ctypes.byref(json_str)))
return py_str(json_str.value)
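    # Illustrative sketch (not part of the original docstrings): `save`/`tojson` pair with
    # `mx.sym.load`/`mx.sym.load_json`; the file name below is an arbitrary example.
    # >>> c = mx.sym.Variable('a') + mx.sym.Variable('b')
    # >>> c.save('sym.json')                   # writes the JSON representation to disk
    # >>> c2 = mx.sym.load('sym.json')         # same graph, new Symbol object
    # >>> c3 = mx.sym.load_json(c.tojson())    # in-memory round trip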
@staticmethod
def _get_ndarray_inputs(arg_key, args, arg_names, allow_missing):
"""Helper function to get NDArray lists handles from various inputs.
Parameters
----------
arg_key : str
The name of argument, used for error message.
args : list of NDArray or dict of str to NDArray
Input arguments to the symbols.
If type is list of NDArray, the position is in the same order of arg_names.
If type is dict of str to NDArray, then it maps the name of arguments
            to the corresponding NDArray.
        arg_names : list of string
List of argument names.
allow_missing : boolean
Whether missing argument is allowed.
            When allowed, the missing handle will be set to None (null).
Returns
-------
handles : list of NDArrayHandle
The positional list of NDArrayHandles generated from input.
"""
# setup args
arg_handles = []
arg_arrays = []
if isinstance(args, list):
if len(args) != len(arg_names):
raise ValueError('Length of %s does not match the number of arguments' % arg_key)
for narr in args:
if narr is None and allow_missing:
arg_handles.append(None)
elif not isinstance(narr, NDArray):
raise TypeError('Only accept list of NDArrays or dict of str to NDArray')
else:
arg_handles.append(narr.handle)
arg_arrays = args
elif isinstance(args, dict):
for name in arg_names:
if name in args:
narr = args[name]
if not isinstance(narr, NDArray):
raise TypeError('Only accept list of NDArrays or dict of str to NDArray')
arg_handles.append(narr.handle)
arg_arrays.append(narr)
else:
if allow_missing:
arg_handles.append(None)
arg_arrays.append(None)
else:
raise ValueError('key `%s` is missing in `%s`' % (name, arg_key))
else:
raise TypeError('Only accept list of NDArrays or dict of str to NDArray')
return c_array(NDArrayHandle, arg_handles), arg_arrays
# pylint: disable=too-many-locals
def simple_bind(self, ctx, grad_req='write', type_dict=None, stype_dict=None,
group2ctx=None, shared_arg_names=None, shared_exec=None,
shared_buffer=None, **kwargs):
"""Bind current symbol to get an executor, allocate all the arguments needed.
Allows specifying data types.
This function simplifies the binding procedure. You need to specify only input data shapes.
Before binding the executor, the function allocates arguments and auxiliary states
        that were not explicitly specified.
Example
-------
>>> x = mx.sym.Variable('x')
>>> y = mx.sym.FullyConnected(x, num_hidden=4)
>>> exe = y.simple_bind(mx.cpu(), x=(5,4), grad_req='null')
>>> exe.forward()
[<NDArray 5x4 @cpu(0)>]
>>> exe.outputs[0].asnumpy()
array([[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.]], dtype=float32)
>>> exe.arg_arrays
[<NDArray 5x4 @cpu(0)>, <NDArray 4x4 @cpu(0)>, <NDArray 4 @cpu(0)>]
>>> exe.grad_arrays
[<NDArray 5x4 @cpu(0)>, <NDArray 4x4 @cpu(0)>, <NDArray 4 @cpu(0)>]
Parameters
----------
ctx : Context
The device context the generated executor to run on.
grad_req: string
{'write', 'add', 'null'}, or list of str or dict of str to str, optional
To specify how we should update the gradient to the `args_grad`.
- 'write' means every time gradient is written to specified `args_grad` NDArray.
- 'add' means every time gradient is added to the specified NDArray.
- 'null' means no action is taken, the gradient may not be calculated.
type_dict : Dict of str->numpy.dtype
Input type dictionary, name->dtype
stype_dict : Dict of str->str
Input storage type dictionary, name->storage_type
group2ctx : Dict of string to mx.Context
The dict mapping the `ctx_group` attribute to the context assignment.
shared_arg_names : List of string
The argument names whose `NDArray` of shared_exec can be reused for initializing
the current executor.
shared_exec : Executor
            The executor whose arg_arrays, grad_arrays, and aux_arrays can be
reused for initializing the current executor.
shared_buffer : Dict of string to `NDArray`
The dict mapping argument names to the `NDArray` that can be reused for initializing
the current executor. This buffer will be checked for reuse if one argument name
of the current executor is not found in `shared_arg_names`. The `NDArray` s are
            expected to have default storage type.
kwargs : Dict of str->shape
Input shape dictionary, name->shape
Returns
-------
executor : mxnet.Executor
The generated executor
"""
# data types
num_provided_arg_types = 0
provided_arg_type_names = ctypes.POINTER(ctypes.c_char_p)() # provided type argument names
provided_arg_type_data = ctypes.POINTER(mx_uint)() # provided types
if type_dict is not None:
provided_arg_type_names = []
provided_arg_type_data = []
for k, v in type_dict.items():
v = _numpy.dtype(v).type
if v in _DTYPE_NP_TO_MX:
provided_arg_type_names.append(k)
provided_arg_type_data.append(_DTYPE_NP_TO_MX[v])
num_provided_arg_types = mx_uint(len(provided_arg_type_names))
provided_arg_type_names = c_str_array(provided_arg_type_names)
provided_arg_type_data = c_array_buf(ctypes.c_int, array('i', provided_arg_type_data))
# storage types
num_provided_arg_stypes = 0
# provided storage type argument names
provided_arg_stype_names = ctypes.POINTER(ctypes.c_char_p)()
provided_arg_stype_data = ctypes.POINTER(mx_uint)() # provided storage types
if stype_dict is not None:
provided_arg_stype_names = []
provided_arg_stype_data = []
for k, v in stype_dict.items():
if v in _STORAGE_TYPE_STR_TO_ID:
provided_arg_stype_names.append(k)
provided_arg_stype_data.append(_STORAGE_TYPE_STR_TO_ID[v])
num_provided_arg_stypes = mx_uint(len(provided_arg_stype_names))
provided_arg_stype_names = c_str_array(provided_arg_stype_names)
provided_arg_stype_data = c_array_buf(ctypes.c_int, array('i', provided_arg_stype_data))
provided_arg_shape_data = [] # shape data
# argument shape index in sdata,
# e.g. [sdata[indptr[0]], sdata[indptr[1]]) is the shape of the first arg
provided_arg_shape_idx = [0]
provided_arg_shape_names = [] # provided argument names
for k, v in kwargs.items():
# if k not in listed_arguments and k not in listed_aux_states:
# raise ValueError('arg name %s is not valid', k)
if isinstance(v, tuple):
provided_arg_shape_names.append(k)
provided_arg_shape_data.extend(v)
provided_arg_shape_idx.append(len(provided_arg_shape_data))
provided_req_type_list_len = 0
provided_grad_req_types = ctypes.POINTER(ctypes.c_char_p)()
provided_grad_req_names = ctypes.POINTER(ctypes.c_char_p)()
if grad_req is not None:
if isinstance(grad_req, string_types):
# use provided_req_type_list_len = 0 to indicate this situation
provided_req_type_list_len = 0
provided_grad_req_types = [grad_req]
elif isinstance(grad_req, list):
if len(grad_req) == 0:
raise RuntimeError('grad_req in simple_bind cannot be an empty list')
provided_grad_req_types = grad_req
provided_req_type_list_len = len(provided_grad_req_types)
elif isinstance(grad_req, dict):
if len(grad_req) == 0:
raise RuntimeError('grad_req in simple_bind cannot be an empty dict')
provided_grad_req_names = []
provided_grad_req_types = []
for k, v in grad_req.items():
provided_grad_req_names.append(k)
provided_grad_req_types.append(v)
provided_grad_req_names = c_str_array(provided_grad_req_names)
provided_req_type_list_len = len(provided_grad_req_types)
provided_grad_req_types = c_str_array(provided_grad_req_types)
num_ctx_map_keys = mx_uint(0)
ctx_map_keys = ctypes.POINTER(ctypes.c_char_p)()
ctx_map_dev_types = ctypes.POINTER(ctypes.c_int)()
ctx_map_dev_ids = ctypes.POINTER(ctypes.c_int)()
if group2ctx is not None:
ctx_map_keys = []
ctx_map_dev_types = []
ctx_map_dev_ids = []
for key, val in group2ctx.items():
ctx_map_keys.append(key)
ctx_map_dev_types.append(val.device_typeid)
ctx_map_dev_ids.append(val.device_id)
num_ctx_map_keys = mx_uint(len(ctx_map_keys))
ctx_map_keys = c_str_array(ctx_map_keys)
ctx_map_dev_types = c_array(ctypes.c_int, array('i', ctx_map_dev_types))
ctx_map_dev_ids = c_array(ctypes.c_int, array('i', ctx_map_dev_ids))
# prepare param names
shared_arg_name_list = []
if shared_arg_names is not None:
if not isinstance(shared_arg_names, list):
raise ValueError('shared_arg_names in simple_bind must be a list or None')
shared_arg_name_list = shared_arg_names
# prepare shared_buffer
if shared_buffer is None:
shared_buffer_len = ctypes.c_int(-1)
shared_buffer_names = ctypes.POINTER(ctypes.c_char_p)()
shared_buffer_handles = ctypes.POINTER(NDArrayHandle)()
else:
if not isinstance(shared_buffer, dict):
raise ValueError('shared_buffer in simple_bind must be dict or None')
buffer_names = shared_buffer.keys()
buffer_arrays = shared_buffer.values()
for v in buffer_arrays:
assert(v.stype == 'default'), \
"shared_buffer is expected to only contain NDArrays with default storage"
shared_buffer_names = c_str_array(buffer_names)
shared_buffer_len = ctypes.c_int(len(buffer_arrays))
shared_buffer_handles = c_handle_array(buffer_arrays)
updated_shared_buffer_names = ctypes.POINTER(ctypes.c_char_p)()
updated_shared_buffer_handles = ctypes.POINTER(NDArrayHandle)()
# prepare shared_exec_handle
shared_exec_handle = shared_exec.handle if shared_exec is not None else ExecutorHandle()
# prepare current executor handle
exe_handle = ExecutorHandle()
# prepare current executor's in_args, arg_grads, and aux_states
num_in_args = ctypes.c_uint()
in_arg_handles = ctypes.POINTER(NDArrayHandle)()
arg_grad_handles = ctypes.POINTER(NDArrayHandle)()
num_aux_states = ctypes.c_uint()
aux_state_handles = ctypes.POINTER(NDArrayHandle)()
try:
check_call(_LIB.MXExecutorSimpleBind(self.handle,
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
num_ctx_map_keys,
ctx_map_keys,
ctx_map_dev_types,
ctx_map_dev_ids,
mx_uint(provided_req_type_list_len),
provided_grad_req_names,
provided_grad_req_types,
mx_uint(len(provided_arg_shape_names)),
c_str_array(provided_arg_shape_names),
c_array_buf(mx_uint,
array('I', provided_arg_shape_data)),
c_array_buf(mx_uint,
array('I', provided_arg_shape_idx)),
num_provided_arg_types,
provided_arg_type_names,
provided_arg_type_data,
num_provided_arg_stypes,
provided_arg_stype_names,
provided_arg_stype_data,
mx_uint(len(shared_arg_name_list)),
c_str_array(shared_arg_name_list),
ctypes.byref(shared_buffer_len),
shared_buffer_names,
shared_buffer_handles,
ctypes.byref(updated_shared_buffer_names),
ctypes.byref(updated_shared_buffer_handles),
ctypes.byref(num_in_args),
ctypes.byref(in_arg_handles),
ctypes.byref(arg_grad_handles),
ctypes.byref(num_aux_states),
ctypes.byref(aux_state_handles),
shared_exec_handle,
ctypes.byref(exe_handle)))
except MXNetError as e:
error_msg = "simple_bind error. Arguments:\n"
for k, v in kwargs.items():
error_msg += "%s: %s\n" % (k, v)
error_msg += "%s" % e
raise RuntimeError(error_msg)
# update shared_buffer
if shared_buffer is not None:
for i in range(shared_buffer_len.value):
k = py_str(updated_shared_buffer_names[i])
v = NDArray(NDArrayHandle(updated_shared_buffer_handles[i]))
shared_buffer[k] = v
# create in_args, arg_grads, and aux_states for the current executor
arg_arrays = [_ndarray_cls(NDArrayHandle(in_arg_handles[i]))
for i in range(num_in_args.value)]
grad_arrays = [_ndarray_cls(NDArrayHandle(arg_grad_handles[i]))
if arg_grad_handles[i] is not None
else None for i in range(num_in_args.value)]
aux_arrays = [_ndarray_cls(NDArrayHandle(aux_state_handles[i]))
for i in range(num_aux_states.value)]
executor = Executor(exe_handle, self, ctx, grad_req, group2ctx)
executor.arg_arrays = arg_arrays
executor.grad_arrays = grad_arrays
executor.aux_arrays = aux_arrays
return executor
def bind(self, ctx, args, args_grad=None, grad_req='write',
aux_states=None, group2ctx=None, shared_exec=None):
"""Binds the current symbol to an executor and returns it.
We first declare the computation and then bind to the data to run.
        This function returns an executor which provides a `forward()` method for evaluation
        and an `outputs` property to get all the results.
Example
-------
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.Variable('b')
>>> c = a + b
<Symbol _plus1>
>>> ex = c.bind(ctx=mx.cpu(), args={'a' : mx.nd.ones([2,3]), 'b' : mx.nd.ones([2,3])})
>>> ex.forward()
[<NDArray 2x3 @cpu(0)>]
>>> ex.outputs[0].asnumpy()
[[ 2. 2. 2.]
[ 2. 2. 2.]]
Parameters
----------
ctx : Context
The device context the generated executor to run on.
args : list of NDArray or dict of str to NDArray
Input arguments to the symbol.
- If the input type is a list of `NDArray`, the order should be same as the order
of `list_arguments()`.
- If the input type is a dict of str to `NDArray`, then it maps the name of arguments
to the corresponding `NDArray`.
- In either case, all the arguments must be provided.
args_grad : list of NDArray or dict of str to `NDArray`, optional
When specified, `args_grad` provides NDArrays to hold
the result of gradient value in backward.
- If the input type is a list of `NDArray`, the order should be same as the order
of `list_arguments()`.
- If the input type is a dict of str to `NDArray`, then it maps the name of arguments
to the corresponding NDArray.
- When the type is a dict of str to `NDArray`, one only need to provide the dict
for required argument gradient.
Only the specified argument gradient will be calculated.
grad_req : {'write', 'add', 'null'}, or list of str or dict of str to str, optional
To specify how we should update the gradient to the `args_grad`.
            - 'write' means the gradient is written to the specified `args_grad` `NDArray` every time.
            - 'add' means the gradient is added to the specified NDArray every time.
- 'null' means no action is taken, the gradient may not be calculated.
aux_states : list of `NDArray`, or dict of str to `NDArray`, optional
Input auxiliary states to the symbol, only needed when the output of
`list_auxiliary_states()` is not empty.
- If the input type is a list of `NDArray`, the order should be same as the order
of `list_auxiliary_states()`.
- If the input type is a dict of str to `NDArray`, then it maps the name of
`auxiliary_states` to the corresponding `NDArray`,
- In either case, all the auxiliary states need to be provided.
group2ctx : Dict of string to mx.Context
The dict mapping the `ctx_group` attribute to the context assignment.
shared_exec : mx.executor.Executor
Executor to share memory with. This is intended for runtime reshaping, variable length
sequences, etc. The returned executor shares state with `shared_exec`, and should not be
used in parallel with it.
Returns
-------
executor : Executor
The generated executor
Notes
-----
Auxiliary states are the special states of symbols that do not correspond
to an argument, and do not have gradient but are still useful
for the specific operations. Common examples of auxiliary states include
the `moving_mean` and `moving_variance` states in `BatchNorm`.
Most operators do not have auxiliary states and in those cases,
this parameter can be safely ignored.
        One can give up gradient computation by using a dict for `args_grad` and only
        specifying the gradients they are interested in.
"""
# pylint: disable=too-many-locals, too-many-branches
if not isinstance(ctx, Context):
raise TypeError("Context type error")
listed_arguments = self.list_arguments()
args_handle, args = self._get_ndarray_inputs('args', args, listed_arguments, False)
# setup args gradient
if args_grad is None:
args_grad_handle = c_array(NDArrayHandle, [None] * len(args))
else:
args_grad_handle, args_grad = self._get_ndarray_inputs(
'args_grad', args_grad, listed_arguments, True)
if aux_states is None:
aux_states = []
aux_args_handle, aux_states = self._get_ndarray_inputs(
'aux_states', aux_states, self.list_auxiliary_states(), False)
# setup requirements
if isinstance(grad_req, string_types):
if grad_req not in _GRAD_REQ_MAP:
raise ValueError('grad_req must be in %s' % str(_GRAD_REQ_MAP))
reqs_array = c_array_buf(mx_uint,
array('I', [_GRAD_REQ_MAP[grad_req]] * len(listed_arguments)))
elif isinstance(grad_req, list):
reqs_array = c_array_buf(mx_uint,
array('I', [_GRAD_REQ_MAP[item] for item in grad_req]))
elif isinstance(grad_req, dict):
req_array = []
for name in listed_arguments:
if name in grad_req:
req_array.append(_GRAD_REQ_MAP[grad_req[name]])
else:
req_array.append(0)
reqs_array = c_array_buf(mx_uint, array('I', req_array))
ctx_map_keys = []
ctx_map_dev_types = []
ctx_map_dev_ids = []
if group2ctx:
for key, val in group2ctx.items():
ctx_map_keys.append(key)
ctx_map_dev_types.append(val.device_typeid)
ctx_map_dev_ids.append(val.device_id)
handle = ExecutorHandle()
shared_handle = shared_exec.handle if shared_exec is not None else ExecutorHandle()
check_call(_LIB.MXExecutorBindEX(self.handle,
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
mx_uint(len(ctx_map_keys)),
c_str_array(ctx_map_keys),
c_array_buf(ctypes.c_int, array('i', ctx_map_dev_types)),
c_array_buf(ctypes.c_int, array('i', ctx_map_dev_ids)),
mx_uint(len(args)),
args_handle,
args_grad_handle,
reqs_array,
mx_uint(len(aux_states)),
aux_args_handle,
shared_handle,
ctypes.byref(handle)))
executor = Executor(handle, self, ctx, grad_req, group2ctx)
executor.arg_arrays = args
executor.grad_arrays = args_grad
executor.aux_arrays = aux_states
return executor
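    # Illustrative sketch (not part of the original docstring): requesting gradients for
    # only some arguments by passing `args_grad` (and `grad_req`) as dicts; the names and
    # shapes below are arbitrary examples.
    # >>> a = mx.sym.Variable('a')
    # >>> b = mx.sym.Variable('b')
    # >>> c = a * b
    # >>> ex = c.bind(ctx=mx.cpu(),
    # ...             args={'a': mx.nd.ones((2, 3)), 'b': mx.nd.ones((2, 3))},
    # ...             args_grad={'a': mx.nd.zeros((2, 3))},   # gradient kept only for 'a'
    # ...             grad_req={'a': 'write', 'b': 'null'})
    # >>> ex.forward(is_train=True)
    # >>> ex.backward(out_grads=mx.nd.ones((2, 3)))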
def gradient(self, wrt):
"""Gets the autodiff of current symbol.
This function can only be used if current symbol is a loss function.
.. note:: This function is currently not implemented.
Parameters
----------
        wrt : list of str
            Names of the arguments with respect to which the gradient is taken.
Returns
-------
grad : Symbol
            A gradient Symbol whose outputs are the corresponding gradients.
"""
handle = SymbolHandle()
c_wrt = c_str_array(wrt)
check_call(_LIB.MXSymbolGrad(self.handle,
mx_uint(len(wrt)),
c_wrt,
ctypes.byref(handle)))
return Symbol(handle)
# pylint: enable= no-member
def eval(self, ctx=None, **kwargs):
"""Evaluates a symbol given arguments.
The `eval` method combines a call to `bind` (which returns an executor)
with a call to `forward` (executor method).
        For the common use case, where you might repeatedly evaluate with the same arguments,
eval is slow.
In that case, you should call `bind` once and then repeatedly call forward.
This function allows simpler syntax for less cumbersome introspection.
Example
-------
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.Variable('b')
>>> c = a + b
>>> ex = c.eval(ctx = mx.cpu(), a = mx.nd.ones([2,3]), b = mx.nd.ones([2,3]))
>>> ex
[<NDArray 2x3 @cpu(0)>]
>>> ex[0].asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
Parameters
----------
ctx : Context
The device context the generated executor to run on.
kwargs : Keyword arguments of type `NDArray`
Input arguments to the symbol. All the arguments must be provided.
Returns
        -------
result : a list of NDArrays corresponding to the values taken by each symbol when
evaluated on given args. When called on a single symbol (not a group),
the result will be a list with one element.
"""
if ctx is None:
ctx = current_context()
return self.bind(ctx, kwargs).forward()
def reshape(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`reshape`.
The arguments are the same as for :py:func:`reshape`, with
this array as data.
"""
return op.reshape(self, *args, **kwargs)
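    # Illustrative sketch (not part of the original docstrings): the fluent methods below
    # simply forward to the corresponding operators, so they can be chained; the shapes
    # and dtype are arbitrary examples.
    # >>> x = mx.sym.Variable('x')
    # >>> y = x.reshape((-1, 4)).astype('float16')
    # ... # roughly equivalent to mx.sym.cast(mx.sym.reshape(x, shape=(-1, 4)), dtype='float16')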
def reshape_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`reshape_like`.
The arguments are the same as for :py:func:`reshape_like`, with
this array as data.
"""
return op.reshape_like(self, *args, **kwargs)
def astype(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cast`.
The arguments are the same as for :py:func:`cast`, with
this array as data.
"""
return op.cast(self, *args, **kwargs)
def zeros_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`zeros_like`.
The arguments are the same as for :py:func:`zeros_like`, with
this array as data.
"""
return op.zeros_like(self, *args, **kwargs)
def ones_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`ones_like`.
The arguments are the same as for :py:func:`ones_like`, with
this array as data.
"""
return op.ones_like(self, *args, **kwargs)
def broadcast_axes(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`broadcast_axes`.
The arguments are the same as for :py:func:`broadcast_axes`, with
this array as data.
"""
return op.broadcast_axes(self, *args, **kwargs)
def repeat(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`repeat`.
The arguments are the same as for :py:func:`repeat`, with
this array as data.
"""
return op.repeat(self, *args, **kwargs)
def pad(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`pad`.
The arguments are the same as for :py:func:`pad`, with
this array as data.
"""
return op.pad(self, *args, **kwargs)
def swapaxes(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`swapaxes`.
The arguments are the same as for :py:func:`swapaxes`, with
this array as data.
"""
return op.swapaxes(self, *args, **kwargs)
def split(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`split`.
The arguments are the same as for :py:func:`split`, with
this array as data.
"""
return op.split(self, *args, **kwargs)
def slice(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`slice`.
The arguments are the same as for :py:func:`slice`, with
this array as data.
"""
return op.slice(self, *args, **kwargs)
def slice_axis(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`slice_axis`.
The arguments are the same as for :py:func:`slice_axis`, with
this array as data.
"""
return op.slice_axis(self, *args, **kwargs)
def slice_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`slice_like`.
The arguments are the same as for :py:func:`slice_like`, with
this array as data.
"""
return op.slice_like(self, *args, **kwargs)
def take(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`take`.
The arguments are the same as for :py:func:`take`, with
this array as data.
"""
return op.take(self, *args, **kwargs)
def one_hot(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`one_hot`.
The arguments are the same as for :py:func:`one_hot`, with
this array as data.
"""
return op.one_hot(self, *args, **kwargs)
def pick(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`pick`.
The arguments are the same as for :py:func:`pick`, with
this array as data.
"""
return op.pick(self, *args, **kwargs)
def sort(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sort`.
The arguments are the same as for :py:func:`sort`, with
this array as data.
"""
return op.sort(self, *args, **kwargs)
def topk(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`topk`.
The arguments are the same as for :py:func:`topk`, with
this array as data.
"""
return op.topk(self, *args, **kwargs)
def argsort(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`argsort`.
The arguments are the same as for :py:func:`argsort`, with
this array as data.
"""
return op.argsort(self, *args, **kwargs)
def argmax(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`argmax`.
The arguments are the same as for :py:func:`argmax`, with
this array as data.
"""
return op.argmax(self, *args, **kwargs)
def argmax_channel(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`argmax_channel`.
The arguments are the same as for :py:func:`argmax_channel`, with
this array as data.
"""
return op.argmax_channel(self, *args, **kwargs)
def argmin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`argmin`.
The arguments are the same as for :py:func:`argmin`, with
this array as data.
"""
return op.argmin(self, *args, **kwargs)
def clip(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`clip`.
The arguments are the same as for :py:func:`clip`, with
this array as data.
"""
return op.clip(self, *args, **kwargs)
def abs(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`abs`.
The arguments are the same as for :py:func:`abs`, with
this array as data.
"""
return op.abs(self, *args, **kwargs)
def sign(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sign`.
The arguments are the same as for :py:func:`sign`, with
this array as data.
"""
return op.sign(self, *args, **kwargs)
def flatten(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`flatten`.
The arguments are the same as for :py:func:`flatten`, with
this array as data.
"""
return op.flatten(self, *args, **kwargs)
def shape_array(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`shape_array`.
        The arguments are the same as for :py:func:`shape_array`, with
this array as data.
"""
return op.shape_array(self, *args, **kwargs)
def size_array(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`size_array`.
The arguments are the same as for :py:func:`size_array`, with
this array as data.
"""
return op.size_array(self, *args, **kwargs)
def expand_dims(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`expand_dims`.
The arguments are the same as for :py:func:`expand_dims`, with
this array as data.
"""
return op.expand_dims(self, *args, **kwargs)
def broadcast_to(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`broadcast_to`.
The arguments are the same as for :py:func:`broadcast_to`, with
this array as data.
"""
return op.broadcast_to(self, *args, **kwargs)
def broadcast_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`broadcast_like`.
The arguments are the same as for :py:func:`broadcast_like`, with
this array as data.
"""
return op.broadcast_like(self, *args, **kwargs)
def tile(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`tile`.
The arguments are the same as for :py:func:`tile`, with
this array as data.
"""
return op.tile(self, *args, **kwargs)
def transpose(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`transpose`.
The arguments are the same as for :py:func:`transpose`, with
this array as data.
"""
return op.transpose(self, *args, **kwargs)
def flip(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`flip`.
The arguments are the same as for :py:func:`flip`, with
this array as data.
"""
return op.flip(self, *args, **kwargs)
def depth_to_space(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`depth_to_space`.
The arguments are the same as for :py:func:`depth_to_space`, with
this array as data.
"""
return op.depth_to_space(self, *args, **kwargs)
def space_to_depth(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`space_to_depth`.
The arguments are the same as for :py:func:`space_to_depth`, with
this array as data.
"""
return op.space_to_depth(self, *args, **kwargs)
def diag(self, k=0, **kwargs):
"""Convenience fluent method for :py:func:`diag`.
The arguments are the same as for :py:func:`diag`, with
this array as data.
"""
return op.diag(self, k, **kwargs)
def sum(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sum`.
The arguments are the same as for :py:func:`sum`, with
this array as data.
"""
return op.sum(self, *args, **kwargs)
def nansum(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`nansum`.
The arguments are the same as for :py:func:`nansum`, with
this array as data.
"""
return op.nansum(self, *args, **kwargs)
def prod(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`prod`.
The arguments are the same as for :py:func:`prod`, with
this array as data.
"""
return op.prod(self, *args, **kwargs)
def nanprod(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`nanprod`.
The arguments are the same as for :py:func:`nanprod`, with
this array as data.
"""
return op.nanprod(self, *args, **kwargs)
def mean(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`mean`.
The arguments are the same as for :py:func:`mean`, with
this array as data.
"""
return op.mean(self, *args, **kwargs)
def max(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`max`.
The arguments are the same as for :py:func:`max`, with
this array as data.
"""
return op.max(self, *args, **kwargs)
def min(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`min`.
The arguments are the same as for :py:func:`min`, with
this array as data.
"""
return op.min(self, *args, **kwargs)
def norm(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`norm`.
The arguments are the same as for :py:func:`norm`, with
this array as data.
"""
return op.norm(self, *args, **kwargs)
def round(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`round`.
The arguments are the same as for :py:func:`round`, with
this array as data.
"""
return op.round(self, *args, **kwargs)
def rint(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`rint`.
The arguments are the same as for :py:func:`rint`, with
this array as data.
"""
return op.rint(self, *args, **kwargs)
def fix(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`fix`.
The arguments are the same as for :py:func:`fix`, with
this array as data.
"""
return op.fix(self, *args, **kwargs)
def floor(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`floor`.
The arguments are the same as for :py:func:`floor`, with
this array as data.
"""
return op.floor(self, *args, **kwargs)
def ceil(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`ceil`.
The arguments are the same as for :py:func:`ceil`, with
this array as data.
"""
return op.ceil(self, *args, **kwargs)
def trunc(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`trunc`.
The arguments are the same as for :py:func:`trunc`, with
this array as data.
"""
return op.trunc(self, *args, **kwargs)
def sin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sin`.
The arguments are the same as for :py:func:`sin`, with
this array as data.
"""
return op.sin(self, *args, **kwargs)
def cos(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cos`.
The arguments are the same as for :py:func:`cos`, with
this array as data.
"""
return op.cos(self, *args, **kwargs)
def tan(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`tan`.
The arguments are the same as for :py:func:`tan`, with
this array as data.
"""
return op.tan(self, *args, **kwargs)
def arcsin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arcsin`.
The arguments are the same as for :py:func:`arcsin`, with
this array as data.
"""
return op.arcsin(self, *args, **kwargs)
def arccos(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arccos`.
The arguments are the same as for :py:func:`arccos`, with
this array as data.
"""
return op.arccos(self, *args, **kwargs)
def arctan(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arctan`.
The arguments are the same as for :py:func:`arctan`, with
this array as data.
"""
return op.arctan(self, *args, **kwargs)
def degrees(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`degrees`.
The arguments are the same as for :py:func:`degrees`, with
this array as data.
"""
return op.degrees(self, *args, **kwargs)
def radians(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`radians`.
The arguments are the same as for :py:func:`radians`, with
this array as data.
"""
return op.radians(self, *args, **kwargs)
def sinh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sinh`.
The arguments are the same as for :py:func:`sinh`, with
this array as data.
"""
return op.sinh(self, *args, **kwargs)
def cosh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cosh`.
The arguments are the same as for :py:func:`cosh`, with
this array as data.
"""
return op.cosh(self, *args, **kwargs)
def tanh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`tanh`.
The arguments are the same as for :py:func:`tanh`, with
this array as data.
"""
return op.tanh(self, *args, **kwargs)
def arcsinh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arcsinh`.
The arguments are the same as for :py:func:`arcsinh`, with
this array as data.
"""
return op.arcsinh(self, *args, **kwargs)
def arccosh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arccosh`.
The arguments are the same as for :py:func:`arccosh`, with
this array as data.
"""
return op.arccosh(self, *args, **kwargs)
def arctanh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arctanh`.
The arguments are the same as for :py:func:`arctanh`, with
this array as data.
"""
return op.arctanh(self, *args, **kwargs)
def exp(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`exp`.
The arguments are the same as for :py:func:`exp`, with
this array as data.
"""
return op.exp(self, *args, **kwargs)
def expm1(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`expm1`.
The arguments are the same as for :py:func:`expm1`, with
this array as data.
"""
return op.expm1(self, *args, **kwargs)
def log(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log`.
The arguments are the same as for :py:func:`log`, with
this array as data.
"""
return op.log(self, *args, **kwargs)
def log10(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log10`.
The arguments are the same as for :py:func:`log10`, with
this array as data.
"""
return op.log10(self, *args, **kwargs)
def log2(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log2`.
The arguments are the same as for :py:func:`log2`, with
this array as data.
"""
return op.log2(self, *args, **kwargs)
def log1p(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log1p`.
The arguments are the same as for :py:func:`log1p`, with
this array as data.
"""
return op.log1p(self, *args, **kwargs)
def sqrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sqrt`.
The arguments are the same as for :py:func:`sqrt`, with
this array as data.
"""
return op.sqrt(self, *args, **kwargs)
def rsqrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`rsqrt`.
The arguments are the same as for :py:func:`rsqrt`, with
this array as data.
"""
return op.rsqrt(self, *args, **kwargs)
def cbrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cbrt`.
The arguments are the same as for :py:func:`cbrt`, with
this array as data.
"""
return op.cbrt(self, *args, **kwargs)
def rcbrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`rcbrt`.
The arguments are the same as for :py:func:`rcbrt`, with
this array as data.
"""
return op.rcbrt(self, *args, **kwargs)
def square(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`square`.
The arguments are the same as for :py:func:`square`, with
this array as data.
"""
return op.square(self, *args, **kwargs)
def reciprocal(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`reciprocal`.
The arguments are the same as for :py:func:`reciprocal`, with
this array as data.
"""
return op.reciprocal(self, *args, **kwargs)
def relu(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`relu`.
The arguments are the same as for :py:func:`relu`, with
this array as data.
"""
return op.relu(self, *args, **kwargs)
def sigmoid(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sigmoid`.
The arguments are the same as for :py:func:`sigmoid`, with
this array as data.
"""
return op.sigmoid(self, *args, **kwargs)
def softmax(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`softmax`.
The arguments are the same as for :py:func:`softmax`, with
this array as data.
"""
return op.softmax(self, *args, **kwargs)
def log_softmax(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log_softmax`.
The arguments are the same as for :py:func:`log_softmax`, with
this array as data.
"""
return op.log_softmax(self, *args, **kwargs)
def softmin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`softmin`.
The arguments are the same as for :py:func:`softmin`, with
this array as data.
"""
return op.softmin(self, *args, **kwargs)
def squeeze(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`squeeze`.
The arguments are the same as for :py:func:`squeeze`, with
this array as data.
"""
return op.squeeze(self, *args, **kwargs)
def get_backend_symbol(self, backend):
"""Return symbol for target backend.
Parameters
----------
backend : str
The backend name.
Returns
-------
out : Symbol
The created Symbol for target backend.
"""
out = SymbolHandle()
check_call(_LIB.MXGenBackendSubgraph(self.handle, c_str(backend), ctypes.byref(out)))
return Symbol(out)
def wait_to_read(self):
raise NotImplementedForSymbol(self.wait_to_read, None)
def asnumpy(self):
raise NotImplementedForSymbol(self.asnumpy, None)
def asscalar(self):
raise NotImplementedForSymbol(self.asscalar, None)
def copy(self):
raise NotImplementedForSymbol(self.copy, None)
def as_in_context(self):
raise NotImplementedForSymbol(self.as_in_context, None)
def detach(self):
raise NotImplementedForSymbol(self.detach, None)
def backward(self):
raise NotImplementedForSymbol(self.backward, None)
def var(name, attr=None, shape=None, lr_mult=None, wd_mult=None, dtype=None,
init=None, stype=None, **kwargs):
"""Creates a symbolic variable with specified name.
Example
-------
>>> data = mx.sym.Variable('data', attr={'a': 'b'})
>>> data
<Symbol data>
>>> csr_data = mx.sym.Variable('csr_data', stype='csr')
>>> csr_data
<Symbol csr_data>
>>> row_sparse_weight = mx.sym.Variable('weight', stype='row_sparse')
>>> row_sparse_weight
<Symbol weight>
Parameters
----------
name : str
Variable name.
attr : Dict of strings
Additional attributes to set on the variable. Format {string : string}.
shape : tuple
The shape of a variable. If specified, this will be used during the shape inference.
If one has specified a different shape for this variable using
a keyword argument when calling shape inference, this shape information will be ignored.
lr_mult : float
The learning rate multiplier for input variable.
wd_mult : float
Weight decay multiplier for input variable.
dtype : str or numpy.dtype
The dtype for input variable. If not specified, this value will be inferred.
init : initializer (mxnet.init.*)
Initializer for this variable to (optionally) override the default initializer.
stype : str
The storage type of the variable, such as 'row_sparse', 'csr', 'default', etc
kwargs : Additional attribute variables
Additional attributes must start and end with double underscores.
Returns
-------
variable : Symbol
A symbol corresponding to an input to the computation graph.
"""
if not isinstance(name, string_types):
raise TypeError('Expect a string for variable `name`')
handle = SymbolHandle()
check_call(_LIB.MXSymbolCreateVariable(c_str(name), ctypes.byref(handle)))
ret = Symbol(handle)
if not hasattr(AttrScope._current, "value"):
AttrScope._current.value = AttrScope()
attr = AttrScope._current.value.get(attr)
attr = {} if attr is None else attr
if shape is not None:
attr['__shape__'] = str(shape)
if lr_mult is not None:
attr['__lr_mult__'] = str(lr_mult)
if wd_mult is not None:
attr['__wd_mult__'] = str(wd_mult)
if dtype is not None:
attr['__dtype__'] = str(_DTYPE_NP_TO_MX[_numpy.dtype(dtype).type])
if init is not None:
if not isinstance(init, string_types):
init = init.dumps()
attr['__init__'] = init
if stype is not None:
attr['__storage_type__'] = str(_STORAGE_TYPE_STR_TO_ID[stype])
for k, v in kwargs.items():
if k.startswith('__') and k.endswith('__'):
attr[k] = str(v)
else:
raise ValueError('Attribute name=%s is not supported.'
' Additional attributes must start and end with double underscores,'
' e.g, __yourattr__' % k)
ret._set_attr(**attr)
return ret
# for back compatibility
Variable = var
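# Editor's illustrative sketch (not part of the original module): how the
# attribute rules documented in ``var`` above combine. The attribute name
# ``__myattr__`` is hypothetical; custom keyword attributes must start and
# end with double underscores, and shape/lr_mult hints become attributes too.
def _example_var_attributes():
    """Create a variable carrying a shape hint and a custom attribute."""
    weight = var('weight', shape=(3, 4), lr_mult=2.0, __myattr__='demo')
    return weight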
def Group(symbols):
"""Creates a symbol that contains a collection of other symbols, grouped together.
Example
-------
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.Variable('b')
>>> mx.sym.Group([a,b])
<Symbol Grouped>
Parameters
----------
symbols : list
List of symbols to be grouped.
Returns
-------
sym : Symbol
A group symbol.
"""
if not symbols or any(not isinstance(sym, Symbol) for sym in symbols):
raise TypeError('Expected a list of symbols as input')
handle = SymbolHandle()
check_call(_LIB.MXSymbolCreateGroup(
mx_uint(len(symbols)),
c_handle_array(symbols), ctypes.byref(handle)))
return Symbol(handle)
def load(fname):
"""Loads symbol from a JSON file.
You can also use pickle to do the job if you only work in Python.
The advantage of load/save is that the file format is language agnostic.
This means a file saved using save can be loaded by the other language bindings of MXNet.
You also get the benefit of being able to load/save directly from cloud storage (S3, HDFS).
Parameters
----------
fname : str
The name of the file, examples:
- `s3://my-bucket/path/my-s3-symbol`
- `hdfs://my-bucket/path/my-hdfs-symbol`
- `/path-to/my-local-symbol`
Returns
-------
sym : Symbol
The loaded symbol.
See Also
--------
Symbol.save : Used to save symbol into file.
"""
if not isinstance(fname, string_types):
raise TypeError('fname needs to be a string')
handle = SymbolHandle()
check_call(_LIB.MXSymbolCreateFromFile(c_str(fname), ctypes.byref(handle)))
return Symbol(handle)
def load_json(json_str):
"""Loads symbol from json string.
Parameters
----------
json_str : str
A JSON string.
Returns
-------
sym : Symbol
The loaded symbol.
See Also
--------
Symbol.tojson : Used to save symbol into json string.
"""
if not isinstance(json_str, string_types):
raise TypeError('json_str needs to be a string')
handle = SymbolHandle()
check_call(_LIB.MXSymbolCreateFromJSON(c_str(json_str), ctypes.byref(handle)))
return Symbol(handle)
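# Editor's illustrative sketch (not part of the original module): the JSON
# round trip described by ``load_json`` above, using ``Symbol.tojson`` (the
# counterpart named in the docstring). File-based ``load``/``Symbol.save``
# work the same way through the filesystem or S3/HDFS.
def _example_json_round_trip():
    """Serialize a small grouped symbol to JSON and restore it."""
    a = var('a')
    b = var('b')
    grouped = Group([a, b])          # <Symbol Grouped>
    json_str = grouped.tojson()      # language-agnostic JSON string
    return load_json(json_str)       # an equivalent Symbol, rebuilt from JSON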
# pylint: disable=no-member
# pylint: disable=redefined-builtin
def pow(base, exp):
"""Returns element-wise result of base element raised to powers from exp element.
Both inputs can be Symbol or scalar number.
Broadcasting is not supported. Use `broadcast_pow` instead.
Parameters
---------
base : Symbol or scalar
The base symbol
exp : Symbol or scalar
The exponent symbol
Returns
-------
Symbol or scalar
The bases in x raised to the exponents in y.
Examples
--------
>>> mx.sym.pow(2, 3)
8
>>> x = mx.sym.Variable('x')
>>> y = mx.sym.Variable('y')
>>> z = mx.sym.pow(x, 2)
>>> z.eval(x=mx.nd.array([1,2]))[0].asnumpy()
array([ 1., 4.], dtype=float32)
>>> z = mx.sym.pow(3, y)
>>> z.eval(y=mx.nd.array([2,3]))[0].asnumpy()
array([ 9., 27.], dtype=float32)
>>> z = mx.sym.pow(x, y)
>>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([2,3]))[0].asnumpy()
array([ 9., 64.], dtype=float32)
"""
if isinstance(base, Symbol) and isinstance(exp, Symbol):
return _internal._Power(base, exp)
if isinstance(base, Symbol) and isinstance(exp, Number):
return _internal._PowerScalar(base, scalar=exp)
if isinstance(base, Number) and isinstance(exp, Symbol):
return _internal._RPowerScalar(exp, scalar=base)
if isinstance(base, Number) and isinstance(exp, Number):
return base**exp
else:
raise TypeError('types (%s, %s) not supported' % (str(type(base)), str(type(exp))))
# pylint: disable=no-member
# pylint: disable=redefined-builtin
def maximum(left, right):
"""Returns element-wise maximum of the input elements.
Both inputs can be Symbol or scalar number. Broadcasting is not supported.
Parameters
---------
left : Symbol or scalar
First symbol to be compared.
right : Symbol or scalar
Second symbol to be compared.
Returns
-------
Symbol or scalar
The element-wise maximum of the input symbols.
Examples
--------
>>> mx.sym.maximum(2, 3.5)
3.5
>>> x = mx.sym.Variable('x')
>>> y = mx.sym.Variable('y')
>>> z = mx.sym.maximum(x, 4)
>>> z.eval(x=mx.nd.array([3,5,2,10]))[0].asnumpy()
array([ 4., 5., 4., 10.], dtype=float32)
>>> z = mx.sym.maximum(x, y)
>>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([10,2]))[0].asnumpy()
array([ 10., 4.], dtype=float32)
"""
if isinstance(left, Symbol) and isinstance(right, Symbol):
return _internal._Maximum(left, right)
if isinstance(left, Symbol) and isinstance(right, Number):
return _internal._MaximumScalar(left, scalar=right)
if isinstance(left, Number) and isinstance(right, Symbol):
return _internal._MaximumScalar(right, scalar=left)
if isinstance(left, Number) and isinstance(right, Number):
return left if left > right else right
else:
raise TypeError('types (%s, %s) not supported' % (str(type(left)), str(type(right))))
# pylint: disable=no-member
# pylint: disable=redefined-builtin
def minimum(left, right):
"""Returns element-wise minimum of the input elements.
Both inputs can be Symbol or scalar number. Broadcasting is not supported.
Parameters
---------
left : Symbol or scalar
First symbol to be compared.
right : Symbol or scalar
Second symbol to be compared.
Returns
-------
Symbol or scalar
The element-wise minimum of the input symbols.
Examples
--------
>>> mx.sym.minimum(2, 3.5)
2
>>> x = mx.sym.Variable('x')
>>> y = mx.sym.Variable('y')
>>> z = mx.sym.minimum(x, 4)
>>> z.eval(x=mx.nd.array([3,5,2,10]))[0].asnumpy()
array([ 3., 4., 2., 4.], dtype=float32)
>>> z = mx.sym.minimum(x, y)
>>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([10,2]))[0].asnumpy()
array([ 3., 2.], dtype=float32)
"""
if isinstance(left, Symbol) and isinstance(right, Symbol):
return _internal._Minimum(left, right)
if isinstance(left, Symbol) and isinstance(right, Number):
return _internal._MinimumScalar(left, scalar=right)
if isinstance(left, Number) and isinstance(right, Symbol):
return _internal._MinimumScalar(right, scalar=left)
if isinstance(left, Number) and isinstance(right, Number):
return left if left < right else right
else:
raise TypeError('types (%s, %s) not supported' % (str(type(left)), str(type(right))))
# pylint: disable=no-member
# pylint: disable=redefined-builtin
def hypot(left, right):
"""Given the "legs" of a right triangle, returns its hypotenuse.
Equivalent to :math:`\\sqrt(left^2 + right^2)`, element-wise.
Both inputs can be Symbol or scalar number. Broadcasting is not supported.
Parameters
---------
left : Symbol or scalar
First leg of the triangle(s).
right : Symbol or scalar
Second leg of the triangle(s).
Returns
-------
Symbol or scalar
The hypotenuse of the triangle(s)
Examples
--------
>>> mx.sym.hypot(3, 4)
5.0
>>> x = mx.sym.Variable('x')
>>> y = mx.sym.Variable('y')
>>> z = mx.sym.hypot(x, 4)
>>> z.eval(x=mx.nd.array([3,5,2]))[0].asnumpy()
array([ 5., 6.40312433, 4.47213602], dtype=float32)
>>> z = mx.sym.hypot(x, y)
>>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([10,2]))[0].asnumpy()
array([ 10.44030666, 4.47213602], dtype=float32)
"""
if isinstance(left, Symbol) and isinstance(right, Symbol):
return _internal._Hypot(left, right)
if isinstance(left, Symbol) and isinstance(right, Number):
return _internal._HypotScalar(left, scalar=right)
if isinstance(left, Number) and isinstance(right, Symbol):
return _internal._HypotScalar(right, scalar=left)
if isinstance(left, Number) and isinstance(right, Number):
return _numpy.hypot(left, right)
else:
raise TypeError('types (%s, %s) not supported' % (str(type(left)), str(type(right))))
def eye(N, M=0, k=0, dtype=None, **kwargs):
"""Returns a new symbol of 2-D shpae, filled with ones on the diagonal and zeros elsewhere.
Parameters
----------
N: int
Number of rows in the output.
M: int, optional
Number of columns in the output. If 0, defaults to N.
k: int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal,
and a negative value to a lower diagonal.
dtype : str or numpy.dtype, optional
The data type of the output, defaults to ``np.float32``.
Returns
-------
out : Symbol
The created Symbol.
"""
if dtype is None:
dtype = _numpy.float32
return _internal._eye(N, M, k, dtype=dtype, **kwargs)
def zeros(shape, dtype=None, **kwargs):
"""Returns a new symbol of given shape and type, filled with zeros.
Parameters
----------
shape : int or sequence of ints
Shape of the new array.
dtype : str or numpy.dtype, optional
The data type of the output, defaults to ``np.float32``.
Returns
-------
out : Symbol
The created Symbol.
"""
if dtype is None:
dtype = _numpy.float32
return _internal._zeros(shape=shape, dtype=dtype, **kwargs)
def ones(shape, dtype=None, **kwargs):
"""Returns a new symbol of given shape and type, filled with ones.
Parameters
----------
shape : int or sequence of ints
Shape of the new array.
dtype : str or numpy.dtype, optional
The data type of the output, defaults to ``np.float32``.
Returns
-------
out : Symbol
The created Symbol
"""
if dtype is None:
dtype = _numpy.float32
return _internal._ones(shape=shape, dtype=dtype, **kwargs)
def full(shape, val, dtype=None, **kwargs):
"""Returns a new array of given shape and type, filled with the given value `val`.
Parameters
----------
shape : int or sequence of ints
Shape of the new array.
val : scalar
Fill value.
dtype : str or numpy.dtype, optional
The data type of the output, defaults to ``np.float32``.
Returns
-------
out : Symbol
The created Symbol
"""
if dtype is None:
dtype = _numpy.float32
return _internal._full(shape=shape, dtype=dtype, value=float(val), **kwargs)
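# Editor's illustrative sketch (not part of the original module): the symbol
# creation helpers above build constant-producing symbols; evaluating them
# (e.g. via ``Symbol.eval`` on a default context) yields the concrete values.
def _example_constant_symbols():
    """Create a few constant symbols with the helpers defined above."""
    z = zeros(shape=(2, 3))                # 2x3 zeros, float32 by default
    o = ones(shape=(2, 3), dtype='int32')  # 2x3 ones as int32
    f = full(shape=(2,), val=7.5)          # filled with 7.5
    i = eye(3, k=1)                        # ones on the first super-diagonal
    return z, o, f, i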
# pylint: disable=redefined-outer-name
def arange(start, stop=None, step=1.0, repeat=1, infer_range=False, name=None, dtype=None):
"""Returns evenly spaced values within a given interval.
Parameters
----------
start : number
Start of interval. The interval includes this value. The default start value is 0.
stop : number, optional
End of interval. The interval does not include this value.
step : number, optional
Spacing between values.
repeat : int, optional
"The repeating time of all elements.
E.g repeat=3, the element a will be repeated three times --> a, a, a.
infer_range : boolean, optional
When set to True, infer the stop position from the start, step,
repeat, and output tensor size.
dtype : str or numpy.dtype, optional
The data type of the output, defaults to ``np.float32``.
Returns
-------
out : Symbol
The created Symbol
"""
if dtype is None:
dtype = _numpy.float32
return _internal._arange(start=start, stop=stop, step=step, repeat=repeat,
infer_range=infer_range, name=name, dtype=dtype)
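# Editor's illustrative sketch (not part of the original module): ``repeat``
# duplicates every element in place, so the symbol below describes the
# sequence 0, 0, 1, 1, 2, 2 once evaluated.
def _example_arange_repeat():
    """Build an arange symbol whose elements are each repeated twice."""
    return arange(start=0, stop=3, step=1.0, repeat=2)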
def histogram(a, bins=10, range=None, **kwargs):
"""Compute the histogram of the input data.
Parameters
----------
a : Symbol
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars
If bins is an int, it defines the number of equal-width bins in the
given range (10, by default). If bins is a sequence, it defines the bin edges,
including the rightmost edge, allowing for non-uniform bin widths.
range : (float, float), required if bins is an integer
The lower and upper range of the bins. In symbol mode a range must be
provided explicitly whenever `bins` is an integer. Values outside the range
are ignored. The first element of the range must be less than or equal to
the second, and the range is divided equally into the requested number of bins.
"""
if isinstance(bins, Symbol):
return _internal._histogram(data=a, bins=bins, **kwargs)
elif isinstance(bins, integer_types):
if range is None:
raise ValueError("null range is not supported in symbol mode")
return _internal._histogram(data=a, bin_cnt=bins, range=range, **kwargs)
raise ValueError("bins argument should be either an integer or an NDArray")
_set_symbol_class(Symbol)
| apache-2.0 | 3,714,805,258,863,992,000 | 35.164983 | 100 | 0.555302 | false |
Chippers255/nb_twitter | nb_twitter/bayes/multivariate.py | 1 | 3457 | # -*- coding: utf-8 -*-
# multivariate.py
# nb_twitter/nb_twitter/bayes
#
# Created by Thomas Nelson <tn90ca@gmail.com>
# Preston Engstrom <pe12nh@brocku.ca>
# Created..........................2015-06-23
# Modified.........................2015-06-30
#
# This script was developed for use as part of the nb_twitter package
import math
import nb_twitter.bayes.bayes as bayes
import nb_twitter.bayes.decorators as decorators
class Multivariate (bayes.Bayes):
"""The Bernoulli variation, as described by Manning et al (2008), generates
a Boolean indicator about each term of the vocabulary equal to 1 if the
term belongs to the examining document and 0 if it does not. The model of
this variation is significantly different from Multinomial not only because
it does not take into consideration the number of occurrences of each word,
but also because it takes into account the non-occurring terms within the
document. While in Multinomial model the non-occurring terms are completely
ignored, in Bernoulli model they are factored when computing the
conditional probabilities and thus the absence of terms is taken into
account.
Bernoulli model is known to make many mistakes while classifying long
documents, primarily because it does not take into account the multiple
occurrences of the words. Note that it is particularly
sensitive to the presence of noisy features.
Multivariate Reference Page:
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
"""
def train(self):
"""This method will train a multivariate naive bayes text classifier.
The classifier will be trained using the provided classes and documents
from the initializer.
"""
for c in self.C:
self.prior[c] = float(self.Nc[c]) / float(self.N)
self.prob[c] = {}
for w in self.V:
Ncw = self.count_documents_from_class_term(c, w)
self.prob[c][w] = (Ncw + 1.0) / (self.Nc[c] + 2.0)
# end def train
@decorators.string_check
def run(self, d):
"""This method will run the trained multivariate naive bayes text
classifier. This method will classify the provided document into
a class.
:param d: The new document to be classified.
:return score: A dictionary of scores for this document in each class.
"""
score = {}
W = self.extract_words_from_document('multivariate', d)
for c in self.C:
score[c] = math.log(self.prior[c])
for w in self.V:
if w in W:
score[c] += math.log(self.prob[c][w])
else:
score[c] += math.log(1 - self.prob[c][w])
return score
# end def run
@decorators.string_check
def count_documents_from_class_term(self, c, w):
"""This method will count the number of documents belonging to a class
'c' that contain the word 'w'.
:param c: The class of documents to count.
:param w: The word a counted document must contain.
:return Ncw: The count of documents in a class with a specific word.
"""
Ncw = 0
for d in self.D:
if d[0] == c and w in d[1].split():
Ncw += 1
return Ncw
# end def count_documents_from_class_term
# end class Multivariate
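# Editor's illustrative sketch (not part of the original package): the training
# rule above is Laplace-smoothed Bernoulli estimation, P(w|c) = (Ncw + 1) / (Nc + 2),
# where Ncw counts documents of class c containing word w and Nc counts documents
# of class c. A tiny standalone illustration on toy data (no dependency on the
# Bayes base class, whose constructor is defined elsewhere):
def _example_bernoulli_estimate():
    """Estimate P('great' | 'pos') from three toy documents."""
    docs = [('pos', 'great film'), ('pos', 'great cast'), ('neg', 'dull film')]
    Nc = sum(1 for c, _ in docs if c == 'pos')                            # 2
    Ncw = sum(1 for c, d in docs if c == 'pos' and 'great' in d.split())  # 2
    return (Ncw + 1.0) / (Nc + 2.0)                                       # 0.75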
| mit | 8,744,180,279,110,490,000 | 31.613208 | 79 | 0.632051 | false |
redhat-openstack/heat | heat/api/openstack/v1/software_configs.py | 3 | 2535 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from heat.api.openstack.v1 import util
from heat.common import serializers
from heat.common import wsgi
from heat.rpc import client as rpc_client
class SoftwareConfigController(object):
"""
WSGI controller for Software config in Heat v1 API
Implements the API actions
"""
REQUEST_SCOPE = 'software_configs'
def __init__(self, options):
self.options = options
self.rpc_client = rpc_client.EngineClient()
def default(self, req, **args):
raise exc.HTTPNotFound()
@util.policy_enforce
def show(self, req, config_id):
"""
Gets detailed information for a software config
"""
sc = self.rpc_client.show_software_config(
req.context, config_id)
return {'software_config': sc}
@util.policy_enforce
def create(self, req, body):
"""
Create a new software config
"""
create_data = {
'name': body.get('name'),
'group': body.get('group'),
'config': body.get('config'),
'inputs': body.get('inputs'),
'outputs': body.get('outputs'),
'options': body.get('options'),
}
sc = self.rpc_client.create_software_config(
req.context, **create_data)
return {'software_config': sc}
@util.policy_enforce
def delete(self, req, config_id):
"""
Delete an existing software config
"""
res = self.rpc_client.delete_software_config(req.context, config_id)
if res is not None:
raise exc.HTTPBadRequest(res['Error'])
raise exc.HTTPNoContent()
def create_resource(options):
"""
Software configs resource factory method.
"""
deserializer = wsgi.JSONRequestDeserializer()
serializer = serializers.JSONResponseSerializer()
return wsgi.Resource(
SoftwareConfigController(options), deserializer, serializer)
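# Editor's illustrative sketch (not part of the original module): the "create"
# action above reads its fields straight from the JSON request body, so a
# minimal POST payload could look like the dict below (all values are
# hypothetical examples).
def _example_create_body():
    """Return a minimal request body accepted by SoftwareConfigController.create."""
    return {
        'name': 'deploy_config',
        'group': 'script',
        'config': '#!/bin/sh\necho hello',
        'inputs': [{'name': 'foo'}],
        'outputs': [{'name': 'result'}],
        'options': {},
    }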
| apache-2.0 | 3,816,841,114,749,897,700 | 29.542169 | 78 | 0.634714 | false |
sotdjin/glibglab | venv/lib/python2.7/site-packages/sqlalchemy/engine/reflection.py | 13 | 30187 | # engine/reflection.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides an abstraction for obtaining database schema information.
Usage Notes:
Here are some general conventions when accessing the low level inspector
methods such as get_table_names, get_columns, etc.
1. Inspector methods return lists of dicts in most cases for the following
reasons:
* They're both standard types that can be serialized.
* Using a dict instead of a tuple allows easy expansion of attributes.
* Using a list for the outer structure maintains order and is easy to work
with (e.g. list comprehension [d['name'] for d in cols]).
2. Records that contain a name, such as the column name in a column record
use the key 'name'. So for most return values, each record will have a
'name' attribute.
"""
from .. import exc, sql
from ..sql import schema as sa_schema
from .. import util
from ..sql.type_api import TypeEngine
from ..util import deprecated
from ..util import topological
from .. import inspection
from .base import Connectable
@util.decorator
def cache(fn, self, con, *args, **kw):
info_cache = kw.get('info_cache', None)
if info_cache is None:
return fn(self, con, *args, **kw)
key = (
fn.__name__,
tuple(a for a in args if isinstance(a, util.string_types)),
tuple((k, v) for k, v in kw.items() if
isinstance(v,
util.string_types + util.int_types + (float, )
)
)
)
ret = info_cache.get(key)
if ret is None:
ret = fn(self, con, *args, **kw)
info_cache[key] = ret
return ret
class Inspector(object):
"""Performs database schema inspection.
The Inspector acts as a proxy to the reflection methods of the
:class:`~sqlalchemy.engine.interfaces.Dialect`, providing a
consistent interface as well as caching support for previously
fetched metadata.
A :class:`.Inspector` object is usually created via the
:func:`.inspect` function::
from sqlalchemy import inspect, create_engine
engine = create_engine('...')
insp = inspect(engine)
The inspection method above is equivalent to using the
:meth:`.Inspector.from_engine` method, i.e.::
engine = create_engine('...')
insp = Inspector.from_engine(engine)
Where above, the :class:`~sqlalchemy.engine.interfaces.Dialect` may opt
to return an :class:`.Inspector` subclass that provides additional
methods specific to the dialect's target database.
"""
def __init__(self, bind):
"""Initialize a new :class:`.Inspector`.
:param bind: a :class:`~sqlalchemy.engine.Connectable`,
which is typically an instance of
:class:`~sqlalchemy.engine.Engine` or
:class:`~sqlalchemy.engine.Connection`.
For a dialect-specific instance of :class:`.Inspector`, see
:meth:`.Inspector.from_engine`
"""
# this might not be a connection, it could be an engine.
self.bind = bind
# set the engine
if hasattr(bind, 'engine'):
self.engine = bind.engine
else:
self.engine = bind
if self.engine is bind:
# if engine, ensure initialized
bind.connect().close()
self.dialect = self.engine.dialect
self.info_cache = {}
@classmethod
def from_engine(cls, bind):
"""Construct a new dialect-specific Inspector object from the given
engine or connection.
:param bind: a :class:`~sqlalchemy.engine.Connectable`,
which is typically an instance of
:class:`~sqlalchemy.engine.Engine` or
:class:`~sqlalchemy.engine.Connection`.
This method differs from a direct constructor call of
:class:`.Inspector` in that the
:class:`~sqlalchemy.engine.interfaces.Dialect` is given a chance to
provide a dialect-specific :class:`.Inspector` instance, which may
provide additional methods.
See the example at :class:`.Inspector`.
"""
if hasattr(bind.dialect, 'inspector'):
return bind.dialect.inspector(bind)
return Inspector(bind)
@inspection._inspects(Connectable)
def _insp(bind):
return Inspector.from_engine(bind)
@property
def default_schema_name(self):
"""Return the default schema name presented by the dialect
for the current engine's database user.
E.g. this is typically ``public`` for PostgreSQL and ``dbo``
for SQL Server.
"""
return self.dialect.default_schema_name
def get_schema_names(self):
"""Return all schema names.
"""
if hasattr(self.dialect, 'get_schema_names'):
return self.dialect.get_schema_names(self.bind,
info_cache=self.info_cache)
return []
def get_table_names(self, schema=None, order_by=None):
"""Return all table names in referred to within a particular schema.
The names are expected to be real tables only, not views.
Views are instead returned using the :meth:`.Inspector.get_view_names`
method.
:param schema: Schema name. If ``schema`` is left at ``None``, the
database's default schema is
used, else the named schema is searched. If the database does not
support named schemas, behavior is undefined if ``schema`` is not
passed as ``None``. For special quoting, use :class:`.quoted_name`.
:param order_by: Optional, may be the string "foreign_key" to sort
the result on foreign key dependencies. Does not automatically
resolve cycles, and will raise :class:`.CircularDependencyError`
if cycles exist.
.. deprecated:: 1.0.0 - see
:meth:`.Inspector.get_sorted_table_and_fkc_names` for a version
of this which resolves foreign key cycles between tables
automatically.
.. versionchanged:: 0.8 the "foreign_key" sorting sorts tables
in order of dependee to dependent; that is, in creation
order, rather than in drop order. This is to maintain
consistency with similar features such as
:attr:`.MetaData.sorted_tables` and :func:`.util.sort_tables`.
.. seealso::
:meth:`.Inspector.get_sorted_table_and_fkc_names`
:attr:`.MetaData.sorted_tables`
"""
if hasattr(self.dialect, 'get_table_names'):
tnames = self.dialect.get_table_names(
self.bind, schema, info_cache=self.info_cache)
else:
tnames = self.engine.table_names(schema)
if order_by == 'foreign_key':
tuples = []
for tname in tnames:
for fkey in self.get_foreign_keys(tname, schema):
if tname != fkey['referred_table']:
tuples.append((fkey['referred_table'], tname))
tnames = list(topological.sort(tuples, tnames))
return tnames
def get_sorted_table_and_fkc_names(self, schema=None):
"""Return dependency-sorted table and foreign key constraint names in
referred to within a particular schema.
This will yield 2-tuples of
``(tablename, [(tname, fkname), (tname, fkname), ...])``
consisting of table names in CREATE order grouped with the foreign key
constraint names that are not detected as belonging to a cycle.
The final element
will be ``(None, [(tname, fkname), (tname, fkname), ..])``
which will consist of remaining
foreign key constraint names that would require a separate CREATE
step after-the-fact, based on dependencies between tables.
.. versionadded:: 1.0.0
.. seealso::
:meth:`.Inspector.get_table_names`
:func:`.sort_tables_and_constraints` - similar method which works
with an already-given :class:`.MetaData`.
"""
if hasattr(self.dialect, 'get_table_names'):
tnames = self.dialect.get_table_names(
self.bind, schema, info_cache=self.info_cache)
else:
tnames = self.engine.table_names(schema)
tuples = set()
remaining_fkcs = set()
fknames_for_table = {}
for tname in tnames:
fkeys = self.get_foreign_keys(tname, schema)
fknames_for_table[tname] = set(
[fk['name'] for fk in fkeys]
)
for fkey in fkeys:
if tname != fkey['referred_table']:
tuples.add((fkey['referred_table'], tname))
try:
candidate_sort = list(topological.sort(tuples, tnames))
except exc.CircularDependencyError as err:
for edge in err.edges:
tuples.remove(edge)
remaining_fkcs.update(
(edge[1], fkc)
for fkc in fknames_for_table[edge[1]]
)
candidate_sort = list(topological.sort(tuples, tnames))
return [
(tname, fknames_for_table[tname].difference(remaining_fkcs))
for tname in candidate_sort
] + [(None, list(remaining_fkcs))]
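# Editor's illustrative sketch (not part of the original module): consuming the
# structure documented above; ``insp`` is a hypothetical Inspector instance.
#
#     for tname, fkcs in insp.get_sorted_table_and_fkc_names():
#         if tname is not None:
#             pass  # CREATE TABLE tname; its listed constraints are safe inline
#         else:
#             pass  # fkcs holds (table, constraint) pairs needing ALTER TABLE later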
def get_temp_table_names(self):
"""return a list of temporary table names for the current bind.
This method is unsupported by most dialects; currently
only SQLite implements it.
.. versionadded:: 1.0.0
"""
return self.dialect.get_temp_table_names(
self.bind, info_cache=self.info_cache)
def get_temp_view_names(self):
"""return a list of temporary view names for the current bind.
This method is unsupported by most dialects; currently
only SQLite implements it.
.. versionadded:: 1.0.0
"""
return self.dialect.get_temp_view_names(
self.bind, info_cache=self.info_cache)
def get_table_options(self, table_name, schema=None, **kw):
"""Return a dictionary of options specified when the table of the
given name was created.
This currently includes some options that apply to MySQL tables.
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
"""
if hasattr(self.dialect, 'get_table_options'):
return self.dialect.get_table_options(
self.bind, table_name, schema,
info_cache=self.info_cache, **kw)
return {}
def get_view_names(self, schema=None):
"""Return all view names in `schema`.
:param schema: Optional, retrieve names from a non-default schema.
For special quoting, use :class:`.quoted_name`.
"""
return self.dialect.get_view_names(self.bind, schema,
info_cache=self.info_cache)
def get_view_definition(self, view_name, schema=None):
"""Return definition for `view_name`.
:param schema: Optional, retrieve names from a non-default schema.
For special quoting, use :class:`.quoted_name`.
"""
return self.dialect.get_view_definition(
self.bind, view_name, schema, info_cache=self.info_cache)
def get_columns(self, table_name, schema=None, **kw):
"""Return information about columns in `table_name`.
Given a string `table_name` and an optional string `schema`, return
column information as a list of dicts with these keys:
name
the column's name
type
:class:`~sqlalchemy.types.TypeEngine`
nullable
boolean
default
the column's default value
attrs
dict containing optional column attributes
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
"""
col_defs = self.dialect.get_columns(self.bind, table_name, schema,
info_cache=self.info_cache,
**kw)
for col_def in col_defs:
# make this easy and only return instances for coltype
coltype = col_def['type']
if not isinstance(coltype, TypeEngine):
col_def['type'] = coltype()
return col_defs
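# Editor's illustrative sketch (not part of the original module): the column
# dicts documented above can be consumed directly; ``insp`` is a hypothetical
# Inspector instance and 'user' a hypothetical table name.
#
#     for col in insp.get_columns('user'):
#         print(col['name'], col['type'], col['nullable'], col['default'])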
@deprecated('0.7', 'Call to deprecated method get_primary_keys.'
' Use get_pk_constraint instead.')
def get_primary_keys(self, table_name, schema=None, **kw):
"""Return information about primary keys in `table_name`.
Given a string `table_name`, and an optional string `schema`, return
primary key information as a list of column names.
"""
return self.dialect.get_pk_constraint(self.bind, table_name, schema,
info_cache=self.info_cache,
**kw)['constrained_columns']
def get_pk_constraint(self, table_name, schema=None, **kw):
"""Return information about primary key constraint on `table_name`.
Given a string `table_name`, and an optional string `schema`, return
primary key information as a dictionary with these keys:
constrained_columns
a list of column names that make up the primary key
name
optional name of the primary key constraint.
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
"""
return self.dialect.get_pk_constraint(self.bind, table_name, schema,
info_cache=self.info_cache,
**kw)
def get_foreign_keys(self, table_name, schema=None, **kw):
"""Return information about foreign_keys in `table_name`.
Given a string `table_name`, and an optional string `schema`, return
foreign key information as a list of dicts with these keys:
constrained_columns
a list of column names that make up the foreign key
referred_schema
the name of the referred schema
referred_table
the name of the referred table
referred_columns
a list of column names in the referred table that correspond to
constrained_columns
name
optional name of the foreign key constraint.
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
"""
return self.dialect.get_foreign_keys(self.bind, table_name, schema,
info_cache=self.info_cache,
**kw)
def get_indexes(self, table_name, schema=None, **kw):
"""Return information about indexes in `table_name`.
Given a string `table_name` and an optional string `schema`, return
index information as a list of dicts with these keys:
name
the index's name
column_names
list of column names in order
unique
boolean
dialect_options
dict of dialect-specific index options. May not be present
for all dialects.
.. versionadded:: 1.0.0
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
"""
return self.dialect.get_indexes(self.bind, table_name,
schema,
info_cache=self.info_cache, **kw)
def get_unique_constraints(self, table_name, schema=None, **kw):
"""Return information about unique constraints in `table_name`.
Given a string `table_name` and an optional string `schema`, return
unique constraint information as a list of dicts with these keys:
name
the unique constraint's name
column_names
list of column names in order
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
.. versionadded:: 0.8.4
"""
return self.dialect.get_unique_constraints(
self.bind, table_name, schema, info_cache=self.info_cache, **kw)
def get_check_constraints(self, table_name, schema=None, **kw):
"""Return information about check constraints in `table_name`.
Given a string `table_name` and an optional string `schema`, return
check constraint information as a list of dicts with these keys:
name
the check constraint's name
sqltext
the check constraint's SQL expression
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
.. versionadded:: 1.1.0
"""
return self.dialect.get_check_constraints(
self.bind, table_name, schema, info_cache=self.info_cache, **kw)
def reflecttable(self, table, include_columns, exclude_columns=()):
"""Given a Table object, load its internal constructs based on
introspection.
This is the underlying method used by most dialects to produce
table reflection. Direct usage is like::
from sqlalchemy import create_engine, MetaData, Table
from sqlalchemy.engine import reflection
engine = create_engine('...')
meta = MetaData()
user_table = Table('user', meta)
insp = Inspector.from_engine(engine)
insp.reflecttable(user_table, None)
:param table: a :class:`~sqlalchemy.schema.Table` instance.
:param include_columns: a list of string column names to include
in the reflection process. If ``None``, all columns are reflected.
"""
dialect = self.bind.dialect
schema = self.bind.schema_for_object(table)
table_name = table.name
# get table-level arguments that are specifically
# intended for reflection, e.g. oracle_resolve_synonyms.
# these are unconditionally passed to related Table
# objects
reflection_options = dict(
(k, table.dialect_kwargs.get(k))
for k in dialect.reflection_options
if k in table.dialect_kwargs
)
# reflect table options, like mysql_engine
tbl_opts = self.get_table_options(
table_name, schema, **table.dialect_kwargs)
if tbl_opts:
# add additional kwargs to the Table if the dialect
# returned them
table._validate_dialect_kwargs(tbl_opts)
if util.py2k:
if isinstance(schema, str):
schema = schema.decode(dialect.encoding)
if isinstance(table_name, str):
table_name = table_name.decode(dialect.encoding)
found_table = False
cols_by_orig_name = {}
for col_d in self.get_columns(
table_name, schema, **table.dialect_kwargs):
found_table = True
self._reflect_column(
table, col_d, include_columns,
exclude_columns, cols_by_orig_name)
if not found_table:
raise exc.NoSuchTableError(table.name)
self._reflect_pk(
table_name, schema, table, cols_by_orig_name, exclude_columns)
self._reflect_fk(
table_name, schema, table, cols_by_orig_name,
exclude_columns, reflection_options)
self._reflect_indexes(
table_name, schema, table, cols_by_orig_name,
include_columns, exclude_columns, reflection_options)
self._reflect_unique_constraints(
table_name, schema, table, cols_by_orig_name,
include_columns, exclude_columns, reflection_options)
self._reflect_check_constraints(
table_name, schema, table, cols_by_orig_name,
include_columns, exclude_columns, reflection_options)
def _reflect_column(
self, table, col_d, include_columns,
exclude_columns, cols_by_orig_name):
orig_name = col_d['name']
table.dispatch.column_reflect(self, table, col_d)
# fetch name again as column_reflect is allowed to
# change it
name = col_d['name']
if (include_columns and name not in include_columns) \
or (exclude_columns and name in exclude_columns):
return
coltype = col_d['type']
col_kw = dict(
(k, col_d[k])
for k in ['nullable', 'autoincrement', 'quote', 'info', 'key']
if k in col_d
)
colargs = []
if col_d.get('default') is not None:
# the "default" value is assumed to be a literal SQL
# expression, so is wrapped in text() so that no quoting
# occurs on re-issuance.
colargs.append(
sa_schema.DefaultClause(
sql.text(col_d['default']), _reflected=True
)
)
if 'sequence' in col_d:
self._reflect_col_sequence(col_d, colargs)
cols_by_orig_name[orig_name] = col = \
sa_schema.Column(name, coltype, *colargs, **col_kw)
if col.key in table.primary_key:
col.primary_key = True
table.append_column(col)
def _reflect_col_sequence(self, col_d, colargs):
if 'sequence' in col_d:
# TODO: mssql and sybase are using this.
seq = col_d['sequence']
sequence = sa_schema.Sequence(seq['name'], 1, 1)
if 'start' in seq:
sequence.start = seq['start']
if 'increment' in seq:
sequence.increment = seq['increment']
colargs.append(sequence)
def _reflect_pk(
self, table_name, schema, table,
cols_by_orig_name, exclude_columns):
pk_cons = self.get_pk_constraint(
table_name, schema, **table.dialect_kwargs)
if pk_cons:
pk_cols = [
cols_by_orig_name[pk]
for pk in pk_cons['constrained_columns']
if pk in cols_by_orig_name and pk not in exclude_columns
]
# update pk constraint name
table.primary_key.name = pk_cons.get('name')
# tell the PKConstraint to re-initialize
# its column collection
table.primary_key._reload(pk_cols)
def _reflect_fk(
self, table_name, schema, table, cols_by_orig_name,
exclude_columns, reflection_options):
fkeys = self.get_foreign_keys(
table_name, schema, **table.dialect_kwargs)
for fkey_d in fkeys:
conname = fkey_d['name']
# look for columns by orig name in cols_by_orig_name,
# but support columns that are in-Python only as fallback
constrained_columns = [
cols_by_orig_name[c].key
if c in cols_by_orig_name else c
for c in fkey_d['constrained_columns']
]
if exclude_columns and set(constrained_columns).intersection(
exclude_columns):
continue
referred_schema = fkey_d['referred_schema']
referred_table = fkey_d['referred_table']
referred_columns = fkey_d['referred_columns']
refspec = []
if referred_schema is not None:
sa_schema.Table(referred_table, table.metadata,
autoload=True, schema=referred_schema,
autoload_with=self.bind,
**reflection_options
)
for column in referred_columns:
refspec.append(".".join(
[referred_schema, referred_table, column]))
else:
sa_schema.Table(referred_table, table.metadata, autoload=True,
autoload_with=self.bind,
schema=sa_schema.BLANK_SCHEMA,
**reflection_options
)
for column in referred_columns:
refspec.append(".".join([referred_table, column]))
if 'options' in fkey_d:
options = fkey_d['options']
else:
options = {}
table.append_constraint(
sa_schema.ForeignKeyConstraint(constrained_columns, refspec,
conname, link_to_name=True,
**options))
def _reflect_indexes(
self, table_name, schema, table, cols_by_orig_name,
include_columns, exclude_columns, reflection_options):
# Indexes
indexes = self.get_indexes(table_name, schema)
for index_d in indexes:
name = index_d['name']
columns = index_d['column_names']
unique = index_d['unique']
flavor = index_d.get('type', 'index')
dialect_options = index_d.get('dialect_options', {})
duplicates = index_d.get('duplicates_constraint')
if include_columns and \
not set(columns).issubset(include_columns):
util.warn(
"Omitting %s key for (%s), key covers omitted columns." %
(flavor, ', '.join(columns)))
continue
if duplicates:
continue
# look for columns by orig name in cols_by_orig_name,
# but support columns that are in-Python only as fallback
idx_cols = []
for c in columns:
try:
idx_col = cols_by_orig_name[c] \
if c in cols_by_orig_name else table.c[c]
except KeyError:
util.warn(
"%s key '%s' was not located in "
"columns for table '%s'" % (
flavor, c, table_name
))
else:
idx_cols.append(idx_col)
sa_schema.Index(
name, *idx_cols,
**dict(list(dialect_options.items()) + [('unique', unique)])
)
def _reflect_unique_constraints(
self, table_name, schema, table, cols_by_orig_name,
include_columns, exclude_columns, reflection_options):
# Unique Constraints
try:
constraints = self.get_unique_constraints(table_name, schema)
except NotImplementedError:
# optional dialect feature
return
for const_d in constraints:
conname = const_d['name']
columns = const_d['column_names']
duplicates = const_d.get('duplicates_index')
if include_columns and \
not set(columns).issubset(include_columns):
util.warn(
"Omitting unique constraint key for (%s), "
"key covers omitted columns." %
', '.join(columns))
continue
if duplicates:
continue
# look for columns by orig name in cols_by_orig_name,
# but support columns that are in-Python only as fallback
constrained_cols = []
for c in columns:
try:
constrained_col = cols_by_orig_name[c] \
if c in cols_by_orig_name else table.c[c]
except KeyError:
util.warn(
"unique constraint key '%s' was not located in "
"columns for table '%s'" % (c, table_name))
else:
constrained_cols.append(constrained_col)
table.append_constraint(
sa_schema.UniqueConstraint(*constrained_cols, name=conname))
def _reflect_check_constraints(
self, table_name, schema, table, cols_by_orig_name,
include_columns, exclude_columns, reflection_options):
try:
constraints = self.get_check_constraints(table_name, schema)
except NotImplementedError:
# optional dialect feature
return
for const_d in constraints:
table.append_constraint(
sa_schema.CheckConstraint(**const_d))
| mit | 8,901,063,157,590,984,000 | 35.238896 | 78 | 0.573558 | false |
Scarzy/LazyWorship | python_src/read_wav.py | 1 | 1312 | import wave, struct
import json
def getAmplitude(windowData):
accumulator = 0
for i in range(0, len(windowData)):
accumulator += abs(windowData[i])
amplitude = accumulator / len(windowData)
return amplitude
def readWaveData(waveFile):
# read wave data
length = waveFile.getnframes()
waveData = []
for i in range(0, length):
waveDataTemp = waveFile.readframes(1)
data = struct.unpack("<h", waveDataTemp)
#print int(data[0])
waveData.append(int(data[0]))
return (waveData, length)
waveFile = wave.open('sine.wav', 'r')
waveData, length = readWaveData(waveFile);
print 'length = ', length
frameRate = waveFile.getframerate()
print 'frame rate = ', frameRate
windowSizeMS = 100
windowSizeFrames = int((windowSizeMS * 0.001) * frameRate) + 1
print 'windowSizeFrames = ', windowSizeFrames
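# Editor's note (illustrative): with windowSizeMS = 100 and a hypothetical
# 44100 Hz file, windowSizeFrames = int(0.1 * 44100) + 1 = 4411 frames per
# window; the "+ 1" guarantees at least one frame even for very small windows.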
windowStart = 0
amplitudeList = []
while windowStart < length:
window = waveData[windowStart:windowStart+windowSizeFrames]
amplitudeList.append( getAmplitude(window) )
windowStart += windowSizeFrames
for i in range(0, len(amplitudeList)):
print amplitudeList[i]
sample = {'ObjectInterpolator': 1629, 'PointInterpolator': 1675, 'RectangleInterpolator': 2042}
with open('result.json', 'w') as fp:
json.dump(sample, fp) | gpl-3.0 | -523,624,021,010,301,440 | 22.872727 | 96 | 0.692073 | false |
adrienbrault/home-assistant | homeassistant/components/alexa/const.py | 5 | 5980 | """Constants for the Alexa integration."""
from collections import OrderedDict
from homeassistant.components.climate import const as climate
from homeassistant.const import TEMP_CELSIUS, TEMP_FAHRENHEIT
DOMAIN = "alexa"
EVENT_ALEXA_SMART_HOME = "alexa_smart_home"
# Flash briefing constants
CONF_UID = "uid"
CONF_TITLE = "title"
CONF_AUDIO = "audio"
CONF_TEXT = "text"
CONF_DISPLAY_URL = "display_url"
CONF_FILTER = "filter"
CONF_ENTITY_CONFIG = "entity_config"
CONF_ENDPOINT = "endpoint"
CONF_LOCALE = "locale"
ATTR_UID = "uid"
ATTR_UPDATE_DATE = "updateDate"
ATTR_TITLE_TEXT = "titleText"
ATTR_STREAM_URL = "streamUrl"
ATTR_MAIN_TEXT = "mainText"
ATTR_REDIRECTION_URL = "redirectionURL"
SYN_RESOLUTION_MATCH = "ER_SUCCESS_MATCH"
DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.0Z"
API_DIRECTIVE = "directive"
API_ENDPOINT = "endpoint"
API_EVENT = "event"
API_CONTEXT = "context"
API_HEADER = "header"
API_PAYLOAD = "payload"
API_SCOPE = "scope"
API_CHANGE = "change"
API_PASSWORD = "password"
CONF_DISPLAY_CATEGORIES = "display_categories"
CONF_SUPPORTED_LOCALES = (
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"es-MX",
"es-US",
"fr-CA",
"fr-FR",
"hi-IN",
"it-IT",
"ja-JP",
"pt-BR",
)
API_TEMP_UNITS = {TEMP_FAHRENHEIT: "FAHRENHEIT", TEMP_CELSIUS: "CELSIUS"}
# Needs to be an ordered dict for `async_api_set_thermostat_mode`, which builds a
# reverse mapping of this dict; we want the first occurrence of OFF to map back
# to the HA state.
API_THERMOSTAT_MODES = OrderedDict(
[
(climate.HVAC_MODE_HEAT, "HEAT"),
(climate.HVAC_MODE_COOL, "COOL"),
(climate.HVAC_MODE_HEAT_COOL, "AUTO"),
(climate.HVAC_MODE_AUTO, "AUTO"),
(climate.HVAC_MODE_OFF, "OFF"),
(climate.HVAC_MODE_FAN_ONLY, "OFF"),
(climate.HVAC_MODE_DRY, "CUSTOM"),
]
)
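# Editor's illustrative sketch (not part of the original module): the ordering
# above matters because a reverse (Alexa -> HA) lookup should resolve duplicate
# values such as "AUTO" and "OFF" to the first HA mode listed. One way to build
# such a reverse map (the real handler may do this differently):
def _example_reverse_thermostat_modes():
    """Return an Alexa->HA map in which the first occurrence of a value wins."""
    reverse = {}
    for ha_mode, alexa_mode in API_THERMOSTAT_MODES.items():
        reverse.setdefault(alexa_mode, ha_mode)
    return reverse  # e.g. reverse["OFF"] is climate.HVAC_MODE_OFF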
API_THERMOSTAT_MODES_CUSTOM = {climate.HVAC_MODE_DRY: "DEHUMIDIFY"}
API_THERMOSTAT_PRESETS = {climate.PRESET_ECO: "ECO"}
class Cause:
"""Possible causes for property changes.
https://developer.amazon.com/docs/smarthome/state-reporting-for-a-smart-home-skill.html#cause-object
"""
# Indicates that the event was caused by a customer interaction with an
# application. For example, a customer switches on a light, or locks a door
# using the Alexa app or an app provided by a device vendor.
APP_INTERACTION = "APP_INTERACTION"
# Indicates that the event was caused by a physical interaction with an
# endpoint. For example manually switching on a light or manually locking a
# door lock
PHYSICAL_INTERACTION = "PHYSICAL_INTERACTION"
# Indicates that the event was caused by the periodic poll of an appliance,
# which found a change in value. For example, you might poll a temperature
# sensor every hour, and send the updated temperature to Alexa.
PERIODIC_POLL = "PERIODIC_POLL"
# Indicates that the event was caused by the application of a device rule.
# For example, a customer configures a rule to switch on a light if a
# motion sensor detects motion. In this case, Alexa receives an event from
# the motion sensor, and another event from the light to indicate that its
# state change was caused by the rule.
RULE_TRIGGER = "RULE_TRIGGER"
# Indicates that the event was caused by a voice interaction with Alexa.
# For example a user speaking to their Echo device.
VOICE_INTERACTION = "VOICE_INTERACTION"
class Inputs:
"""Valid names for the InputController.
https://developer.amazon.com/docs/device-apis/alexa-property-schemas.html#input
"""
VALID_SOURCE_NAME_MAP = {
"antenna": "TUNER",
"antennatv": "TUNER",
"aux": "AUX 1",
"aux1": "AUX 1",
"aux2": "AUX 2",
"aux3": "AUX 3",
"aux4": "AUX 4",
"aux5": "AUX 5",
"aux6": "AUX 6",
"aux7": "AUX 7",
"bluray": "BLURAY",
"blurayplayer": "BLURAY",
"cable": "CABLE",
"cd": "CD",
"coax": "COAX 1",
"coax1": "COAX 1",
"coax2": "COAX 2",
"composite": "COMPOSITE 1",
"composite1": "COMPOSITE 1",
"dvd": "DVD",
"game": "GAME",
"gameconsole": "GAME",
"hdradio": "HD RADIO",
"hdmi": "HDMI 1",
"hdmi1": "HDMI 1",
"hdmi2": "HDMI 2",
"hdmi3": "HDMI 3",
"hdmi4": "HDMI 4",
"hdmi5": "HDMI 5",
"hdmi6": "HDMI 6",
"hdmi7": "HDMI 7",
"hdmi8": "HDMI 8",
"hdmi9": "HDMI 9",
"hdmi10": "HDMI 10",
"hdmiarc": "HDMI ARC",
"input": "INPUT 1",
"input1": "INPUT 1",
"input2": "INPUT 2",
"input3": "INPUT 3",
"input4": "INPUT 4",
"input5": "INPUT 5",
"input6": "INPUT 6",
"input7": "INPUT 7",
"input8": "INPUT 8",
"input9": "INPUT 9",
"input10": "INPUT 10",
"ipod": "IPOD",
"line": "LINE 1",
"line1": "LINE 1",
"line2": "LINE 2",
"line3": "LINE 3",
"line4": "LINE 4",
"line5": "LINE 5",
"line6": "LINE 6",
"line7": "LINE 7",
"mediaplayer": "MEDIA PLAYER",
"optical": "OPTICAL 1",
"optical1": "OPTICAL 1",
"optical2": "OPTICAL 2",
"phono": "PHONO",
"playstation": "PLAYSTATION",
"playstation3": "PLAYSTATION 3",
"playstation4": "PLAYSTATION 4",
"rokumediaplayer": "MEDIA PLAYER",
"satellite": "SATELLITE",
"satellitetv": "SATELLITE",
"smartcast": "SMARTCAST",
"tuner": "TUNER",
"tv": "TV",
"usbdac": "USB DAC",
"video": "VIDEO 1",
"video1": "VIDEO 1",
"video2": "VIDEO 2",
"video3": "VIDEO 3",
"xbox": "XBOX",
}
VALID_SOUND_MODE_MAP = {
"movie": "MOVIE",
"music": "MUSIC",
"night": "NIGHT",
"sport": "SPORT",
"tv": "TV",
}
| mit | -140,176,370,243,246,670 | 28.170732 | 104 | 0.582107 | false |
niphlod/w2p_scheduler_tests | languages/ro.py | 1 | 16746 | # coding: utf8
{
'!=': '!=',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" (actualizează) este o expresie opțională precum "câmp1=\'valoare_nouă\'". Nu puteți actualiza sau șterge rezultatele unui JOIN',
'%(nrows)s records found': '%(nrows)s înregistrări găsite',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'%s rows deleted': '%s linii șterse',
'%s rows updated': '%s linii actualizate',
'(something like "it-it")': '(ceva ce seamănă cu "it-it")',
'<': '<',
'<=': '<=',
'=': '=',
'>': '>',
'>=': '>=',
'A new version of web2py is available': 'O nouă versiune de web2py este disponibilă',
'A new version of web2py is available: %s': 'O nouă versiune de web2py este disponibilă: %s',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'ATENȚIE: Nu vă puteți conecta decât utilizând o conexiune securizată (HTTPS) sau rulând aplicația pe computerul local.',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'ATENȚIE: Nu puteți efectua mai multe teste o dată deoarece lansarea în execuție a mai multor subpocese nu este sigură.',
'ATTENTION: you cannot edit the running application!': 'ATENȚIE: nu puteți edita o aplicație în curs de execuție!',
'About': 'Despre',
'About application': 'Despre aplicație',
'Access Control': 'Control acces',
'Add': 'Adaugă',
'Admin is disabled because insecure channel': 'Adminstrarea este dezactivată deoarece conexiunea nu este sigură',
'Admin is disabled because unsecure channel': 'Administrarea este dezactivată deoarece conexiunea nu este securizată',
'Administration': 'Administrare',
'Administrative Interface': 'Interfață administrare',
'Administrator Password:': 'Parolă administrator:',
'Ajax Recipes': 'Rețete Ajax',
'And': 'Și',
'Are you sure you want to delete file "%s"?': 'Sigur ștergeți fișierul "%s"?',
'Are you sure you want to delete this object?': 'Sigur ștergeți acest obiect?',
'Are you sure you want to uninstall application "%s"': 'Sigur dezinstalați aplicația "%s"',
'Are you sure you want to uninstall application "%s"?': 'Sigur dezinstalați aplicația "%s"?',
'Authentication': 'Autentificare',
'Available databases and tables': 'Baze de date și tabele disponibile',
'Back': 'Înapoi',
'Buy this book': 'Cumpără această carte',
'Cache Keys': 'Chei cache',
'Cannot be empty': 'Nu poate fi vid',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': 'Compilare imposibilă: aplicația conține erori. Debogați aplicația și încercați din nou.',
'Change Password': 'Schimbare parolă',
'Change password': 'Schimbare parolă',
'Check to delete': 'Coșați pentru a șterge',
'Clear': 'Golește',
'Client IP': 'IP client',
'Community': 'Comunitate',
'Components and Plugins': 'Componente și plugin-uri',
'Controller': 'Controlor',
'Controllers': 'Controlori',
'Copyright': 'Drepturi de autor',
'Create new application': 'Creați aplicație nouă',
'Current request': 'Cerere curentă',
'Current response': 'Răspuns curent',
'Current session': 'Sesiune curentă',
'DB Model': 'Model bază de date',
'DESIGN': 'DESIGN',
'Database': 'Baza de date',
'Date and Time': 'Data și ora',
'Delete': 'Șterge',
'Delete:': 'Șterge:',
'Demo': 'Demo',
'Deploy on Google App Engine': 'Instalare pe Google App Engine',
'Deployment Recipes': 'Rețete de instalare',
'Description': 'Descriere',
'Design for': 'Design pentru',
'Disk Cache Keys': 'Chei cache de disc',
'Documentation': 'Documentație',
"Don't know what to do?": 'Nu știți ce să faceți?',
'Download': 'Descărcare',
'E-mail': 'E-mail',
'E-mail invalid': 'E-mail invalid',
'EDIT': 'EDITARE',
'Edit': 'Editare',
'Edit Profile': 'Editare profil',
'Edit This App': 'Editați această aplicație',
'Edit application': 'Editare aplicație',
'Edit current record': 'Editare înregistrare curentă',
'Editing file': 'Editare fișier',
'Editing file "%s"': 'Editare fișier "%s"',
'Email and SMS': 'E-mail și SMS',
'Error logs for "%(app)s"': 'Log erori pentru "%(app)s"',
'Errors': 'Erori',
'Export': 'Export',
'FAQ': 'Întrebări frecvente',
'False': 'Neadevărat',
'First name': 'Prenume',
'Forbidden': 'Interzis',
'Forms and Validators': 'Formulare și validatori',
'Free Applications': 'Aplicații gratuite',
'Functions with no doctests will result in [passed] tests.': 'Funcțiile fără doctests vor genera teste [trecute].',
'Group %(group_id)s created': 'Grup %(group_id)s creat',
'Group ID': 'ID grup',
'Group uniquely assigned to user %(id)s': 'Grup asociat în mod unic utilizatorului %(id)s',
'Groups': 'Grupuri',
'Hello World': 'Salutare lume',
'Home': 'Acasă',
'How did you get here?': 'Cum ați ajuns aici?',
'Import/Export': 'Import/Export',
'Index': 'Index',
'Installed applications': 'Aplicații instalate',
'Internal State': 'Stare internă',
'Introduction': 'Introducere',
'Invalid Query': 'Interogare invalidă',
'Invalid action': 'Acțiune invalidă',
'Invalid email': 'E-mail invalid',
'Invalid password': 'Parolă invalidă',
'Language files (static strings) updated': 'Fișierele de limbă (șirurile statice de caractere) actualizate',
'Languages': 'Limbi',
'Last name': 'Nume',
'Last saved on:': 'Ultima salvare:',
'Layout': 'Șablon',
'Layout Plugins': 'Șablon plugin-uri',
'Layouts': 'Șabloane',
'License for': 'Licență pentru',
'Live Chat': 'Chat live',
'Logged in': 'Logat',
'Logged out': 'Delogat',
'Login': 'Autentificare',
'Login to the Administrative Interface': 'Logare interfață de administrare',
'Logout': 'Ieșire',
'Lost Password': 'Parolă pierdută',
'Lost password?': 'Parolă pierdută?',
'Main Menu': 'Meniu principal',
'Menu Model': 'Model meniu',
'Models': 'Modele',
'Modules': 'Module',
'My Sites': 'Site-urile mele',
'NO': 'NU',
'Name': 'Nume',
'New': 'Nou',
'New Record': 'Înregistrare nouă',
'New password': 'Parola nouă',
'No databases in this application': 'Aplicație fără bază de date',
'Object or table name': 'Obiect sau nume de tabel',
'Old password': 'Parola veche',
'Online examples': 'Exemple online',
'Or': 'Sau',
'Origin': 'Origine',
'Original/Translation': 'Original/Traducere',
'Other Plugins': 'Alte plugin-uri',
'Other Recipes': 'Alte rețete',
'Overview': 'Prezentare de ansamblu',
'Password': 'Parola',
"Password fields don't match": 'Câmpurile de parolă nu se potrivesc',
'Peeking at file': 'Vizualizare fișier',
'Plugins': 'Plugin-uri',
'Powered by': 'Pus în mișcare de',
'Preface': 'Prefață',
'Profile': 'Profil',
'Python': 'Python',
'Query': 'Interogare',
'Query:': 'Interogare:',
'Quick Examples': 'Exemple rapide',
'RAM Cache Keys': 'Chei cache RAM',
'Recipes': 'Rețete',
'Record ID': 'ID înregistrare',
'Register': 'Înregistrare',
'Registration identifier': 'Identificator de autentificare',
'Registration key': 'Cheie înregistrare',
'Registration successful': 'Autentificare reușită',
'Remember me (for 30 days)': 'Ține-mă minte (timp de 30 de zile)',
'Request reset password': 'Cerere resetare parolă',
'Reset Password key': 'Cheie restare parolă',
'Resolve Conflict file': 'Fișier rezolvare conflict',
'Role': 'Rol',
'Rows in table': 'Linii în tabel',
'Rows selected': 'Linii selectate',
'Save profile': 'Salvează profil',
'Saved file hash:': 'Hash fișier salvat:',
'Search': 'Căutare',
'Semantic': 'Semantică',
'Services': 'Servicii',
'Static files': 'Fișiere statice',
'Stylesheet': 'Foaie de stiluri',
'Submit': 'Înregistrează',
'Support': 'Suport',
'Sure you want to delete this object?': 'Sigur ștergeți acest obiect?',
'Table name': 'Nume tabel',
'Testing application': 'Testare aplicație',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"Interogarea (query)" este o condiție de tipul "db.tabel1.câmp1==\'valoare\'". Ceva de genul "db.tabel1.câmp1==db.tabel2.câmp2" generează un JOIN SQL.',
'The Core': 'Nucleul',
'The Views': 'Vederile',
'The output of the file is a dictionary that was rendered by the view': 'Fișierul produce un dicționar care a fost prelucrat de vederea',
'There are no controllers': 'Nu există controlori',
'There are no models': 'Nu există modele',
'There are no modules': 'Nu există module',
'There are no static files': 'Nu există fișiere statice',
'There are no translators, only default language is supported': 'Nu există traduceri, doar limba implicită este suportată',
'There are no views': 'Nu există vederi',
'This App': 'Această aplicație',
'This is a copy of the scaffolding application': 'Aceasta este o copie a aplicației schelet',
'This is the %(filename)s template': 'Aceasta este șablonul fișierului %(filename)s',
'Ticket': 'Tichet',
'Timestamp': 'Moment în timp (timestamp)',
'True': 'Adevărat',
'Twitter': 'Twitter',
'Unable to check for upgrades': 'Imposibil de verificat dacă există actualizări',
'Unable to download': 'Imposibil de descărcat',
'Unable to download app': 'Imposibil de descărcat aplicația',
'Update:': 'Actualizare:',
'Upload existing application': 'Încarcă aplicația existentă',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Folosiți (...)&(...) pentru AND, (...)|(...) pentru OR, și ~(...) pentru NOT, pentru a crea interogări complexe.',
'User %(id)s Logged-in': 'Utilizator %(id)s autentificat',
'User %(id)s Logged-out': 'Utilizator %(id)s delogat',
'User %(id)s Password changed': 'Parola utilizatorului %(id)s a fost schimbată',
'User %(id)s Password reset': 'Resetare parola utilizator %(id)s',
'User %(id)s Profile updated': 'Profil utilizator %(id)s actualizat',
'User %(id)s Registered': 'Utilizator %(id)s înregistrat',
'User ID': 'ID utilizator',
'Verify Password': 'Verifică parola',
'Videos': 'Video-uri',
'View': 'Vedere',
'Views': 'Vederi',
'Welcome': 'Bine ați venit',
'Welcome %s': 'Bine ați venit %s',
'Welcome to web2py': 'Bun venit la web2py',
'Welcome to web2py!': 'Bun venit la web2py!',
'Which called the function': 'Care a apelat funcția',
'YES': 'DA',
'You are successfully running web2py': 'Rulați cu succes web2py',
'You can modify this application and adapt it to your needs': 'Puteți modifica și adapta aplicația nevoilor dvs.',
'You visited the url': 'Ați vizitat adresa',
'about': 'despre',
'additional code for your application': 'cod suplimentar pentru aplicația dvs.',
'admin disabled because no admin password': 'administrare dezactivată deoarece parola de administrator nu a fost furnizată',
'admin disabled because not supported on google app engine': 'administrare dezactivată deoarece funcționalitatea nu e suportat pe Google App Engine',
'admin disabled because unable to access password file': 'administrare dezactivată deoarece nu există acces la fișierul cu parole',
'and rename it (required):': 'și renumiți (obligatoriu):',
'and rename it:': ' și renumiți:',
'appadmin': 'appadmin',
'appadmin is disabled because insecure channel': 'appadmin dezactivat deoarece conexiunea nu e sigură',
'application "%s" uninstalled': 'aplicația "%s" a fost dezinstalată',
'application compiled': 'aplicația a fost compilată',
'application is compiled and cannot be designed': 'aplicația este compilată și nu poate fi editată',
'cache': 'cache',
'cache, errors and sessions cleaned': 'cache, erori și sesiuni golite',
'cannot create file': 'fișier imposibil de creat',
'cannot upload file "%(filename)s"': 'imposibil de încărcat fișierul "%(filename)s"',
'change password': 'schimbare parolă',
'check all': 'coșați tot',
'clean': 'golire',
'click to check for upgrades': 'Clic pentru a verifica dacă există upgrade-uri',
'compile': 'compilare',
'compiled application removed': 'aplicația compilată a fost ștearsă',
'contains': 'conține',
'controllers': 'controlori',
'create file with filename:': 'crează fișier cu numele:',
'create new application:': 'crează aplicație nouă:',
'crontab': 'crontab',
'currently saved or': 'în prezent salvat sau',
'customize me!': 'Personalizează-mă!',
'data uploaded': 'date încărcate',
'database': 'bază de date',
'database %s select': 'selectare bază de date %s',
'database administration': 'administrare bază de date',
'db': 'db',
'defines tables': 'definire tabele',
'delete': 'șterge',
'delete all checked': 'șterge tot ce e coșat',
'design': 'design',
'done!': 'gata!',
'edit': 'editare',
'edit controller': 'editare controlor',
'edit profile': 'editare profil',
'enter a number between %(min)g and %(max)g': 'introduceți un număr între %(min)g și %(max)g',
'enter an integer between %(min)g and %(max)g': 'introduceți un întreg între %(min)g și %(max)g',
'errors': 'erori',
'export as csv file': 'exportă ca fișier csv',
'exposes': 'expune',
'extends': 'extinde',
'failed to reload module': 'reîncarcare modul nereușită',
'file "%(filename)s" created': 'fișier "%(filename)s" creat',
'file "%(filename)s" deleted': 'fișier "%(filename)s" șters',
'file "%(filename)s" uploaded': 'fișier "%(filename)s" încărcat',
'file "%(filename)s" was not deleted': 'fișierul "%(filename)s" n-a fost șters',
'file "%s" of %s restored': 'fișier "%s" de %s restaurat',
'file changed on disk': 'fișier modificat pe disc',
'file does not exist': 'fișier inexistent',
'file saved on %(time)s': 'fișier salvat %(time)s',
'file saved on %s': 'fișier salvat pe %s',
'help': 'ajutor',
'htmledit': 'editare html',
'includes': 'include',
'insert new': 'adaugă nou',
'insert new %s': 'adaugă nou %s',
'internal error': 'eroare internă',
'invalid password': 'parolă invalidă',
'invalid request': 'cerere invalidă',
'invalid ticket': 'tichet invalid',
'language file "%(filename)s" created/updated': 'fișier de limbă "%(filename)s" creat/actualizat',
'languages': 'limbi',
'languages updated': 'limbi actualizate',
'loading...': 'încarc...',
'located in the file': 'prezentă în fișierul',
'login': 'autentificare',
'logout': 'ieșire',
'merge': 'unește',
'models': 'modele',
'modules': 'module',
'new application "%s" created': 'aplicația nouă "%s" a fost creată',
'new record inserted': 'înregistrare nouă adăugată',
'next 100 rows': 'următoarele 100 de linii',
'or import from csv file': 'sau importă din fișier csv',
'or provide application url:': 'sau furnizează adresă url:',
'pack all': 'împachetează toate',
'pack compiled': 'pachet compilat',
'please input your password again': 'introduceți parola din nou',
'previous 100 rows': '100 de linii anterioare',
'record': 'înregistrare',
'record does not exist': 'înregistrare inexistentă',
'record id': 'id înregistrare',
'register': 'înregistrare',
'remove compiled': 'șterge compilate',
'restore': 'restaurare',
'revert': 'revenire',
'save': 'salvare',
'selected': 'selectat(e)',
'session expired': 'sesiune expirată',
'shell': 'line de commandă',
'site': 'site',
'some files could not be removed': 'anumite fișiere n-au putut fi șterse',
'starts with': 'începe cu',
'state': 'stare',
'static': 'static',
'table': 'tabel',
'test': 'test',
'the application logic, each URL path is mapped in one exposed function in the controller': 'logica aplicației, fiecare rută URL este mapată într-o funcție expusă de controlor',
'the data representation, define database tables and sets': 'reprezentarea datelor, definește tabelele bazei de date și seturile (de date)',
'the presentations layer, views are also known as templates': 'nivelul de prezentare, vederile sunt de asemenea numite și șabloane',
'these files are served without processing, your images go here': 'aceste fișiere sunt servite fără procesare, imaginea se plasează acolo',
'to previous version.': 'la versiunea anterioară.',
'too short': 'prea scurt',
'translation strings for the application': 'șiruri de caractere folosite la traducerea aplicației',
'try': 'încearcă',
'try something like': 'încearcă ceva de genul',
'unable to create application "%s"': 'imposibil de creat aplicația "%s"',
'unable to delete file "%(filename)s"': 'imposibil de șters fișierul "%(filename)s"',
'unable to parse csv file': 'imposibil de analizat fișierul csv',
'unable to uninstall "%s"': 'imposibil de dezinstalat "%s"',
'uncheck all': 'decoșează tot',
'uninstall': 'dezinstalează',
'update': 'actualizează',
'update all languages': 'actualizează toate limbile',
'upload application:': 'incarcă aplicația:',
'upload file:': 'încarcă fișier:',
'value already in database or empty': 'Valoare existentă în baza de date sau vidă',
'versioning': 'versiuni',
'view': 'vedere',
'views': 'vederi',
'web2py Recent Tweets': 'Ultimele tweet-uri web2py',
'web2py is up to date': 'web2py este la zi',
}
| lgpl-3.0 | 8,991,768,111,454,995,000 | 45.245763 | 294 | 0.709059 | false |
ElDeveloper/qiime | tests/test_make_2d_plots.py | 15 | 13517 | #!/usr/bin/env python
# file test_make_2d_plots.py
__author__ = "Jesse Stombaugh"
__copyright__ = "Copyright 2011, The QIIME Project" # consider project name
# remember to add yourself
__credits__ = ["Jesse Stombaugh", "Jose Antonio Navas Molina"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jesse Stombaugh"
__email__ = "jesse.stombaugh@colorado.edu"
from string import digits
import matplotlib
from matplotlib import use
use('Agg', warn=False)
from numpy import array
from os.path import exists, join
from StringIO import StringIO
from unittest import TestCase, main
from numpy.testing import assert_almost_equal
from os import remove
from qiime.make_2d_plots import (make_interactive_scatter, transform_xy_coords,
draw_scatterplot, draw_pcoa_graph,
extract_and_color_xy_coords, write_html_file,
create_html_filename,
convert_coord_data_to_dict, generate_xmap,
draw_scree_graph, make_line_plot)
from qiime.colors import data_colors
from qiime.util import get_qiime_temp_dir
class TopLevelTests(TestCase):
"""Tests of top-level functions"""
def setUp(self):
"""define some top-level data"""
self.tmp_dir = get_qiime_temp_dir()
self.props = {
"title": "PCoA - PC1 vs PC2",
"ylabel": "PC2",
"xlabel": "PC1"}
self.props_scree = {
"title": "Scree plor",
"ylabel": "Fraction of variance",
"xlabel": "Principal component"}
self.data = {}
self.data['coord'] = [['Sample1', 'Sample2'], array([[-0.2, 0.07],
[-0.04, 0.2]]), array(
[0.7, 0.6]),
array([25.00, 30.00])]
self.data[
'map'] = [['#SampleID', 'Day'], ['Sample1', 'Day1'], ['Sample2',
'Day1']]
self.coord_tups = [("1", "2"), ("3", "2"), ("1", "3")]
self.generate_eps = True
self.data['alpha'] = 0.33
self.groups = {}
self.groups['Day1'] = ['Sample1', 'Sample2']
self.colors = {}
self.colors['Day1'] = 'blue1'
self.prefs = {}
self.prefs['Sample'] = {}
self.prefs['Sample']['column'] = 'Day'
self.data_color_hsv = {
'blue1': (240, 100, 100)
}
self.data_color_order = ['blue1', []]
self.background_color = 'black'
self.label_color = 'white'
self.dir_path = '/tmp/'
self.data_file_link = '/tmp/'
self.xy_coords = {}
self.xy_coords['Sample1'] = ([-0.2], [0.07], ['Sample1: Day1'],
['#0000ff'], ['s'], [None], [None], [None])
self.xy_coords['Sample2'] = ([-0.04], [0.2], ['Sample2: Day1'],
['#0000ff'], ['s'], [None], [None], [None])
self.xy_coords_scree = {}
self.xy_coords_scree['Variance'] = ([1, 2], [0.28, 0.12], 's', 'b')
self.xy_coords_scree['Cum Variance'] = ([1, 2], [0.28, 0.40], 'o', 'r')
self.coord_1 = '1'
self.coord_2 = '2'
self.p2d = {}
self.p2d['Sample1'] = -0.2
self.p2d['Sample2'] = -0.04
self.p1d = {}
self.p1d['Sample1'] = 0.07
self.p1d['Sample2'] = 0.2
self.all_cids = {}
self.all_cids = ['Sample1: Day1', 'Sample2: Day1']
self.all_xcoords = [100.79999999999998, 279.36000000000001]
self.all_ycoords = [54.000000000000014, 288.0]
self.plot_label = 'SampleID'
self.coords = {'pc vector number': ['Sample1', 'Sample2'], '1':
array([-0.2, -0.04]), '2': array([0.07, 0.2])}
self.x_len = 4.5
self.y_len = 4.5
self.size = 20
self.alpha = 0.33
self._paths_to_clean_up = []
def tearDown(self):
map(remove, self._paths_to_clean_up)
def remove_nums(self, text):
"""Removes all digits from the given string.
        Returns the string with all digits removed. Useful for testing strings
for equality in unit tests where you don't care about numeric values,
or if some values are random.
This code was taken from http://bytes.com/topic/python/answers/
850562-finding-all-numbers-string-replacing
Arguments:
text - the string to remove digits from
"""
return text.translate(None, digits)
def test_make_line_plot(self):
""" make_line_plot: creates HTML source for scree plot"""
filename1 = join(self.tmp_dir, 'scree_plot.png')
filename2 = join(self.tmp_dir, 'scree_plot.eps.gz')
self._paths_to_clean_up = [filename1, filename2]
obs1, obs2 = make_line_plot(self.tmp_dir, self.tmp_dir,
self.background_color, self.label_color,
self.xy_coords_scree, self.props_scree,
x_len=4.5, y_len=4.5, generate_eps=True)
self.assertEqual(obs1, filename_scree % filename1)
self.assertEqual(obs2, expdownlink_scree % filename2)
self.assertTrue(
exists(filename1),
            'The png file was not created in the appropriate location')
self.assertTrue(
exists(filename2),
            'The eps file was not created in the appropriate location')
def test_make_interactive_scatter(self):
"""make_interactive_scatter: creates HTML source for interactive \
images"""
filename1 = '/tmp/PC1_vs_PC2_plot.png'
filename2 = '/tmp/PC1vsPC2plot.eps.gz'
self._paths_to_clean_up = [filename1, filename2]
obs1, obs2, obs3 = make_interactive_scatter(
self.plot_label, self.dir_path,
self.data_file_link, self.background_color,
self.label_color, None, self.alpha,
self.xy_coords, self.props,
self.x_len, self.y_len, self.size,
draw_axes=False, generate_eps=True)
self.assertEqual(self.remove_nums(obs1), self.remove_nums(expsrcmap1))
self.assertEqual(self.remove_nums(obs2), self.remove_nums(expimgmap1))
self.assertEqual(self.remove_nums(obs3), self.remove_nums(expeps1))
self.assertTrue(exists(filename1), 'The png file was not created in \
the appropriate location')
self.assertTrue(exists(filename2), 'The eps file was not created in \
the appropriate location')
def test_generate_xmap(self):
"""generate_xmap: generates the html area map"""
exp2 = 360
exp3 = 360
obs1, obs2, obs3 = generate_xmap(self.x_len, self.y_len, self.all_cids,
self.all_xcoords, self.all_ycoords)
self.assertEqual(obs1, exparea)
self.assertEqual(obs2, exp2)
self.assertEqual(obs3, exp3)
def test_draw_scatterplot(self):
"""draw_scatterplot: draws the matplotlib scatterplot"""
exp = array([[-0.04, 0.2]])
sc_plot = draw_scatterplot(self.props, self.xy_coords, self.x_len,
self.y_len, self.size,
self.background_color, self.label_color, None,
self.alpha)
obs = sc_plot.get_offsets()
assert_almost_equal(obs, exp)
def test_transform_xy_coords(self):
"""transform_xy_coords: transforms the xy coords from the matplotlib \
plot into html spatial coords which allows for mouseovers"""
sc_plot = draw_scatterplot(self.props, self.xy_coords, self.x_len,
self.y_len, self.size,
self.background_color, self.label_color, None,
self.alpha)
obs1, obs2, obs3 = transform_xy_coords(self.xy_coords, sc_plot)
self.assertEqual(len(obs1), len(self.all_cids))
self.assertEqual(len(obs2), len(self.all_xcoords))
self.assertEqual(len(obs3), len(self.all_ycoords))
def test_draw_scree_graph(self):
"""draw_scree_graph: draws the matplotlib figure"""
filename1 = join(self.tmp_dir, 'scree_plot.png')
filename2 = join(self.tmp_dir, 'scree_plot.eps.gz')
self._paths_to_clean_up = [filename1, filename2]
obs1, obs2 = draw_scree_graph(self.tmp_dir, self.tmp_dir,
self.background_color, self.label_color,
generate_eps=True, data=self.data)
self.assertEqual(obs1, expimgsrc_scree % filename1)
self.assertEqual(obs2, expdownlink_scree % filename2)
self.assertTrue(
exists(filename1),
'The png file was not created in the appropriate location')
self.assertTrue(
exists(filename2),
'The eps file was not created in the appropriate location')
def test_draw_pcoa_graph(self):
"""draw_pcoa_graph: draws the matplotlib figure"""
filename1 = '/tmp/PC1_vs_PC2_plot.png'
filename2 = '/tmp/PC1vsPC2plot.eps.gz'
self._paths_to_clean_up = [filename1, filename2]
obs1, obs2 = draw_pcoa_graph(self.plot_label, self.dir_path,
self.data_file_link, self.coord_1, self.coord_2,
None, None, None, None,
self.data, self.prefs, self.groups, self.colors,
self.background_color, self.label_color,
data_colors, self.data_color_order,
generate_eps=True)
self.assertEqual(obs1, expsrcmap2 + expimgmap2)
self.assertEqual(obs2, expeps2)
self.assertTrue(exists(filename1), 'The png file was not created in \
the appropriate location')
self.assertTrue(exists(filename2), 'The eps file was not created in \
the appropriate location')
def test_extract_and_color_xy_coords(self):
"""extract_and_color_xy_coords: gets coords from coords file and \
associates colors to those coords based on its group"""
obs = extract_and_color_xy_coords(
self.p1d, self.p2d, None, None, None, self.colors,
data_colors, self.groups, self.coords)
self.assertEqual(obs['Sample1'], self.xy_coords['Sample1'])
self.assertEqual(obs['Sample2'], self.xy_coords['Sample2'])
def test_create_html_filename(self):
"""create_html_filename: using the pcoa filename, generates an html \
filename for the plots"""
exp = 'test_2D.html'
obs = create_html_filename(
coord_filename='test',
name_ending='_2D.html')
self.assertEqual(obs, exp)
def test_convert_coord_data_to_dict(self):
"""convert_coord_data_to_dict: converts the coords list into a \
dictionary"""
exp1 = {
'pc vector number': ['Sample1', 'Sample2'],
'1': array([-0.2, -0.04]),
'2': array([0.07, 0.2])}
exp2 = {'1': [25.00], '2': [30.00], }
obs1, obs2 = convert_coord_data_to_dict(self.data)
self.assertEqual(exp1['pc vector number'], obs1['pc vector number'])
assert_almost_equal(exp1['1'], obs1['1'])
assert_almost_equal(exp1['2'], obs1['2'])
assert_almost_equal(exp2['1'], obs2['1'])
assert_almost_equal(exp2['2'], obs2['2'])
def test_write_html_file(self):
"Write html and make sure it gets cleaned up"""
filename1 = '/tmp/test.html'
self._paths_to_clean_up = [filename1]
write_html_file('Test', '/tmp/test.html')
self.assertTrue(exists(filename1), 'The file was not created in \
the appropriate location')
# expected results for the unit testing
exparea = [
'<AREA shape="circle" coords="100,306,5" href="#Sample1: Day1" onmouseover="return overlib(\'Sample1: Day1\');" onmouseout="return nd();">\n',
'<AREA shape="circle" coords="279,72,5" href="#Sample2: Day1" onmouseover="return overlib(\'Sample2: Day1\');" onmouseout="return nd();">\n']
expsrcmap1 = '<img src="/tmp/PC1_vs_PC2_plot.png" border="0" ismap usemap="#pointsSampleID12" width="360" height="360" />\n'
expimgmap1 = '\n<MAP name="pointsSampleID12">\n\
<AREA shape="circle" coords="100,306,5" href="#Sample1: Day1" onmouseover="return overlib(\'Sample1: Day1\');" onmouseout="return nd();">\n\
<AREA shape="circle" coords="279,72,5" href="#Sample2: Day1" onmouseover="return overlib(\'Sample2: Day1\');" onmouseout="return nd();">\n\n\
</MAP>\n'
expeps1 = '<a href="/tmp/PC1vsPC2plot.eps.gz" >Download Figure</a>'
expsrcmap2 = '<img src="/tmp/PC1_vs_PC2_plot.png" border="0" ismap usemap="#pointsSampleID12" width="360" height="360" />\n'
expimgmap2 = '\n<MAP name="pointsSampleID12">\n\
<AREA shape="circle" coords="100,208,5" href="#Sample1: Day1" onmouseover="return overlib(\'Sample1: Day1\');" onmouseout="return nd();">\n\
<AREA shape="circle" coords="279,84,5" href="#Sample2: Day1" onmouseover="return overlib(\'Sample2: Day1\');" onmouseout="return nd();">\n\n\
</MAP>\n'
expeps2 = '<a href="/tmp/PC1vsPC2plot.eps.gz" >Download Figure</a>'
filename_scree = '%s'
expdownlink_scree = '<a href="%s" >Download Figure</a>'
expimgsrc_scree = '<img src="%s" border=0 />'
# run tests if called from command line
if __name__ == "__main__":
main()
| gpl-2.0 | 2,405,689,031,635,484,700 | 41.109034 | 147 | 0.570689 | false |
onceuponatimeforever/oh-mainline | vendor/packages/zope.interface/src/zope/interface/tests/test_document.py | 22 | 1450 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Documentation tests.
"""
from unittest import TestCase, main, makeSuite
from zope.interface import Interface, Attribute
class Test(TestCase):
def testBlech(self):
from zope.interface.document import asStructuredText
self.assertEqual(asStructuredText(I2), '''\
I2
I2 doc
This interface extends:
o _I1
Attributes:
a1 -- no documentation
a2 -- a2 doc
Methods:
f21() -- f21 doc
f22() -- no documentation
f23() -- f23 doc
''')
def test_suite():
return makeSuite(Test)
class _I1(Interface):
def f11(): pass
def f12(): pass
class I2(_I1):
"I2 doc"
a1 = Attribute('a1')
a2 = Attribute('a2', 'a2 doc')
def f21(): "f21 doc"
def f22(): pass
def f23(): "f23 doc"
if __name__=='__main__':
main(defaultTest='test_suite')
| agpl-3.0 | 7,525,929,161,569,225,000 | 20.014493 | 78 | 0.59931 | false |
KiChjang/servo | tests/wpt/web-platform-tests/tools/manifest/manifest.py | 4 | 18236 | import io
import os
import sys
from atomicwrites import atomic_write
from copy import deepcopy
from multiprocessing import Pool, cpu_count
from six import ensure_text
from . import jsonlib
from . import vcs
from .item import (ConformanceCheckerTest,
CrashTest,
ManifestItem,
ManualTest,
PrintRefTest,
RefTest,
SupportFile,
TestharnessTest,
VisualTest,
WebDriverSpecTest)
from .log import get_logger
from .sourcefile import SourceFile
from .typedata import TypeData
MYPY = False
if MYPY:
# MYPY is set to True when run under Mypy.
from logging import Logger
from typing import Any
from typing import Container
from typing import Dict
from typing import IO
from typing import Iterator
from typing import Iterable
from typing import Optional
from typing import Set
from typing import Text
from typing import Tuple
from typing import Type
from typing import Union
CURRENT_VERSION = 8 # type: int
class ManifestError(Exception):
pass
class ManifestVersionMismatch(ManifestError):
pass
class InvalidCacheError(Exception):
pass
item_classes = {u"testharness": TestharnessTest,
u"reftest": RefTest,
u"print-reftest": PrintRefTest,
u"crashtest": CrashTest,
u"manual": ManualTest,
u"wdspec": WebDriverSpecTest,
u"conformancechecker": ConformanceCheckerTest,
u"visual": VisualTest,
u"support": SupportFile} # type: Dict[Text, Type[ManifestItem]]
def compute_manifest_items(source_file):
# type: (SourceFile) -> Tuple[Tuple[Text, ...], Text, Set[ManifestItem], Text]
rel_path_parts = source_file.rel_path_parts
new_type, manifest_items = source_file.manifest_items()
file_hash = source_file.hash
return rel_path_parts, new_type, set(manifest_items), file_hash
if MYPY:
ManifestDataType = Dict[Any, TypeData]
else:
ManifestDataType = dict
class ManifestData(ManifestDataType):
def __init__(self, manifest):
# type: (Manifest) -> None
"""Dictionary subclass containing a TypeData instance for each test type,
keyed by type name"""
self.initialized = False # type: bool
for key, value in item_classes.items():
self[key] = TypeData(manifest, value)
self.initialized = True
self.json_obj = None # type: None
def __setitem__(self, key, value):
# type: (Text, TypeData) -> None
if self.initialized:
raise AttributeError
dict.__setitem__(self, key, value)
def paths(self):
# type: () -> Set[Text]
"""Get a list of all paths containing test items
without actually constructing all the items"""
rv = set() # type: Set[Text]
for item_data in self.values():
for item in item_data:
rv.add(os.path.sep.join(item))
return rv
def type_by_path(self):
# type: () -> Dict[Tuple[Text, ...], Text]
rv = {}
for item_type, item_data in self.items():
for item in item_data:
rv[item] = item_type
return rv
class Manifest(object):
def __init__(self, tests_root, url_base="/"):
# type: (Text, Text) -> None
assert url_base is not None
self._data = ManifestData(self) # type: ManifestData
self.tests_root = tests_root # type: Text
self.url_base = url_base # type: Text
def __iter__(self):
# type: () -> Iterator[Tuple[Text, Text, Set[ManifestItem]]]
return self.itertypes()
def itertypes(self, *types):
# type: (*Text) -> Iterator[Tuple[Text, Text, Set[ManifestItem]]]
for item_type in (types or sorted(self._data.keys())):
for path in self._data[item_type]:
rel_path = os.sep.join(path)
tests = self._data[item_type][path]
yield item_type, rel_path, tests
def iterpath(self, path):
# type: (Text) -> Iterable[ManifestItem]
tpath = tuple(path.split(os.path.sep))
for type_tests in self._data.values():
i = type_tests.get(tpath, set())
assert i is not None
for test in i:
yield test
def iterdir(self, dir_name):
# type: (Text) -> Iterable[ManifestItem]
tpath = tuple(dir_name.split(os.path.sep))
tpath_len = len(tpath)
for type_tests in self._data.values():
for path, tests in type_tests.items():
if path[:tpath_len] == tpath:
for test in tests:
yield test
def update(self, tree, parallel=True):
# type: (Iterable[Tuple[Text, Optional[Text], bool]], bool) -> bool
"""Update the manifest given an iterable of items that make up the updated manifest.
The iterable must either generate tuples of the form (SourceFile, True) for paths
that are to be updated, or (path, False) for items that are not to be updated. This
unusual API is designed as an optimistaion meaning that SourceFile items need not be
constructed in the case we are not updating a path, but the absence of an item from
the iterator may be used to remove defunct entries from the manifest."""
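        # Illustrative sketch: a typical `tree` iterable might yield entries such as
        #   ("css/foo/bar.html", "0123abcd...", True)    # new or modified file
        #   ("css/foo/baz.html", None, False)            # unchanged file
        # where the middle element is an optional precomputed file hash.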
logger = get_logger()
changed = False
# Create local variable references to these dicts so we avoid the
# attribute access in the hot loop below
data = self._data
types = data.type_by_path()
remaining_manifest_paths = set(types)
to_update = []
for path, file_hash, updated in tree:
path_parts = tuple(path.split(os.path.sep))
is_new = path_parts not in remaining_manifest_paths
if not updated and is_new:
# This is kind of a bandaid; if we ended up here the cache
# was invalid but we've been using it anyway. That's obviously
# bad; we should fix the underlying issue that we sometimes
# use an invalid cache. But at least this fixes the immediate
# problem
raise InvalidCacheError
if not updated:
remaining_manifest_paths.remove(path_parts)
else:
assert self.tests_root is not None
source_file = SourceFile(self.tests_root,
path,
self.url_base,
file_hash)
hash_changed = False # type: bool
if not is_new:
if file_hash is None:
file_hash = source_file.hash
remaining_manifest_paths.remove(path_parts)
old_type = types[path_parts]
old_hash = data[old_type].hashes[path_parts]
if old_hash != file_hash:
hash_changed = True
del data[old_type][path_parts]
if is_new or hash_changed:
to_update.append(source_file)
if to_update:
logger.debug("Computing manifest update for %s items" % len(to_update))
changed = True
# 25 items was derived experimentally (2020-01) to be approximately the
# point at which it is quicker to create a Pool and parallelize update.
pool = None
if parallel and len(to_update) > 25 and cpu_count() > 1:
# On Python 3 on Windows, using >= MAXIMUM_WAIT_OBJECTS processes
# causes a crash in the multiprocessing module. Whilst this enum
# can technically have any value, it is usually 64. For safety,
# restrict manifest regeneration to 48 processes on Windows.
#
# See https://bugs.python.org/issue26903 and https://bugs.python.org/issue40263
processes = cpu_count()
if sys.platform == "win32" and processes > 48:
processes = 48
pool = Pool(processes)
# chunksize set > 1 when more than 10000 tests, because
# chunking is a net-gain once we get to very large numbers
# of items (again, experimentally, 2020-01)
chunksize = max(1, len(to_update) // 10000)
logger.debug("Doing a multiprocessed update. CPU count: %s, "
"processes: %s, chunksize: %s" % (cpu_count(), processes, chunksize))
results = pool.imap_unordered(compute_manifest_items,
to_update,
chunksize=chunksize
) # type: Iterator[Tuple[Tuple[Text, ...], Text, Set[ManifestItem], Text]]
else:
results = map(compute_manifest_items, to_update)
for result in results:
rel_path_parts, new_type, manifest_items, file_hash = result
data[new_type][rel_path_parts] = manifest_items
data[new_type].hashes[rel_path_parts] = file_hash
# Make sure to terminate the Pool, to avoid hangs on Python 3.
# https://docs.python.org/3/library/multiprocessing.html#multiprocessing.pool.Pool
if pool is not None:
pool.terminate()
if remaining_manifest_paths:
changed = True
for rel_path_parts in remaining_manifest_paths:
for test_data in data.values():
if rel_path_parts in test_data:
del test_data[rel_path_parts]
return changed
def to_json(self, caller_owns_obj=True):
# type: (bool) -> Dict[Text, Any]
"""Dump a manifest into a object which can be serialized as JSON
If caller_owns_obj is False, then the return value remains
owned by the manifest; it is _vitally important_ that _no_
(even read) operation is done on the manifest, as otherwise
objects within the object graph rooted at the return value can
be mutated. This essentially makes this mode very dangerous
and only to be used under extreme care.
"""
out_items = {
test_type: type_paths.to_json()
for test_type, type_paths in self._data.items() if type_paths
}
if caller_owns_obj:
out_items = deepcopy(out_items)
rv = {"url_base": self.url_base,
"items": out_items,
"version": CURRENT_VERSION} # type: Dict[Text, Any]
return rv
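        # Rough shape of the object returned above (contents abridged):
        #   {"url_base": "/",
        #    "items": {"testharness": {...}, "reftest": {...}},
        #    "version": 8}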
@classmethod
def from_json(cls, tests_root, obj, types=None, callee_owns_obj=False):
# type: (Text, Dict[Text, Any], Optional[Container[Text]], bool) -> Manifest
"""Load a manifest from a JSON object
This loads a manifest for a given local test_root path from an
object obj, potentially partially loading it to only load the
types given by types.
If callee_owns_obj is True, then ownership of obj transfers
to this function when called, and the caller must never mutate
the obj or anything referred to in the object graph rooted at
obj.
"""
version = obj.get("version")
if version != CURRENT_VERSION:
raise ManifestVersionMismatch
self = cls(tests_root, url_base=obj.get("url_base", "/"))
if not hasattr(obj, "items"):
raise ManifestError
for test_type, type_paths in obj["items"].items():
if test_type not in item_classes:
raise ManifestError
if types and test_type not in types:
continue
if not callee_owns_obj:
type_paths = deepcopy(type_paths)
self._data[test_type].set_json(type_paths)
return self
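    # Sketch of a round trip with the two methods above (the path is illustrative):
    #   manifest = Manifest.from_json("/path/to/wpt", jsonlib.load(fh))
    #   obj = manifest.to_json()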
def load(tests_root, manifest, types=None):
# type: (Text, Union[IO[bytes], Text], Optional[Container[Text]]) -> Optional[Manifest]
logger = get_logger()
logger.warning("Prefer load_and_update instead")
return _load(logger, tests_root, manifest, types)
__load_cache = {} # type: Dict[Text, Manifest]
def _load(logger, # type: Logger
tests_root, # type: Text
manifest, # type: Union[IO[bytes], Text]
types=None, # type: Optional[Container[Text]]
allow_cached=True # type: bool
):
# type: (...) -> Optional[Manifest]
manifest_path = (manifest if isinstance(manifest, str)
else manifest.name)
if allow_cached and manifest_path in __load_cache:
return __load_cache[manifest_path]
if isinstance(manifest, str):
if os.path.exists(manifest):
logger.debug("Opening manifest at %s" % manifest)
else:
logger.debug("Creating new manifest at %s" % manifest)
try:
with io.open(manifest, "r", encoding="utf-8") as f:
rv = Manifest.from_json(tests_root,
jsonlib.load(f),
types=types,
callee_owns_obj=True)
except IOError:
return None
except ValueError:
logger.warning("%r may be corrupted", manifest)
return None
else:
rv = Manifest.from_json(tests_root,
jsonlib.load(manifest),
types=types,
callee_owns_obj=True)
if allow_cached:
__load_cache[manifest_path] = rv
return rv
def load_and_update(tests_root, # type: Union[Text, bytes]
manifest_path, # type: Union[Text, bytes]
url_base, # type: Text
update=True, # type: bool
rebuild=False, # type: bool
metadata_path=None, # type: Optional[Union[Text, bytes]]
cache_root=None, # type: Optional[Union[Text, bytes]]
working_copy=True, # type: bool
types=None, # type: Optional[Container[Text]]
write_manifest=True, # type: bool
allow_cached=True, # type: bool
parallel=True # type: bool
):
# type: (...) -> Manifest
# This function is now a facade for the purposes of type conversion, so that
# the external API can accept paths as text or (utf8) bytes, but internal
# functions always use Text.
metadata_path_text = ensure_text(metadata_path) if metadata_path is not None else None
cache_root_text = ensure_text(cache_root) if cache_root is not None else None
return _load_and_update(ensure_text(tests_root),
ensure_text(manifest_path),
url_base,
update=update,
rebuild=rebuild,
metadata_path=metadata_path_text,
cache_root=cache_root_text,
working_copy=working_copy,
types=types,
write_manifest=write_manifest,
allow_cached=allow_cached,
parallel=parallel)
def _load_and_update(tests_root, # type: Text
manifest_path, # type: Text
url_base, # type: Text
update=True, # type: bool
rebuild=False, # type: bool
metadata_path=None, # type: Optional[Text]
cache_root=None, # type: Optional[Text]
working_copy=True, # type: bool
types=None, # type: Optional[Container[Text]]
write_manifest=True, # type: bool
allow_cached=True, # type: bool
parallel=True # type: bool
):
# type: (...) -> Manifest
logger = get_logger()
manifest = None
if not rebuild:
try:
manifest = _load(logger,
tests_root,
manifest_path,
types=types,
allow_cached=allow_cached)
except ManifestVersionMismatch:
logger.info("Manifest version changed, rebuilding")
except ManifestError:
logger.warning("Failed to load manifest, rebuilding")
if manifest is not None and manifest.url_base != url_base:
logger.info("Manifest url base did not match, rebuilding")
manifest = None
if manifest is None:
manifest = Manifest(tests_root, url_base)
rebuild = True
update = True
if rebuild or update:
logger.info("Updating manifest")
for retry in range(2):
try:
tree = vcs.get_tree(tests_root, manifest, manifest_path, cache_root,
working_copy, rebuild)
changed = manifest.update(tree, parallel)
break
except InvalidCacheError:
logger.warning("Manifest cache was invalid, doing a complete rebuild")
rebuild = True
else:
# If we didn't break there was an error
raise
if write_manifest and changed:
write(manifest, manifest_path)
tree.dump_caches()
return manifest
def write(manifest, manifest_path):
# type: (Manifest, Text) -> None
dir_name = os.path.dirname(manifest_path)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
with atomic_write(manifest_path, overwrite=True) as f:
# Use ',' instead of the default ', ' separator to prevent trailing
# spaces: https://docs.python.org/2/library/json.html#json.dump
jsonlib.dump_dist(manifest.to_json(caller_owns_obj=True), f)
f.write("\n")
| mpl-2.0 | 8,873,013,402,789,353,000 | 36.292434 | 117 | 0.556098 | false |
almlab/adaptml-angst-server | adaptmlprogram/wrapper/clusters/trunk/branch.py | 6 | 1502 | import copy;
class branch:
def __init__(self,length):
self.ends = [] # nodes connected to
self.length = float(length) # length (can change during rooting)
self.immutable_length = self.length # don't ever change this
self.visited = False # used for traversing the tree
def __repr__(self):
if len(self.ends) == 2:
print_string = "(" + self.ends[0].name + ","
print_string += self.ends[1].name + "):" + str(self.immutable_length)
else:
print_string = ":" + str(self.immutable_length)
return print_string
def addNode(self,node):
self.ends.append(node)
node.branch_list.append(self)
# recursion for finding all of the branches in an unrooted tree
def findBranches(self,all_branches):
all_branches.append(self)
self.visited = True
for node in self.ends:
for brch in node.branch_list:
if not brch.visited:
all_branches = brch.findBranches(all_branches)
return all_branches
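    # For example, starting from any branch b of an unrooted tree:
    #   all_branches = b.findBranches([])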
    # recursion to dump all the branches of an unrooted tree into the
# provided dictionary
def FillBranchDict(this_branch,branch_dict):
for parent_node in this_branch.ends:
for branch in parent_node.branch_list:
if branch is not this_branch:
for child_node in branch.ends:
if child_node is not parent_node:
print child_node
print child_node.leaves
| mit | 6,430,824,306,003,370,000 | 30.291667 | 74 | 0.604527 | false |
AdrianNunez/deeplearning-activity-recognition | temporalnet_working.py | 1 | 25255 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 17 11:08:21 2017
@author: adrian
"""
from __future__ import print_function
import sys
import caffe
sys.path.insert(0, './keras-Spatial-Transformer-Layer/')
sys.path.insert(0, '/home/adrian/caffe/python')
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import os
import time
import urllib2
from zipfile import ZipFile
from PIL import Image
import io
from sklearn.model_selection import StratifiedShuffleSplit
from functions import load_gazeplus_dataset, load_adl_dataset, load_model, save_model, createGenerator
#from keras.applications.vgg16 import VGG16
from vgg16module import VGG16
from keras.applications.resnet50 import ResNet50
from keras.models import Model, model_from_json, model_from_yaml, Sequential
from keras.layers import Input, Convolution2D, MaxPooling2D, LSTM, Reshape, Merge, TimeDistributed, Flatten, Activation, Dense, Dropout, merge, AveragePooling2D
from keras.regularizers import l2, activity_l2
from keras.optimizers import Adam, SGD
from keras.layers.normalization import BatchNormalization
from keras import backend as K
K.set_image_dim_ordering('th')
from attention import SpatialTransformer
from keras.utils import np_utils
from keras.utils.np_utils import probas_to_classes
from sklearn.metrics import confusion_matrix
from skimage.io import imsave
from keras.callbacks import Callback, ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, LearningRateScheduler
from keras.utils.np_utils import to_categorical
import json
from scipy.ndimage import minimum, maximum, imread
import math
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy.ma as ma
import matplotlib.cm as cm
import h5py
import random
from collections import OrderedDict
import scipy.io as sio
import cv2
import glob
import gc
#import transcaffe as tc
def get_caffe_params(netname, paramname):
net = caffe.Net(netname, paramname, caffe.TEST)
net.save_hdf5('/home/adrian/project/caffedata.h5')
params = OrderedDict()
for layername in net.params:
caffelayer = net.params[layername]
params[layername] = []
for sublayer in caffelayer:
params[layername].append(sublayer.data)
print("layer " +layername+ " has " +str(len(caffelayer))+ " sublayers, shape "+str(params[layername][0].shape))
return params, net
def make_mosaic(imgs, nrows, ncols, border=1):
"""
Given a set of images with all the same shape, makes a
mosaic with nrows and ncols
"""
nimgs = imgs.shape[0]
imshape = imgs.shape[1:]
mosaic = ma.masked_all((nrows * imshape[0] + (nrows - 1) * border,
ncols * imshape[1] + (ncols - 1) * border),
dtype=np.float32)
paddedh = imshape[0] + border
paddedw = imshape[1] + border
for i in xrange(nimgs):
row = int(np.floor(i / ncols))
col = i % ncols
mosaic[row * paddedh:row * paddedh + imshape[0],
col * paddedw:col * paddedw + imshape[1]] = imgs[i]
return mosaic
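# Illustrative use (assuming `filters` is an array of shape (64, h, w)):
#   plt.imshow(make_mosaic(filters, 8, 8), cmap=plt.get_cmap('gray'))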
def nice_imshow(ax, data, vmin=None, vmax=None, cmap=None):
"""Wrapper around pl.imshow"""
if cmap is None:
cmap = cm.jet
if vmin is None:
vmin = data.min()
if vmax is None:
vmax = data.max()
#divider = make_axes_locatable(ax)
#cax = divider.append_axes("right", size="5%", pad=0.05)
fig = plt.figure()
plt.plot(data)
#im = ax.imshow(data, vmin=vmin, vmax=vmax, interpolation='nearest', cmap=cmap)
plt.savefig('imagen.jpg')
plt.gcf().clear()
plt.close(fig)
class printbatch(Callback):
def on_batch_end(self, epoch, logs={}):
print(logs)
def plot_training_info(case, metrics, save, history):
# summarize history for accuracy
plt.ioff()
if 'accuracy' in metrics:
fig = plt.figure()
plt.plot(history['acc'])
plt.plot(history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
if save == True:
plt.savefig(case + 'accuracy.png')
plt.gcf().clear()
else:
plt.show()
plt.close(fig)
# summarize history for loss
if 'loss' in metrics:
fig = plt.figure()
plt.plot(history['loss'])
plt.plot(history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
#plt.ylim(1e-3, 1e-2)
plt.yscale("log")
plt.legend(['train', 'test'], loc='upper left')
if save == True:
plt.savefig(case + 'loss.png')
plt.gcf().clear()
else:
plt.show()
plt.close(fig)
def step_decay(epoch):
initial_lrate = 0.1
drop = 0.5
epochs_drop = 10.0
lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))
return lrate
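# Worked example of the schedule above with its hard-coded constants
# (initial_lrate=0.1, drop=0.5, epochs_drop=10): epochs 0-8 -> 0.1,
# epochs 9-18 -> 0.05, epochs 19-28 -> 0.025, and so on.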
def countData():
data_folder = '/ssd_drive/ucf101_flow_img_tvl1_gpu/'
total_data = np.zeros((9))
idx = 0
data = ['training', 'validation', 'testing']
start = time.time()
for set in data:
if set == 'training':
end = 7
elif set == 'validation':
end = 1
elif set == 'testing':
end = 1
for j in range(0,end):
activity_folders = [f for f in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder, f))]
activity_folders.sort()
for activity_folder in activity_folders:
path1 = data_folder + activity_folder + '/'
video_folders = [f for f in os.listdir(path1) if os.path.isdir(os.path.join(path1, f))]
video_folders.sort()
l = len(video_folders)
                if set == 'training':
                    s = 0.10*j
                    t = s + 0.10
                    video_folders = video_folders[int(l*s):int(l*t)]
                elif set == 'validation':
video_folders = video_folders[int(l*0.7):int(l*0.85)]
else:
video_folders = video_folders[int(l*0.85):]
for video_folder in video_folders[0:1]:
path2 = path1 + video_folder + '/'
images = [f for f in os.listdir(path2) if os.path.isfile(os.path.join(path2, f))]
                    total_data[idx] += len(images) / 2
idx += 1
return total_data
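# The array returned above holds one entry per dataset segment (seven training
# slices, then one validation and one testing slice); each entry counts the
# optical-flow x/y image pairs found in the first video folder of every activity.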
def getData(param, classes, batch_size, data, specific_set, instances):
if data == 'training':
print('='*30)
print(' PART %d OF THE TRAINING SET' % specific_set)
print('='*30)
data_folder = 'opticalflow_ucf101/'
activities = []
i = 0
X, Y = [], []
batches = []
activity_folders = [f for f in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder, f))]
activity_folders.sort()
j = 0
print('Starting the loading')
start = time.time()
for activity_folder in activity_folders:
print('Loading %d/%d' % (j, len(activity_folders)))
j += 1
activities.append(activity_folder)
path1 = data_folder + activity_folder + '/'
video_folders = [f for f in os.listdir(path1) if os.path.isdir(os.path.join(path1, f))]
video_folders.sort()
l = len(video_folders)
if data == 'training':
if specific_set == 1:
video_folders = video_folders[:int(l*0.15)]
elif specific_set == 2:
video_folders = video_folders[int(l*0.15):int(l*0.3)]
elif specific_set == 3:
video_folders = video_folders[int(l*0.3):int(l*0.45)]
elif specific_set == 4:
video_folders = video_folders[int(l*0.45):int(l*0.6)]
elif data == 'validation':
video_folders = video_folders[int(l*0.6):int(l*0.7)]
instances[1] += len(video_folders)
else:
if specific_set == 1:
video_folders = video_folders[int(l*0.7):int(l*0.8)]
elif specific_set == 2:
video_folders = video_folders[int(l*0.8):int(l*0.9)]
elif specific_set == 3:
video_folders = video_folders[int(l*0.9):]
instances[2] += len(video_folders)
for video_folder in video_folders:
path2 = path1 + video_folder + '/'
images = [f for f in os.listdir(path2) if os.path.isfile(os.path.join(path2, f))]
images.sort()
if data == 'training':
instances[0] += len(video_folders)
elif data == 'validation':
instances[1] += len(video_folders)
else:
instances[2] += len(video_folders)
stack = []
#stack.append(np.zeros((1, 1, 227, 227)).astype('uint8'))
for image in images:
x = np.asarray(imread(path2 + image))
x = np.dstack([x[...,0],x[...,2]])
# Optical flow is not normalized
#m = minimum(x)
#x = (x-m)/(maximum(x)-m+param['epsilon'])
x = np.expand_dims(np.transpose(x, (2,0,1)), 0).astype('uint8')
stack.append(x)
del x
if len(stack) == 10:
#stack.append(np.zeros((1, 1, 227, 227)).astype('uint8'))
X.append(np.hstack(stack))
Y.append(to_categorical([i], classes).astype('uint8'))
del stack
stack = []
#stack.append(np.zeros((1, 1, 227, 227)).astype('uint8'))
if len(X) == batch_size:
batches.append([np.vstack(X).astype('uint8'), np.vstack(Y).astype('uint8')])
del X, Y
X, Y = [], []
i += 1
    print('Time to load: ', str(time.time()-start))
while True:
print('='*20)
print(' Full dataset segment seen')
print('='*20)
random.shuffle(batches)
for batch in batches:
yield batch[0], batch[1]
def getDataChinese(param, classes, batch_size, segment, amount_of_data):
while True:
data_folder = '/ssd_drive/ucf101_flow_img_tvl1_gpu/'
mean_file = '/home/anunez/project/flow_mean.mat'
L = 10
num_samples = int(int(amount_of_data[8]/20)/10)
num_batches = int(num_samples/batch_size)
i, j, h = 0, 0, 0
label = 0
dim = (256, 340, 2*L, num_samples)
flow = np.zeros(shape=dim, dtype=np.float64)
flow_flip = np.zeros(shape=dim, dtype=np.float64)
labels = np.zeros(shape=(num_samples, classes), dtype=np.float64)
print('Number of samples: {}'.format(num_samples))
# ===============================================
print('Starting the loading')
start = time.time()
activity_folders = [f for f in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder, f))]
activity_folders.sort()
for activity_folder in activity_folders:
if i == num_samples:
break
h += 1
path1 = data_folder + activity_folder + '/'
video_folders = [f for f in os.listdir(path1) if os.path.isdir(os.path.join(path1, f))]
video_folders.sort()
l = len(video_folders)
offset = segment*0.015
video_folders = video_folders[int(l*(0.85+offset)):int(l*(0.85+offset+0.015))]
print('Loading %d/%d - samples: %d' % (h+1, len(activity_folders), len(video_folders)))
for video_folder in video_folders:
if i == num_samples:
break
path2 = path1 + video_folder + '/'
x_images = glob.glob(path2 + 'flow_x*.jpg')
x_images.sort()
y_images = glob.glob(path2 + 'flow_y*.jpg')
y_images.sort()
j = 0
for flow_x_file, flow_y_file in zip(x_images, y_images):
img_x = cv2.imread(flow_x_file, cv2.IMREAD_GRAYSCALE)
img_y = cv2.imread(flow_y_file, cv2.IMREAD_GRAYSCALE)
img_x = cv2.resize(img_x, dim[1::-1])
img_y = cv2.resize(img_y, dim[1::-1])
flow[:,:,j*2 ,i] = img_x
flow[:,:,j*2+1,i] = img_y
flow_flip[:,:,j*2 ,i] = 255 - img_x[:, ::-1]
flow_flip[:,:,j*2+1,i] = img_y[:, ::-1]
j += 1
if j == 10:
labels[i, label] = 1
i += 1
if i == num_samples:
break
j = 0
label += 1
        print('Transformations')
        flow = flow[:224, :224, :,:]
        print('Loading mean')
# substract mean
d = sio.loadmat(mean_file)
flow_mean = d['image_mean']
print('flow mean shape' + str(flow_mean.shape))
flow = flow - np.tile(flow_mean[...,np.newaxis], (1, 1, 1, flow.shape[3]))
# width, height, channels, nb_samples
if K.image_dim_ordering() == 'th':
flow = np.transpose(flow, (3, 2, 0, 1))
else:
flow = np.transpose(flow, (3, 0, 1, 2))
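        # The preprocessing above crops the top-left 224x224 patch, subtracts the
        # flow mean loaded from flow_mean.mat and reorders the axes to
        # (samples, channels, height, width) for 'th' ordering (or
        # (samples, height, width, channels) for 'tf').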
        print('Time to load: ', str(time.time()-start))
for bb in range(num_batches):
print(bb)
span = range(batch_size*bb, min(flow.shape[0],batch_size*(bb+1)))
if len(span) == batch_size:
print('yielded')
yield flow[span, :,:,:].astype(np.float32), labels[span,:].astype(np.float32)
#del flow_1
del flow
del labels
gc.collect()
continue
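        # Note: the extra corner and flipped crops below are never reached because
        # of the `continue` above; they appear to be leftovers from a ten-crop
        # evaluation scheme.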
flow_2 = flow[:224, -224:, :,:]
flow_3 = flow[16:240, 60:284, :,:]
flow_4 = flow[-224:, :224, :,:]
flow_5 = flow[-224:, -224:, :,:]
flow_f_1 = flow_flip[:224, :224, :,:]
flow_f_2 = flow_flip[:224, -224:, :,:]
flow_f_3 = flow_flip[16:240, 60:284, :,:]
flow_f_4 = flow_flip[-224:, :224, :,:]
flow_f_5 = flow_flip[-224:, -224:, :,:]
def getDataFromHDF5(filename, classes, batch_size, set):
while True:
lim = 0
if set == 'training':
lim = 10
data = 'training_images_'
labels = 'training_labels'
elif set == 'validation':
data = 'validation_images_'
labels = 'validation_labels'
lim = 2
else:
data = 'testing_images_'
labels = 'testing_labels'
lim = 5
Y = []
        with h5py.File(filename, 'r') as f:
Y = f[labels].value
for i in range(0,lim):
X = []
            with h5py.File(filename, 'r') as f:
X = f[data + '{}'.format(i)].value
for i in range(0, len(X)/batch_size):
pos = i*batch_size
yield X[pos:pos+batch_size,...], Y[pos:pos+batch_size,...]
i += 1
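# The generator above assumes (this layout is an assumption, not created anywhere
# in this file) an HDF5 file with datasets named '<set>_images_<k>' for
# k in 0..lim-1 plus a single '<set>_labels' dataset, e.g. 'training_images_0'
# ... 'training_images_9' and 'training_labels'.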
def main(parameter_file):
netname = 'caffe_vgg16_temporalnet/cuhk_action_temporal_vgg_16_flow_deploy.prototxt'
paramname = 'caffe_vgg16_temporalnet/cuhk_action_temporal_vgg_16_split2.caffemodel'
best_model = 'best_weights.hdf5'
with open(parameter_file) as data_file:
param = json.load(data_file)
#params, net = get_caffe_params(netname, paramname)
if K.image_dim_ordering() == 'th':
data = Input(shape=(param['input_channels'], param['input_width'], param['input_height']), dtype='float32', name='input')
else:
data = Input(shape=(param['input_width'], param['input_height'], param['input_channels']), dtype='float32', name='input')
# VGG16 =====================================================
vgg16 = VGG16(weights=None, include_top=True, input_tensor=data, classes=101)
# VGG16 =====================================================
x = vgg16.output
#x = Flatten(name='flatten')(x)
#for (i, nb_neurons) in zip(range(len(param['final_dense'])), param['final_dense']):
# if not param['batch_normalization']['add']:
# x = Dense(nb_neurons, activation='relu', init='glorot_uniform', W_regularizer=l2(param['w_regularizer']), b_regularizer=l2(param['b_regularizer']), name='final_fc%d' % (i+1))(x)
# else:
# x = Dense(nb_neurons, init='glorot_uniform', W_regularizer=l2(param['w_regularizer']), b_regularizer=l2(param['w_regularizer']), name='final_fc%d' % (i+1))(x)
# x = BatchNormalization(epsilon=param['batch_normalization']['epsilon'], mode=param['batch_normalization']['mode'], axis=param['batch_normalization']['axis'], momentum=param['batch_normalization']['momentum'])(x)
# x = Activation(param['activation_function'])(x)
# if param['dropout']['add']:
# x = Dropout(param['dropout']['p'])(x)
#
#final = Dense(param['classes'], activation='softmax', name='predictions')(x)
model = Model(data, x)
#weights = np.load('./weights/weights.npy').item()
#weights = np.load('../caffe-tensorflow/weights.npy').item()
#keys = weights.keys()
layerskeras = ['block1_conv1', 'block1_conv2', 'block2_conv1', 'block2_conv2', 'block3_conv1', 'block3_conv2', 'block3_conv3', 'block4_conv1', 'block4_conv2', 'block4_conv3', 'block5_conv1', 'block5_conv2', 'block5_conv3', 'fc1', 'fc2', 'predictions']
layerscaffe = ['conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1', 'conv3_2', 'conv3_3', 'conv4_1', 'conv4_2', 'conv4_3', 'conv5_1', 'conv5_2', 'conv5_3', 'fc6', 'fc7', 'fc8']
#keys.sort()
#print(params.keys())
i = 0
#w2, b2 = model.layers[1].get_weights()
#for layer in layerscaffe[:-3]:
# w, b = model.get_layer(layerskeras[i]).get_weights()
# params[layer][0][...] = params[layer][0][:,:,::-1,::-1]
# model.get_layer(layerskeras[i]).W.set_value(params[layer][0])
# model.get_layer(layerskeras[i]).b.set_value(params[layer][1])
# print(layer, params[layer][0].shape, params[layer][1].shape, w.shape, b.shape)
# i += 1
#for layer in layerscaffe[-3:]:
# w, b = model.get_layer(layerskeras[i]).get_weights()
#
# model.get_layer(layerskeras[i]).W.set_value(np.transpose(params[layer][0],(1, 0)))
# model.get_layer(layerskeras[i]).b.set_value(params[layer][1])
# print(layer, params[layer][0].shape, params[layer][1].shape, w.shape, b.shape)
# i += 1
#w, b = model.layers[1].get_weights()
h5 = h5py.File('/home/anunez/project/caffedata.h5')
for layer in layerscaffe[:-3]:
w, b = model.get_layer(layerskeras[i]).get_weights()
print('--')
print(model.get_layer(layerskeras[i]).output_shape)
#print(w.shape, b.shape)
w2, b2 = h5['data'][layer]['0'], h5['data'][layer]['1']
if K.image_dim_ordering() == 'tf':
w2 = np.transpose(w2, (0,2,3,1))
w2 = w2[:, ::-1, ::-1, :]
else:
w2 = np.transpose(w2, (0,1,2,3))
w2 = w2[:, :, ::-1, ::-1]
b2 = np.asarray(b2)
model.get_layer(layerskeras[i]).W.set_value(w2)
model.get_layer(layerskeras[i]).b.set_value(b2)
print(model.get_layer(layerskeras[i]).output_shape)
print('--')
i += 1
for layer in layerscaffe[-3:]:
w, b = model.get_layer(layerskeras[i]).get_weights()
w2, b2 = [], []
#print(w.shape[1])
for j in range(w.shape[1]):
w2.append(h5['data'][layer]['0'][j])
b2.append(h5['data'][layer]['1'][j])
print(w.shape, b.shape)
w2 = np.vstack([w2])
w2 = np.transpose(w2,(1,0))
b2 = np.squeeze(np.vstack(b2))
print(w2.shape, b2.shape)
model.get_layer(layerskeras[i]).set_weights([w2, b2])
i += 1
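    # Summary of the conversion above: convolutional kernels from the Caffe HDF5
    # dump are copied with their spatial dimensions reversed ([..., ::-1, ::-1]),
    # and fully connected weights are stacked and transposed to Keras's
    # (input, output) layout; the spatial flip is presumably needed because Caffe
    # and this Keras/Theano setup use different convolution conventions.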
#rows = 4
#cols = 20
#w, b = model.layers[1].get_weights()
#fig = plt.figure()
#nb = 1
#for i in range(20):
# ax = plt.subplot(rows, cols, nb)
# nb+=1
# ax.imshow(w[0,i,...], interpolation='nearest', cmap=plt.get_cmap('gray'))
#for i in range(20):
## ax = plt.subplot(rows, cols, nb)
# nb+=1
# ax.imshow(w[1,i,...], interpolation='nearest', cmap=plt.get_cmap('gray'))
#for i in range(20):
# ax = plt.subplot(rows, cols, nb)
# nb+=1
# ax.imshow(w[2,i,...], interpolation='nearest', cmap=plt.get_cmap('gray'))
#for i in range(20):
# ax = plt.subplot(rows, cols, nb)
# nb+=1
# ax.imshow(w[3,i,...], interpolation='nearest', cmap=plt.get_cmap('gray'))
#plt.show()
#plt.close(fig)
#i = 0
#for layer in layers:
# layer_weights = weights[keys[i]]
# b = layer_weights['biases']
# w = layer_weights['weights']
# print(w.shape)
# if 'conv' in layer:
# w = np.flip(w, axis=0)
# w = np.flip(w, axis=1)
# w = np.transpose(w, (3,2,0,1))
# model.get_layer(layer).W.set_value(w)
# model.get_layer(layer).b.set_value(b)
# i += 1
#model.summary()
#model = tc.load(netname, paramname, target_lib="keras")
for layer in model.layers:
layer.trainable = False
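# At this point every layer has been marked non-trainable; with the whole network
# frozen, the compile() below effectively sets the model up for evaluation /
# feature extraction rather than fine-tuning (unfreeze selected layers if
# training is actually intended).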
if param['load_model'] and os.path.exists(param['weights_file']) and os.path.exists(param['model_file']):
pass #model = load_model(param['weights_file'], param['model_file'])
adam = Adam(lr=param['lr'], beta_1=param['beta_1'], beta_2=param['beta_2'], epsilon=param['adam_eps'], decay=param['decay'])
model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=param['metrics'])
#model.optimizer.lr.set_value(param['lr'])
c = ModelCheckpoint(filepath=best_model, monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto')
#e = EarlyStopping(monitor='loss', min_delta=0, patience=10, verbose=0, mode='auto')
r = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=0, mode='auto', epsilon=0.0001, cooldown=0, min_lr=0)
#l = LearningRateScheduler(step_decay)
#pb = printbatch()
callbacks = [c, r]
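# Active callbacks: ModelCheckpoint keeps the best model by validation accuracy,
# and ReduceLROnPlateau lowers the learning rate when validation loss stalls.
# Early stopping, the step-decay scheduler and the batch printer above are left
# commented out.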
amount_of_data = countData()
print(amount_of_data)
idx = 0
nb_instances = [0, 0, 0]
#validationGenerator = getData(param, param['classes'], param['batch_size'], 'validation', -1, nb_instances)
if not os.path.isdir('train_history_results'):
os.mkdir('train_history_results')
#model.load_weights(best_model)
for i in range(1,1):  # note: range(1, 1) is empty, so this training loop never runs as written
trainingGenerator = getData(param, param['classes'], param['batch_size'], 'training', i, nb_instances)
#history = model.fit_generator(trainingGenerator, param['samples_per_epoch'], param['epochs'], validation_data=validationGenerator, nb_val_samples=1000, nb_worker=param['nb_worker'], pickle_safe=param['pickle_safe'], max_q_size=param['max_q_size'], callbacks=callbacks)
del trainingGenerator
#with open('train_history_results/test' + parameter_file[:-5] + str(i) + '.txt', 'w+') as metrics_file:
#metrics_file.write(history.history)
#plot_training_info('train_' + parameter_file[:-5] + str(i), param['metrics'], param['save_plots'], history.history)
model.load_weights(best_model)
model.optimizer.lr.set_value(param['lr'])
#save_model(model, param['weights_file'], param['model_file'])
if not os.path.isdir('test_metrics_results'):
os.mkdir('test_metrics_results')
print('TEST TIME')
for i in range(10):
print('='*20)
print('Iteration {} of testing'.format(i))
print('='*20)
testingGenerator = getDataChinese(param, param['classes'], param['batch_size'], i, amount_of_data)
metrics = model.evaluate_generator(testingGenerator, (int(amount_of_data[8]/20)/10), nb_worker=param['nb_worker'], pickle_safe=param['pickle_safe'], max_q_size=param['max_q_size'])
print('Evaluation results: {}'.format(metrics))
print('Loss: {}, Accuracy: {}'.format(metrics[0], metrics[1]))
with open('test_metrics_results/test' + parameter_file[:-5] + str(i) + '.txt', 'w+') as metrics_file:
metrics_file.write(str(metrics))
del testingGenerator
#plot_training_info('test_' + parameter_file[:-5] + str(i), param['metrics'], param['save_plots'], history.history)
with open('stats.txt', 'a+') as stats:
stats.write('Training instances: ' + str(nb_instances[0]) +'\n')
stats.write('Validation instances: ' + str(nb_instances[1]) +'\n')
stats.write('Testing instances: ' + str(nb_instances[2])+'\n')
if __name__ == '__main__':
parameter_files = ['not_freezed.json', 'last_block_freezed.json', 'first_layer_and_last_block_freezed.json']
for parameter_file in parameter_files:
main(parameter_file)
| mit | 5,728,998,120,165,741,000 | 40.199021 | 277 | 0.551455 | false |
bboe/reddit_irc | reddit_irc.py | 1 | 5933 | #!/usr/bin/env python
import asyncore
import re
import praw
import sys
import time
from ircutils import bot
from six import text_type
from six.moves import configparser
debug = True
__version__ = '0.1.3'
class RedditBot(bot.SimpleBot):
MSG_FORMAT = u'{shortlink} New post to /r/{subreddit} by {author}: {title}'
IGNORE_EVENTS = set(('CONN_CONNECT', 'CTCP_VERSION', 'JOIN', 'KICK',
'MODE', 'NICK', 'PART', 'PING', 'PRIVMSG', 'QUIT',
'RPL_BOUNCE', 'RPL_CREATED', 'RPL_ENDOFMOTD',
'RPL_ENDOFNAMES', 'RPL_GLOBALUSERS', 'RPL_LOCALUSERS',
'RPL_LUSERCHANNELS', 'RPL_LUSERCLIENT', 'RPL_LUSERME',
'RPL_LUSEROP', 'RPL_LUSERUNKNOWN', 'RPL_MOTD',
'RPL_MOTDSTART', 'RPL_MYINFO', 'RPL_NAMREPLY',
'RPL_STATSCONN', 'RPL_TOPIC', 'RPL_TOPICWHOTIME',
'RPL_YOURHOST', 'RPL_YOURID', 'RPL_WELCOME', 'TOPIC'))
def __init__(self, nick, server):
bot.SimpleBot.__init__(self, nick)
self.real_name = '%s (https://github.com/bboe/reddit_irc)' % nick
self.server = server
def on_any(self, event):
if event.command in self.IGNORE_EVENTS:
return
print('\t%r %s (%s->%s) %s' % (self.server, event.command,
event.source, event.target,
event.params))
def on_channel_message(self, event):
sys.stderr.write('%r (%s) <%s> %s\n' %
(self.server, event.target, event.source,
event.message))
sys.stderr.flush()
def on_private_message(self, event):
print('(PRIVATE %r) <%s> %s' % (self.server, event.source,
event.message))
def announce(self, submission, channel):
msg = self.MSG_FORMAT.format(
url=submission.url,
permalink=submission.permalink,
shortlink=submission.short_link,
subreddit=text_type(submission.subreddit),
author=text_type(submission.author),
title=submission.title).encode('utf-8')
        msg = re.sub(r'\s+', ' ', msg).strip()
if debug:
print(msg)
self.send_message(channel, msg)
class RedditUpdater(object):
MSG_LIMIT = 3
class_reddit = None
def __init__(self, subreddit):
self.sr_name = subreddit
self.subreddit = self.class_reddit.get_subreddit(subreddit)
        self.previous = next(self.subreddit.get_new())
self.associations = []
if debug:
print('Added %s' % subreddit)
print('\tLast submission: %r' % self.previous.title)
def add(self, server_bot, channel):
self.associations.append((server_bot, channel))
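    # update() re-reads the subreddit's "new" listing, gathers everything newer
    # than the last submission seen, and announces at most MSG_LIMIT of the most
    # recent ones to every (bot, channel) pair registered through add().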
def update(self):
submissions = []
try:
for submission in self.subreddit.get_new():
if submission.created_utc <= self.previous.created_utc:
break
submissions.append(submission)
except Exception as error:
print(text_type(error))
return
if not submissions:
return
if len(submissions) > self.MSG_LIMIT:
submissions = submissions[-self.MSG_LIMIT:]
self.previous = submissions[0]
for submission in reversed(submissions):
for server_bot, channel in self.associations:
server_bot.announce(submission, channel)
class Runner(object):
CHECK_TIME = 30
def __init__(self):
self.bots = {}
self.reddits = {}
self.load_configuration()
def load_configuration(self):
config = configparser.RawConfigParser()
if not config.read(['reddit_irc.ini']):
raise Exception('Could not find settings file.')
RedditUpdater.class_reddit = praw.Reddit(config.get('DEFAULT',
'reddit_agent'))
if config.has_option('DEFAULT', 'check_time'):
self.CHECK_TIME = int(config.get('DEFAULT', 'check_time'))
for server in config.sections():
self.parse_server(server, dict(config.items(server)))
def parse_server(self, server, items):
        mappings = re.sub(r'\s', '', items['mapping']).split(',')
if not mappings:
raise Exception('No mappings for %r' % server)
bot = RedditBot(items['irc_name'], server)
self.bots[server] = bot
channels = []
for mapping in mappings:
channel, subs = mapping.split(':', 1)
norm_subs = '+'.join(sorted(subs.split('+')))
if not norm_subs:
raise Exception('No subreddits for %r:%r' % (server, channel))
channels.append(channel)
if norm_subs not in self.reddits:
self.reddits[norm_subs] = RedditUpdater(norm_subs)
self.reddits[norm_subs].add(bot, channel)
use_ssl = items['irc_ssl'].lower() in ('1', 'yes', 'true', 'on')
bot.connect(items['irc_host'], int(items['irc_port']),
channel=channels,
use_ssl=use_ssl)
if 'irc_msg' in items:
bot.MSG_FORMAT = text_type(items['irc_msg'])
if 'irc_pswd' in items:
bot.identify(items['irc_pswd'])
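    # run() multiplexes the IRC connections through asyncore and, every
    # CHECK_TIME seconds, polls each distinct subreddit combination exactly
    # once, however many server/channel pairs are mapped to it.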
def run(self):
now = time.time()
check_time = now + self.CHECK_TIME
while True:
wait_time = check_time - now
asyncore.loop(timeout=wait_time, count=1)
now = time.time()
if now >= check_time:
for reddit in self.reddits.values():
reddit.update()
check_time = now + self.CHECK_TIME
def main():
runner = Runner()
runner.run()
if __name__ == '__main__':
sys.exit(main())
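# Illustrative reddit_irc.ini (option names mirror load_configuration() and
# parse_server() above; hosts, channels and subreddits are placeholders):
#
# [DEFAULT]
# reddit_agent = reddit_irc bot by u/example
# check_time = 30
#
# [freenode]
# irc_name = reddit_irc
# irc_host = chat.freenode.net
# irc_port = 6667
# irc_ssl = no
# mapping = #example-channel:python+learnpython
#
# irc_msg and irc_pswd may also be set per server section.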
| bsd-2-clause | -3,101,209,999,318,727,000 | 35.398773 | 79 | 0.537839 | false |
lucienfostier/gaffer | python/GafferTest/ArrayPlugTest.py | 7 | 17171 | ##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import gc
import imath
import IECore
import Gaffer
import GafferTest
class ArrayPlugTest( GafferTest.TestCase ) :
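	# The behaviour exercised below: an ArrayPlug keeps one unconnected spare
	# element on the end (up to maxSize), never shrinks below minSize, and
	# drops trailing spare elements again when their inputs are removed.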
def test( self ) :
a = GafferTest.AddNode()
n = GafferTest.ArrayPlugNode()
self.assertTrue( "e1" in n["in"] )
self.assertTrue( "e2" not in n["in"] )
self.assertEqual( len( n["in"] ), 1 )
self.assertTrue( n["in"]["e1"].isSame( n["in"][0] ) )
n["in"][0].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 2 )
self.assertTrue( "e1" in n["in"] )
self.assertTrue( "e2" in n["in"] )
n["in"][0].setInput( None )
self.assertTrue( "e1" in n["in"] )
self.assertTrue( "e2" not in n["in"] )
self.assertEqual( len( n["in"] ), 1 )
def testConnectionGaps( self ) :
a = GafferTest.AddNode()
n = GafferTest.ArrayPlugNode()
n["in"][0].setInput( a["sum"] )
n["in"][1].setInput( a["sum"] )
n["in"][2].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 4 )
self.assertEqual( n["in"]["e1"].getInput(), a["sum"] )
self.assertEqual( n["in"]["e2"].getInput(), a["sum"] )
self.assertEqual( n["in"]["e3"].getInput(), a["sum"] )
self.assertIsNone( n["in"]["e4"].getInput() )
n["in"][1].setInput( None )
self.assertEqual( len( n["in"] ), 4 )
self.assertEqual( n["in"]["e1"].getInput(), a["sum"] )
self.assertIsNone( n["in"]["e2"].getInput() )
self.assertEqual( n["in"]["e3"].getInput(), a["sum"] )
self.assertIsNone( n["in"]["e4"].getInput() )
def testSerialisation( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = GafferTest.ArrayPlugNode()
s["n"]["in"][0].setInput( s["a"]["sum"] )
s["n"]["in"][1].setInput( s["a"]["sum"] )
s["n"]["in"][2].setInput( s["a"]["sum"] )
s["n"]["in"][1].setInput( None )
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"]["e1"].isSame( s["n"]["in"][0] ) )
self.assertTrue( s["n"]["in"]["e2"].isSame( s["n"]["in"][1] ) )
self.assertTrue( s["n"]["in"]["e3"].isSame( s["n"]["in"][2] ) )
self.assertTrue( s["n"]["in"]["e4"].isSame( s["n"]["in"][3] ) )
self.assertEqual( s["n"]["in"]["e1"].getInput(), s["a"]["sum"] )
self.assertIsNone( s["n"]["in"]["e2"].getInput() )
self.assertEqual( s["n"]["in"]["e3"].getInput(), s["a"]["sum"] )
self.assertIsNone( s["n"]["in"]["e4"].getInput() )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( len( s2["n"]["in"] ), 4 )
self.assertTrue( s2["n"]["in"]["e1"].isSame( s2["n"]["in"][0] ) )
self.assertTrue( s2["n"]["in"]["e2"].isSame( s2["n"]["in"][1] ) )
self.assertTrue( s2["n"]["in"]["e3"].isSame( s2["n"]["in"][2] ) )
self.assertTrue( s2["n"]["in"]["e4"].isSame( s2["n"]["in"][3] ) )
self.assertEqual( s2["n"]["in"]["e1"].getInput(), s2["a"]["sum"] )
self.assertIsNone( s2["n"]["in"]["e2"].getInput() )
self.assertEqual( s2["n"]["in"]["e3"].getInput(), s2["a"]["sum"] )
self.assertIsNone( s2["n"]["in"]["e4"].getInput() )
def testMaximumInputs( self ) :
a = GafferTest.AddNode()
n = GafferTest.ArrayPlugNode()
# connect all inputs
for i in range( 0, 6 ) :
n["in"][i].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 6 )
for i in range( 0, 6 ) :
self.assertTrue( n["in"][i].getInput().isSame( a["sum"] ) )
# check that removing the one before the last
# leaves the last in place.
n["in"][4].setInput( None )
self.assertEqual( len( n["in"] ), 6 )
for i in range( 0, 6 ) :
if i != 4 :
self.assertTrue( n["in"][i].getInput().isSame( a["sum"] ) )
else :
self.assertTrue( n["in"][i].getInput() is None )
def testMakeConnectionAndUndoAndRedo( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = GafferTest.ArrayPlugNode()
with Gaffer.UndoScope( s ) :
s["n"]["in"][0].setInput( s["a"]["sum"] )
self.assertEqual( len( s["n"]["in"] ), 2 )
self.assertTrue( s["n"]["in"][0].isSame( s["n"]["in"]["e1"] ) )
self.assertTrue( s["n"]["in"][1].isSame( s["n"]["in"]["e2"] ) )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 1 )
self.assertTrue( s["n"]["in"][0].isSame( s["n"]["in"]["e1"] ) )
s.redo()
self.assertEqual( len( s["n"]["in"] ), 2 )
self.assertTrue( s["n"]["in"][0].isSame( s["n"]["in"]["e1"] ) )
self.assertTrue( s["n"]["in"][1].isSame( s["n"]["in"]["e2"] ) )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 1 )
self.assertTrue( s["n"]["in"][0].isSame( s["n"]["in"]["e1"] ) )
self.assertTrue( "in" in s["n"] )
self.assertFalse( "in1" in s["n"] )
def testMinimumInputs( self ) :
a = GafferTest.AddNode()
n = Gaffer.Node()
n["in"] = Gaffer.ArrayPlug( "in", element = Gaffer.IntPlug( "e1" ), minSize=3 )
self.assertEqual( len( n["in"] ), 3 )
# connecting to the middle input shouldn't create
# any new inputs, because there is still one free on the end
n["in"]["e2"].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 3 )
# connecting to the last input should create a new
# one - there should always be one free input on the
# end (until the maximum is reached).
n["in"]["e3"].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 4 )
n["in"]["e3"].setInput( None )
self.assertEqual( len( n["in"] ), 3 )
def testDeleteAndUndoAndRedo( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = GafferTest.ArrayPlugNode()
s["n"]["in"]["e1"].setInput( s["a"]["sum"] )
s["n"]["in"]["e2"].setInput( s["a"]["sum"] )
s["n"]["in"]["e3"].setInput( s["a"]["sum"] )
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"]["e1"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e2"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e3"].getInput().isSame( s["a"]["sum"] ) )
with Gaffer.UndoScope( s ) :
s.deleteNodes( s, Gaffer.StandardSet( [ s["n"] ] ) )
self.assertFalse( "n" in s )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"]["e1"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e2"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e3"].getInput().isSame( s["a"]["sum"] ) )
s.redo()
self.assertFalse( "n" in s )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"]["e1"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e2"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e3"].getInput().isSame( s["a"]["sum"] ) )
def testDeleteInputNodeAndUndoAndRedo( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = GafferTest.ArrayPlugNode()
s["n"]["in"][0].setInput( s["a"]["sum"] )
s["n"]["in"][1].setInput( s["a"]["sum"] )
s["n"]["in"][2].setInput( s["a"]["sum"] )
n = s["n"]
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"][0].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"][1].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"][2].getInput().isSame( s["a"]["sum"] ) )
with Gaffer.UndoScope( s ) :
s.deleteNodes( s, Gaffer.StandardSet( [ s["a"] ] ) )
self.assertFalse( "a" in s )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"][0].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"][1].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"][2].getInput().isSame( s["a"]["sum"] ) )
s.redo()
self.assertFalse( "a" in s )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"][0].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"][1].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"][2].getInput().isSame( s["a"]["sum"] ) )
def testFixedLengthDynamic( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = Gaffer.Node()
s["n"]["a"] = Gaffer.ArrayPlug( "a", element = Gaffer.IntPlug(), minSize = 4, maxSize = 4, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["n"]["a"][1].setInput( s["a"]["sum"] )
s["n"]["a"][2].setInput( s["a"]["sum"] )
self.assertEqual( s["n"]["a"].minSize(), 4 )
self.assertEqual( s["n"]["a"].maxSize(), 4 )
self.assertEqual( len( s["n"]["a"] ), 4 )
self.assertTrue( s["n"]["a"][0].getInput() is None )
self.assertTrue( s["n"]["a"][1].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["a"][1].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["a"][3].getInput() is None )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( s2["n"]["a"].minSize(), 4 )
self.assertEqual( s2["n"]["a"].maxSize(), 4 )
self.assertEqual( len( s2["n"]["a"] ), 4 )
self.assertTrue( s2["n"]["a"][0].getInput() is None )
self.assertTrue( s2["n"]["a"][1].getInput().isSame( s2["a"]["sum"] ) )
self.assertTrue( s2["n"]["a"][1].getInput().isSame( s2["a"]["sum"] ) )
self.assertTrue( s2["n"]["a"][3].getInput() is None )
def testPythonElement( self ) :
class PythonElement( Gaffer.Plug ) :
def __init__( self, name = "PythonElement", direction = Gaffer.Plug.Direction.In, flags = Gaffer.Plug.Flags.Default ) :
Gaffer.Plug.__init__( self, name, direction, flags )
def createCounterpart( self, name, direction ) :
return PythonElement( name, direction, self.getFlags() )
n = Gaffer.Node()
n["a"] = Gaffer.ArrayPlug( element = PythonElement() )
self.assertEqual( len( n["a"] ), 1 )
self.assertTrue( isinstance( n["a"][0], PythonElement ) )
p = PythonElement()
n["a"][0].setInput( p )
self.assertEqual( len( n["a"] ), 2 )
self.assertTrue( isinstance( n["a"][1], PythonElement ) )
def testTopLevelConnection( self ) :
n = Gaffer.Node()
n["a"] = Gaffer.ArrayPlug( element = Gaffer.IntPlug() )
n["b"] = Gaffer.ArrayPlug( element = Gaffer.IntPlug() )
n["b"].setInput( n["a"] )
def assertInput( plug, input ) :
self.assertEqual( len( plug ), len( input ) )
for i in range( 0, len( plug ) ) :
self.assertTrue( plug[i].getInput().isSame( input[i] ) )
assertInput( n["b"], n["a"] )
a = GafferTest.AddNode()
n["a"][0].setInput( a["sum"] )
self.assertEqual( len( n["a"] ), 2 )
assertInput( n["b"], n["a"] )
n["a"][1].setInput( a["sum"] )
self.assertEqual( len( n["a"] ), 3 )
assertInput( n["b"], n["a"] )
n["a"][0].setInput( None )
self.assertEqual( len( n["a"] ), 3 )
assertInput( n["b"], n["a"] )
def testArrayPlugCopiesColors( self ) :
n = Gaffer.Node()
n2 = Gaffer.Node()
n2.addChild(Gaffer.IntPlug("test"))
connectionColor = imath.Color3f( 0.1 , 0.2 , 0.3 )
noodleColor = imath.Color3f( 0.4, 0.5 , 0.6 )
element = Gaffer.IntPlug()
Gaffer.Metadata.registerValue( element, "connectionGadget:color", connectionColor )
Gaffer.Metadata.registerValue( element, "nodule:color", noodleColor )
n["a"] = Gaffer.ArrayPlug( element = element )
n["a"][0].setInput(n2["test"])
self.assertEqual( Gaffer.Metadata.value( n["a"][1], "connectionGadget:color" ), connectionColor )
self.assertEqual( Gaffer.Metadata.value( n["a"][1], "nodule:color" ), noodleColor )
def testOnlyOneChildType( self ) :
p = Gaffer.ArrayPlug( element = Gaffer.IntPlug() )
self.assertTrue( p.acceptsChild( Gaffer.IntPlug() ) )
self.assertFalse( p.acceptsChild( Gaffer.FloatPlug() ) )
def testDenyInputFromNonArrayPlugs( self ) :
a = Gaffer.ArrayPlug( element = Gaffer.IntPlug() )
p = Gaffer.V2iPlug()
self.assertFalse( a.acceptsInput( p ) )
def testPartialConnections( self ) :
n = Gaffer.Node()
n["p"] = Gaffer.ArrayPlug( element = Gaffer.V3fPlug( "e" ) )
self.assertEqual( len( n["p"] ), 1 )
p = Gaffer.FloatPlug()
n["p"][0]["x"].setInput( p )
self.assertEqual( len( n["p"] ), 2 )
n["p"][0]["y"].setInput( p )
self.assertEqual( len( n["p"] ), 2 )
n["p"][1]["y"].setInput( p )
self.assertEqual( len( n["p"] ), 3 )
n["p"][2]["z"].setInput( p )
self.assertEqual( len( n["p"] ), 4 )
n["p"][1]["y"].setInput( None )
self.assertEqual( len( n["p"] ), 4 )
n["p"][2]["z"].setInput( None )
self.assertEqual( len( n["p"] ), 2 )
def testResizeWhenInputsChange( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = Gaffer.Node()
s["n"]["user"]["p"] = Gaffer.ArrayPlug( element = Gaffer.IntPlug(), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, resizeWhenInputsChange = False )
self.assertEqual( s["n"]["user"]["p"].resizeWhenInputsChange(), False )
self.assertEqual( len( s["n"]["user"]["p"] ), 1 )
s["n"]["user"]["p"][0].setInput( s["a"]["sum"] )
self.assertEqual( len( s["n"]["user"]["p"] ), 1 )
s["n"]["user"]["p"][0].setInput( None )
self.assertEqual( len( s["n"]["user"]["p"] ), 1 )
p = s["n"]["user"]["p"].createCounterpart( "p", Gaffer.Plug.Direction.In )
self.assertEqual( p.resizeWhenInputsChange(), False )
def testNext( self ) :
a = GafferTest.AddNode()
n = Gaffer.Node()
n["a1"] = Gaffer.ArrayPlug( element = Gaffer.IntPlug() )
n["a2"] = Gaffer.ArrayPlug( element = Gaffer.IntPlug(), maxSize = 3, resizeWhenInputsChange = False )
self.assertEqual( len( n["a1"] ), 1 )
self.assertEqual( len( n["a2"] ), 1 )
self.assertEqual( n["a1"].next(), n["a1"][0] )
self.assertEqual( n["a2"].next(), n["a2"][0] )
n["a1"][0].setInput( a["sum"] )
n["a2"][0].setInput( a["sum"] )
self.assertEqual( len( n["a1"] ), 2 )
self.assertEqual( len( n["a2"] ), 1 )
self.assertEqual( n["a1"].next(), n["a1"][1] )
self.assertEqual( n["a2"].next(), n["a2"][1] )
self.assertEqual( len( n["a2"] ), 2 )
self.assertEqual( n["a1"].next(), n["a1"][1] )
self.assertEqual( n["a2"].next(), n["a2"][1] )
n["a2"].next().setInput( a["sum"] )
n["a2"].next().setInput( a["sum"] )
self.assertEqual( len( n["a2"] ), 3 )
self.assertEqual( n["a2"].next(), None )
def testResize( self ) :
p = Gaffer.ArrayPlug( element = Gaffer.IntPlug(), minSize = 1, maxSize = 3, resizeWhenInputsChange = False )
self.assertEqual( len( p ), p.minSize() )
p.resize( 2 )
self.assertEqual( len( p ), 2 )
self.assertIsInstance( p[1], Gaffer.IntPlug )
p.resize( 3 )
self.assertEqual( len( p ), 3 )
self.assertIsInstance( p[2], Gaffer.IntPlug )
with self.assertRaises( RuntimeError ) :
p.resize( p.minSize() - 1 )
with self.assertRaises( RuntimeError ) :
p.resize( p.maxSize() + 1 )
def testSerialisationUsesIndices( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = GafferTest.ArrayPlugNode()
s["n"]["in"][0].setInput( s["a"]["sum"] )
s["n"]["in"][1].setInput( s["a"]["sum"] )
ss = s.serialise()
self.assertNotIn( "[\"" + s["n"]["in"][0].getName() + "\"]", ss )
self.assertNotIn( "[\"" + s["n"]["in"][1].getName() + "\"]", ss )
self.assertIn( "[0].setInput", ss )
self.assertIn( "[1].setInput", ss )
s2 = Gaffer.ScriptNode()
s2.execute( ss )
self.assertEqual( s2["n"]["in"][0].getInput(), s2["a"]["sum"] )
self.assertEqual( s2["n"]["in"][1].getInput(), s2["a"]["sum"] )
def tearDown( self ) :
# some bugs in the InputGenerator only showed themselves when
# the ScriptNode was deleted during garbage collection, often
# in totally unrelated tests. so we run the garbage collector
# here to localise any problems to this test, making them
# easier to diagnose and fix.
while gc.collect() :
pass
IECore.RefCounted.collectGarbage()
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -2,321,613,042,768,154,600 | 31.3371 | 165 | 0.58558 | false |
mwmuni/LIGGGHTS_GUI | networkx/algorithms/connectivity/connectivity.py | 21 | 29325 | # -*- coding: utf-8 -*-
"""
Flow based connectivity algorithms
"""
from __future__ import division
import itertools
import networkx as nx
# Define the default maximum flow function to use in all flow based
# connectivity algorithms.
from networkx.algorithms.flow import edmonds_karp, shortest_augmenting_path
from networkx.algorithms.flow import build_residual_network
default_flow_func = edmonds_karp
from .utils import (build_auxiliary_node_connectivity,
build_auxiliary_edge_connectivity)
__author__ = '\n'.join(['Jordi Torrents <jtorrents@milnou.net>'])
__all__ = ['average_node_connectivity',
'local_node_connectivity',
'node_connectivity',
'local_edge_connectivity',
'edge_connectivity',
'all_pairs_node_connectivity']
def local_node_connectivity(G, s, t, flow_func=None, auxiliary=None,
residual=None, cutoff=None):
r"""Computes local node connectivity for nodes s and t.
Local node connectivity for two non adjacent nodes s and t is the
minimum number of nodes that must be removed (along with their incident
edges) to disconnect them.
This is a flow based implementation of node connectivity. We compute the
maximum flow on an auxiliary digraph build from the original input
graph (see below for details).
Parameters
----------
G : NetworkX graph
Undirected graph
s : node
Source node
t : node
Target node
flow_func : function
A function for computing the maximum flow among a pair of nodes.
The function has to accept at least three parameters: a Digraph,
a source node, and a target node. And return a residual network
that follows NetworkX conventions (see :meth:`maximum_flow` for
details). If flow_func is None, the default maximum flow function
(:meth:`edmonds_karp`) is used. See below for details. The choice
of the default function may change from version to version and
should not be relied on. Default value: None.
auxiliary : NetworkX DiGraph
Auxiliary digraph to compute flow based node connectivity. It has
to have a graph attribute called mapping with a dictionary mapping
node names in G and in the auxiliary digraph. If provided
it will be reused instead of recreated. Default value: None.
residual : NetworkX DiGraph
Residual network to compute maximum flow. If provided it will be
reused instead of recreated. Default value: None.
cutoff : integer, float
If specified, the maximum flow algorithm will terminate when the
flow value reaches or exceeds the cutoff. This is only for the
algorithms that support the cutoff parameter: :meth:`edmonds_karp`
and :meth:`shortest_augmenting_path`. Other algorithms will ignore
this parameter. Default value: None.
Returns
-------
K : integer
local node connectivity for nodes s and t
Examples
--------
This function is not imported in the base NetworkX namespace, so you
have to explicitly import it from the connectivity package:
>>> from networkx.algorithms.connectivity import local_node_connectivity
We use in this example the platonic icosahedral graph, which has node
connectivity 5.
>>> G = nx.icosahedral_graph()
>>> local_node_connectivity(G, 0, 6)
5
If you need to compute local connectivity on several pairs of
nodes in the same graph, it is recommended that you reuse the
data structures that NetworkX uses in the computation: the
auxiliary digraph for node connectivity, and the residual
network for the underlying maximum flow computation.
Example of how to compute local node connectivity among
all pairs of nodes of the platonic icosahedral graph reusing
the data structures.
>>> import itertools
>>> # You also have to explicitly import the function for
>>> # building the auxiliary digraph from the connectivity package
>>> from networkx.algorithms.connectivity import (
... build_auxiliary_node_connectivity)
...
>>> H = build_auxiliary_node_connectivity(G)
>>> # And the function for building the residual network from the
>>> # flow package
>>> from networkx.algorithms.flow import build_residual_network
>>> # Note that the auxiliary digraph has an edge attribute named capacity
>>> R = build_residual_network(H, 'capacity')
>>> result = dict.fromkeys(G, dict())
>>> # Reuse the auxiliary digraph and the residual network by passing them
>>> # as parameters
>>> for u, v in itertools.combinations(G, 2):
... k = local_node_connectivity(G, u, v, auxiliary=H, residual=R)
... result[u][v] = k
...
>>> all(result[u][v] == 5 for u, v in itertools.combinations(G, 2))
True
You can also use alternative flow algorithms for computing node
connectivity. For instance, in dense networks the algorithm
:meth:`shortest_augmenting_path` will usually perform better than
the default :meth:`edmonds_karp` which is faster for sparse
networks with highly skewed degree distributions. Alternative flow
functions have to be explicitly imported from the flow package.
>>> from networkx.algorithms.flow import shortest_augmenting_path
>>> local_node_connectivity(G, 0, 6, flow_func=shortest_augmenting_path)
5
Notes
-----
This is a flow based implementation of node connectivity. We compute the
maximum flow using, by default, the :meth:`edmonds_karp` algorithm (see:
:meth:`maximum_flow`) on an auxiliary digraph build from the original
input graph:
For an undirected graph G having `n` nodes and `m` edges we derive a
directed graph H with `2n` nodes and `2m+n` arcs by replacing each
original node `v` with two nodes `v_A`, `v_B` linked by an (internal)
arc in H. Then for each edge (`u`, `v`) in G we add two arcs
(`u_B`, `v_A`) and (`v_B`, `u_A`) in H. Finally we set the attribute
capacity = 1 for each arc in H [1]_ .
For a directed graph G having `n` nodes and `m` arcs we derive a
directed graph H with `2n` nodes and `m+n` arcs by replacing each
original node `v` with two nodes `v_A`, `v_B` linked by an (internal)
arc (`v_A`, `v_B`) in H. Then for each arc (`u`, `v`) in G we add one arc
(`u_B`, `v_A`) in H. Finally we set the attribute capacity = 1 for
each arc in H.
This is equal to the local node connectivity because the value of
a maximum s-t-flow is equal to the capacity of a minimum s-t-cut.
See also
--------
:meth:`local_edge_connectivity`
:meth:`node_connectivity`
:meth:`minimum_node_cut`
:meth:`maximum_flow`
:meth:`edmonds_karp`
:meth:`preflow_push`
:meth:`shortest_augmenting_path`
References
----------
.. [1] Kammer, Frank and Hanjo Taubig. Graph Connectivity. in Brandes and
Erlebach, 'Network Analysis: Methodological Foundations', Lecture
Notes in Computer Science, Volume 3418, Springer-Verlag, 2005.
http://www.informatik.uni-augsburg.de/thi/personen/kammer/Graph_Connectivity.pdf
"""
if flow_func is None:
flow_func = default_flow_func
if auxiliary is None:
H = build_auxiliary_node_connectivity(G)
else:
H = auxiliary
mapping = H.graph.get('mapping', None)
if mapping is None:
raise nx.NetworkXError('Invalid auxiliary digraph.')
kwargs = dict(flow_func=flow_func, residual=residual)
if flow_func is shortest_augmenting_path:
kwargs['cutoff'] = cutoff
kwargs['two_phase'] = True
elif flow_func is edmonds_karp:
kwargs['cutoff'] = cutoff
return nx.maximum_flow_value(H, '%sB' % mapping[s], '%sA' % mapping[t], **kwargs)
def node_connectivity(G, s=None, t=None, flow_func=None):
r"""Returns node connectivity for a graph or digraph G.
Node connectivity is equal to the minimum number of nodes that
must be removed to disconnect G or render it trivial. If source
and target nodes are provided, this function returns the local node
connectivity: the minimum number of nodes that must be removed to break
all paths from source to target in G.
Parameters
----------
G : NetworkX graph
Undirected graph
s : node
Source node. Optional. Default value: None.
t : node
Target node. Optional. Default value: None.
flow_func : function
A function for computing the maximum flow among a pair of nodes.
The function has to accept at least three parameters: a Digraph,
a source node, and a target node. And return a residual network
that follows NetworkX conventions (see :meth:`maximum_flow` for
details). If flow_func is None, the default maximum flow function
(:meth:`edmonds_karp`) is used. See below for details. The
choice of the default function may change from version
to version and should not be relied on. Default value: None.
Returns
-------
K : integer
Node connectivity of G, or local node connectivity if source
and target are provided.
Examples
--------
>>> # Platonic icosahedral graph is 5-node-connected
>>> G = nx.icosahedral_graph()
>>> nx.node_connectivity(G)
5
You can use alternative flow algorithms for the underlying maximum
flow computation. In dense networks the algorithm
:meth:`shortest_augmenting_path` will usually perform better
than the default :meth:`edmonds_karp`, which is faster for
sparse networks with highly skewed degree distributions. Alternative
flow functions have to be explicitly imported from the flow package.
>>> from networkx.algorithms.flow import shortest_augmenting_path
>>> nx.node_connectivity(G, flow_func=shortest_augmenting_path)
5
If you specify a pair of nodes (source and target) as parameters,
this function returns the value of local node connectivity.
>>> nx.node_connectivity(G, 3, 7)
5
If you need to perform several local computations among different
pairs of nodes on the same graph, it is recommended that you reuse
the data structures used in the maximum flow computations. See
:meth:`local_node_connectivity` for details.
Notes
-----
This is a flow based implementation of node connectivity. The
algorithm works by solving `O((n-\delta-1+\delta(\delta-1)/2))`
maximum flow problems on an auxiliary digraph. Where `\delta`
is the minimum degree of G. For details about the auxiliary
digraph and the computation of local node connectivity see
:meth:`local_node_connectivity`. This implementation is based
on algorithm 11 in [1]_.
See also
--------
:meth:`local_node_connectivity`
:meth:`edge_connectivity`
:meth:`maximum_flow`
:meth:`edmonds_karp`
:meth:`preflow_push`
:meth:`shortest_augmenting_path`
References
----------
.. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms.
http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
"""
if (s is not None and t is None) or (s is None and t is not None):
raise nx.NetworkXError('Both source and target must be specified.')
# Local node connectivity
if s is not None and t is not None:
if s not in G:
raise nx.NetworkXError('node %s not in graph' % s)
if t not in G:
raise nx.NetworkXError('node %s not in graph' % t)
return local_node_connectivity(G, s, t, flow_func=flow_func)
# Global node connectivity
if G.is_directed():
if not nx.is_weakly_connected(G):
return 0
iter_func = itertools.permutations
# It is necessary to consider both predecessors
# and successors for directed graphs
def neighbors(v):
return itertools.chain.from_iterable([G.predecessors_iter(v),
G.successors_iter(v)])
else:
if not nx.is_connected(G):
return 0
iter_func = itertools.combinations
neighbors = G.neighbors_iter
# Reuse the auxiliary digraph and the residual network
H = build_auxiliary_node_connectivity(G)
R = build_residual_network(H, 'capacity')
kwargs = dict(flow_func=flow_func, auxiliary=H, residual=R)
# Pick a node with minimum degree
degree = G.degree()
minimum_degree = min(degree.values())
v = next(n for n, d in degree.items() if d == minimum_degree)
# Node connectivity is bounded by degree.
K = minimum_degree
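    # Algorithm 11: the minimum degree is an upper bound on node connectivity,
    # so start from a minimum-degree node v and tighten that bound with the
    # local computations below, passing the best value found so far as a cutoff
    # so each underlying max-flow can stop early.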
# compute local node connectivity with all its non-neighbors nodes
for w in set(G) - set(neighbors(v)) - set([v]):
kwargs['cutoff'] = K
K = min(K, local_node_connectivity(G, v, w, **kwargs))
# Also for non adjacent pairs of neighbors of v
for x, y in iter_func(neighbors(v), 2):
if y in G[x]:
continue
kwargs['cutoff'] = K
K = min(K, local_node_connectivity(G, x, y, **kwargs))
return K
def average_node_connectivity(G, flow_func=None):
r"""Returns the average connectivity of a graph G.
The average connectivity `\bar{\kappa}` of a graph G is the average
of local node connectivity over all pairs of nodes of G [1]_ .
.. math::
\bar{\kappa}(G) = \frac{\sum_{u,v} \kappa_{G}(u,v)}{{n \choose 2}}
Parameters
----------
G : NetworkX graph
Undirected graph
flow_func : function
A function for computing the maximum flow among a pair of nodes.
The function has to accept at least three parameters: a Digraph,
a source node, and a target node. And return a residual network
that follows NetworkX conventions (see :meth:`maximum_flow` for
details). If flow_func is None, the default maximum flow function
(:meth:`edmonds_karp`) is used. See :meth:`local_node_connectivity`
for details. The choice of the default function may change from
version to version and should not be relied on. Default value: None.
Returns
-------
K : float
Average node connectivity
See also
--------
:meth:`local_node_connectivity`
:meth:`node_connectivity`
:meth:`edge_connectivity`
:meth:`maximum_flow`
:meth:`edmonds_karp`
:meth:`preflow_push`
:meth:`shortest_augmenting_path`
References
----------
.. [1] Beineke, L., O. Oellermann, and R. Pippert (2002). The average
connectivity of a graph. Discrete mathematics 252(1-3), 31-45.
http://www.sciencedirect.com/science/article/pii/S0012365X01001807
"""
if G.is_directed():
iter_func = itertools.permutations
else:
iter_func = itertools.combinations
# Reuse the auxiliary digraph and the residual network
H = build_auxiliary_node_connectivity(G)
R = build_residual_network(H, 'capacity')
kwargs = dict(flow_func=flow_func, auxiliary=H, residual=R)
num, den = 0, 0
for u, v in iter_func(G, 2):
num += local_node_connectivity(G, u, v, **kwargs)
den += 1
if den == 0: # Null Graph
return 0
return num / den
def all_pairs_node_connectivity(G, nbunch=None, flow_func=None):
"""Compute node connectivity between all pairs of nodes of G.
Parameters
----------
G : NetworkX graph
Undirected graph
nbunch: container
Container of nodes. If provided node connectivity will be computed
only over pairs of nodes in nbunch.
flow_func : function
A function for computing the maximum flow among a pair of nodes.
The function has to accept at least three parameters: a Digraph,
a source node, and a target node. And return a residual network
that follows NetworkX conventions (see :meth:`maximum_flow` for
details). If flow_func is None, the default maximum flow function
(:meth:`edmonds_karp`) is used. See below for details. The
choice of the default function may change from version
to version and should not be relied on. Default value: None.
Returns
-------
all_pairs : dict
A dictionary with node connectivity between all pairs of nodes
in G, or in nbunch if provided.
See also
--------
:meth:`local_node_connectivity`
:meth:`edge_connectivity`
:meth:`local_edge_connectivity`
:meth:`maximum_flow`
:meth:`edmonds_karp`
:meth:`preflow_push`
:meth:`shortest_augmenting_path`
"""
if nbunch is None:
nbunch = G
else:
nbunch = set(nbunch)
directed = G.is_directed()
if directed:
iter_func = itertools.permutations
else:
iter_func = itertools.combinations
all_pairs = {n: {} for n in nbunch}
# Reuse auxiliary digraph and residual network
H = build_auxiliary_node_connectivity(G)
mapping = H.graph['mapping']
R = build_residual_network(H, 'capacity')
kwargs = dict(flow_func=flow_func, auxiliary=H, residual=R)
for u, v in iter_func(nbunch, 2):
K = local_node_connectivity(G, u, v, **kwargs)
all_pairs[u][v] = K
if not directed:
all_pairs[v][u] = K
return all_pairs
def local_edge_connectivity(G, u, v, flow_func=None, auxiliary=None,
residual=None, cutoff=None):
r"""Returns local edge connectivity for nodes s and t in G.
Local edge connectivity for two nodes s and t is the minimum number
of edges that must be removed to disconnect them.
This is a flow based implementation of edge connectivity. We compute the
maximum flow on an auxiliary digraph build from the original
network (see below for details). This is equal to the local edge
connectivity because the value of a maximum s-t-flow is equal to the
capacity of a minimum s-t-cut (Ford and Fulkerson theorem) [1]_ .
Parameters
----------
G : NetworkX graph
Undirected or directed graph
    u : node
        Source node
    v : node
        Target node
flow_func : function
A function for computing the maximum flow among a pair of nodes.
The function has to accept at least three parameters: a Digraph,
a source node, and a target node. And return a residual network
that follows NetworkX conventions (see :meth:`maximum_flow` for
details). If flow_func is None, the default maximum flow function
(:meth:`edmonds_karp`) is used. See below for details. The
choice of the default function may change from version
to version and should not be relied on. Default value: None.
auxiliary : NetworkX DiGraph
Auxiliary digraph for computing flow based edge connectivity. If
provided it will be reused instead of recreated. Default value: None.
residual : NetworkX DiGraph
Residual network to compute maximum flow. If provided it will be
reused instead of recreated. Default value: None.
cutoff : integer, float
If specified, the maximum flow algorithm will terminate when the
flow value reaches or exceeds the cutoff. This is only for the
algorithms that support the cutoff parameter: :meth:`edmonds_karp`
and :meth:`shortest_augmenting_path`. Other algorithms will ignore
this parameter. Default value: None.
Returns
-------
K : integer
        local edge connectivity for nodes u and v.
Examples
--------
This function is not imported in the base NetworkX namespace, so you
have to explicitly import it from the connectivity package:
>>> from networkx.algorithms.connectivity import local_edge_connectivity
We use in this example the platonic icosahedral graph, which has edge
connectivity 5.
>>> G = nx.icosahedral_graph()
>>> local_edge_connectivity(G, 0, 6)
5
If you need to compute local connectivity on several pairs of
nodes in the same graph, it is recommended that you reuse the
data structures that NetworkX uses in the computation: the
auxiliary digraph for edge connectivity, and the residual
network for the underlying maximum flow computation.
Example of how to compute local edge connectivity among
all pairs of nodes of the platonic icosahedral graph reusing
the data structures.
>>> import itertools
>>> # You also have to explicitly import the function for
>>> # building the auxiliary digraph from the connectivity package
>>> from networkx.algorithms.connectivity import (
... build_auxiliary_edge_connectivity)
>>> H = build_auxiliary_edge_connectivity(G)
>>> # And the function for building the residual network from the
>>> # flow package
>>> from networkx.algorithms.flow import build_residual_network
>>> # Note that the auxiliary digraph has an edge attribute named capacity
>>> R = build_residual_network(H, 'capacity')
>>> result = dict.fromkeys(G, dict())
>>> # Reuse the auxiliary digraph and the residual network by passing them
>>> # as parameters
>>> for u, v in itertools.combinations(G, 2):
... k = local_edge_connectivity(G, u, v, auxiliary=H, residual=R)
... result[u][v] = k
>>> all(result[u][v] == 5 for u, v in itertools.combinations(G, 2))
True
You can also use alternative flow algorithms for computing edge
connectivity. For instance, in dense networks the algorithm
:meth:`shortest_augmenting_path` will usually perform better than
the default :meth:`edmonds_karp` which is faster for sparse
networks with highly skewed degree distributions. Alternative flow
functions have to be explicitly imported from the flow package.
>>> from networkx.algorithms.flow import shortest_augmenting_path
>>> local_edge_connectivity(G, 0, 6, flow_func=shortest_augmenting_path)
5
Notes
-----
This is a flow based implementation of edge connectivity. We compute the
maximum flow using, by default, the :meth:`edmonds_karp` algorithm on an
auxiliary digraph build from the original input graph:
If the input graph is undirected, we replace each edge (`u`,`v`) with
two reciprocal arcs (`u`, `v`) and (`v`, `u`) and then we set the attribute
'capacity' for each arc to 1. If the input graph is directed we simply
add the 'capacity' attribute. This is an implementation of algorithm 1
in [1]_.
The maximum flow in the auxiliary network is equal to the local edge
connectivity because the value of a maximum s-t-flow is equal to the
capacity of a minimum s-t-cut (Ford and Fulkerson theorem).
See also
--------
:meth:`edge_connectivity`
:meth:`local_node_connectivity`
:meth:`node_connectivity`
:meth:`maximum_flow`
:meth:`edmonds_karp`
:meth:`preflow_push`
:meth:`shortest_augmenting_path`
References
----------
.. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms.
http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
"""
if flow_func is None:
flow_func = default_flow_func
if auxiliary is None:
H = build_auxiliary_edge_connectivity(G)
else:
H = auxiliary
kwargs = dict(flow_func=flow_func, residual=residual)
if flow_func is shortest_augmenting_path:
kwargs['cutoff'] = cutoff
kwargs['two_phase'] = True
elif flow_func is edmonds_karp:
kwargs['cutoff'] = cutoff
return nx.maximum_flow_value(H, u, v, **kwargs)
def edge_connectivity(G, s=None, t=None, flow_func=None):
r"""Returns the edge connectivity of the graph or digraph G.
The edge connectivity is equal to the minimum number of edges that
must be removed to disconnect G or render it trivial. If source
and target nodes are provided, this function returns the local edge
connectivity: the minimum number of edges that must be removed to
break all paths from source to target in G.
Parameters
----------
G : NetworkX graph
Undirected or directed graph
s : node
Source node. Optional. Default value: None.
t : node
Target node. Optional. Default value: None.
flow_func : function
A function for computing the maximum flow among a pair of nodes.
The function has to accept at least three parameters: a Digraph,
a source node, and a target node. And return a residual network
that follows NetworkX conventions (see :meth:`maximum_flow` for
details). If flow_func is None, the default maximum flow function
(:meth:`edmonds_karp`) is used. See below for details. The
choice of the default function may change from version
to version and should not be relied on. Default value: None.
Returns
-------
K : integer
Edge connectivity for G, or local edge connectivity if source
and target were provided
Examples
--------
>>> # Platonic icosahedral graph is 5-edge-connected
>>> G = nx.icosahedral_graph()
>>> nx.edge_connectivity(G)
5
You can use alternative flow algorithms for the underlying
maximum flow computation. In dense networks the algorithm
:meth:`shortest_augmenting_path` will usually perform better
than the default :meth:`edmonds_karp`, which is faster for
sparse networks with highly skewed degree distributions.
Alternative flow functions have to be explicitly imported
from the flow package.
>>> from networkx.algorithms.flow import shortest_augmenting_path
>>> nx.edge_connectivity(G, flow_func=shortest_augmenting_path)
5
If you specify a pair of nodes (source and target) as parameters,
this function returns the value of local edge connectivity.
>>> nx.edge_connectivity(G, 3, 7)
5
If you need to perform several local computations among different
pairs of nodes on the same graph, it is recommended that you reuse
the data structures used in the maximum flow computations. See
:meth:`local_edge_connectivity` for details.
Notes
-----
This is a flow based implementation of global edge connectivity.
For undirected graphs the algorithm works by finding a 'small'
dominating set of nodes of G (see algorithm 7 in [1]_ ) and
computing local maximum flow (see :meth:`local_edge_connectivity`)
between an arbitrary node in the dominating set and the rest of
nodes in it. This is an implementation of algorithm 6 in [1]_ .
For directed graphs, the algorithm does n calls to the maximum
flow function. This is an implementation of algorithm 8 in [1]_ .
See also
--------
:meth:`local_edge_connectivity`
:meth:`local_node_connectivity`
:meth:`node_connectivity`
:meth:`maximum_flow`
:meth:`edmonds_karp`
:meth:`preflow_push`
:meth:`shortest_augmenting_path`
References
----------
.. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms.
http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
"""
if (s is not None and t is None) or (s is None and t is not None):
raise nx.NetworkXError('Both source and target must be specified.')
# Local edge connectivity
if s is not None and t is not None:
if s not in G:
raise nx.NetworkXError('node %s not in graph' % s)
if t not in G:
raise nx.NetworkXError('node %s not in graph' % t)
return local_edge_connectivity(G, s, t, flow_func=flow_func)
# Global edge connectivity
# reuse auxiliary digraph and residual network
H = build_auxiliary_edge_connectivity(G)
R = build_residual_network(H, 'capacity')
kwargs = dict(flow_func=flow_func, auxiliary=H, residual=R)
if G.is_directed():
# Algorithm 8 in [1]
if not nx.is_weakly_connected(G):
return 0
# initial value for \lambda is minimum degree
L = min(G.degree().values())
nodes = G.nodes()
n = len(nodes)
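        # Algorithm 8: sweep one full cycle over an arbitrary node ordering,
        # taking the minimum local edge connectivity over consecutive pairs
        # (wrapping from the last node back to the first); the running minimum
        # is passed as a cutoff so each max-flow can terminate early.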
for i in range(n):
kwargs['cutoff'] = L
try:
L = min(L, local_edge_connectivity(G, nodes[i], nodes[i+1],
**kwargs))
except IndexError: # last node!
L = min(L, local_edge_connectivity(G, nodes[i], nodes[0],
**kwargs))
return L
else: # undirected
# Algorithm 6 in [1]
if not nx.is_connected(G):
return 0
# initial value for \lambda is minimum degree
L = min(G.degree().values())
# A dominating set is \lambda-covering
# We need a dominating set with at least two nodes
for node in G:
D = nx.dominating_set(G, start_with=node)
v = D.pop()
if D:
break
else:
# in complete graphs the dominating sets will always be of one node
# thus we return min degree
return L
for w in D:
kwargs['cutoff'] = L
L = min(L, local_edge_connectivity(G, v, w, **kwargs))
return L
| gpl-3.0 | -6,581,907,839,444,734,000 | 36.073325 | 88 | 0.658721 | false |
hsarmiento/people_finder_chile | tests/test_main.py | 15 | 3414 | #!/usr/bin/python2.7
# encoding: utf-8
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Main handler."""
import unittest
from google.appengine.ext import webapp
import webob
import config
import django.utils
import main
import test_handler
def setup_request(path):
"""Constructs a webapp.Request object for a given request path."""
return webapp.Request(webob.Request.blank(path).environ)
class MainTests(unittest.TestCase):
def test_get_repo_and_action(self):
def check(path, repo, action):
request = setup_request(path)
assert main.get_repo_and_action(request) == (repo, action)
check('/personfinder/foo', 'foo', '')
check('/personfinder/foo/query', 'foo', 'query')
check('/personfinder', None, '')
check('/personfinder/global', None, '')
check('/personfinder/global/faq', None, 'faq')
check('/foo', 'foo', '')
check('/foo/view', 'foo', 'view')
def test_lang_vulnerability(self):
"""Regression test for bad characters in the lang parameter."""
request = setup_request('/haiti/start&lang=abc%0adef:ghi')
env = main.setup_env(request)
assert '\n' not in env.lang, env.lang
assert ':' not in env.lang, env.lang
def test_shiftjis_get(self):
"""Tests Shift-JIS encoding of GET query parameters."""
request = setup_request(
'/japan/results?charsets=shift_jis&query=%8D%B2%93%A1&role=seek&')
handler = main.Main(request, webapp.Response())
assert handler.env.charset == 'shift_jis'
assert request.charset == 'shift_jis'
assert request.get('query') == u'\u4F50\u85E4'
def test_shiftjis_post(self):
"""Tests Shift-JIS encoding of POST query parameters."""
request = setup_request('/japan/post?')
request.body = 'charsets=shift_jis&given_name=%8D%B2%93%A1'
request.method = 'POST'
handler = main.Main(request, webapp.Response())
assert handler.env.charset == 'shift_jis'
assert request.charset == 'shift_jis'
assert request.get('given_name') == u'\u4F50\u85E4'
def test_default_language(self):
"""Verify that language_menu_options[0] is used as the default."""
request = setup_request('/haiti/start')
handler = main.Main(request, webapp.Response())
assert handler.env.lang == 'en' # first language in the options list
assert django.utils.translation.get_language() == 'en'
config.set_for_repo('haiti', language_menu_options=['fr', 'ht', 'es'])
request = setup_request('/haiti/start')
handler = main.Main(request, webapp.Response())
assert handler.env.lang == 'fr' # first language in the options list
assert django.utils.translation.get_language() == 'fr'
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -3,671,082,563,253,596,000 | 38.241379 | 78 | 0.650557 | false |
debuti/checksystemcron | src/checks/dropboxCheck.py | 1 | 1281 | #!/usr/bin/env python
###############################################################################################
# Author:
_author = '<a href="mailto:debuti@gmail.com">Borja Garcia</a>'
# Program:
_name = 'dropboxCheck'
# Descrip:
_description = '''Check if there are errors or inconsistencies in dropbox'''
# Version:
_version = '0.0.1'
# Date:
_date = '20101107'
# License: This script doesn't require any license since it's not intended to be redistributed.
#          In any case, unless stated otherwise, the author's intent is to follow GPLv3.
# History:
# 0.0.1 (20101107)
# -Initial release
###############################################################################################
# Imports
import logging
import sys
import doctest
import datetime, time
import os
import subprocess
import optparse
import inspect
import glob
import shellutils
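# check() expects a ConfigParser-style object. An illustrative config file
# (section and option names taken from the properties.get() call below; the
# path itself is a placeholder):
#
# [dropboxCheck]
# dropboxpath = /home/user/Dropbox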
def check(properties):
    '''Checks the whole Dropbox tree for synchronization problems and returns
    the list of suspicious ("Case Conflict") entries found under the configured
    dropbox path.
    '''
try:
code, output, error = shellutils.run(["find", properties.get('dropboxCheck', 'dropboxpath')])
return shellutils.grep("Case Conflict", output)
except Exception as error:
print "Error:", error
| gpl-3.0 | 1,989,935,058,595,374,000 | 30.243902 | 101 | 0.583919 | false |