hevvieevvie/osmc | package/mediacenter-addon-osmc/src/script.module.osmccommon/resources/lib/cli_remote.py | license: gpl-2.0

#!/usr/bin/env python2
import curses
import fcntl
import json
import os
import requests
import sys
import termios
import traceback
settings = {
'ip' : '127.0.0.1',
'port' : '80',
'user' : '',
'pssw' : '',
}
keymap = {
'i' : 'ip',
'p' : 'port',
'u' : 'user',
'w' : 'pssw',
}
try:
with open('/home/osmc/cli_remote.conf' , 'r') as f:
lines = f.readlines()
single = ''.join(lines)
raw_sets = json.loads(single)
settings.update(raw_sets)
except (IOError, ValueError, TypeError):
print 'USAGE : cli-remote i=Your_ip_address p=your_port u=your_username w=your_password'
print "All the settings are optional. The defaults will be used in their place if you don't specify them."
print 'Defaults:'
print ' ip : 127.0.0.1'
print ' port : 80'
print ' user : ""'
print ' pass : ""'
print ''
print "If you are using this script on the device (via ssh or something) then you don't need to put in the IP address."
print 'The default of 127.0.0.1 already points to the local host.'
print ''
print 'Alternatively, you can save a file called /home/osmc/cli_remote.conf with this:'
print '{"ip": "your_ip", "port": "your_port", "user" : "your_user", "pssw": "your_pass"}'
print 'Or just {"port": "your_port"} if that is all you would like to change.'
print ''
for arg in sys.argv[1:]:
try:
k, v = arg.split('=')
key = keymap.get(k, None)
if key is not None:
settings[key] = v
    except ValueError:
continue
def call(settings, action, params=None):
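    """Send a JSON-RPC method call to Kodi; e.g. action='Input.Up' posts
    {"jsonrpc": "2.0", "method": "Input.Up", "id": 1} to /jsonrpc."""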
url = 'http://%s:%s/jsonrpc' % (settings['ip'], settings['port'])
headers = {'Content-Type': 'application/json'}
command = {"jsonrpc":"2.0","method":"%s" % action, "id": 1}
if params is not None:
command['params'] = params
data=json.dumps(command)
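    # The key map below stores booleans as strings; convert them to real
    # JSON booleans before posting (e.g. '"true"' -> true).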
data = data.replace('"true"', 'true').replace('"false"', 'false')
r = requests.post( url, data=data, headers=headers, auth=(settings['user'], settings['pssw']))
def call_keyboard(settings, text, params=None):
url = 'http://%s:%s/jsonrpc' % (settings['ip'], settings['port'])
headers = {'Content-Type': 'application/json'}
command = {"jsonrpc":"2.0","method":"Input.SendText", "params": {"text": text}, "id": 1}
data=json.dumps(command)
r = requests.post( url, data=data, headers=headers, auth=(settings['user'], settings['pssw']))
def test(settings):
url = 'http://%s:%s/jsonrpc' % (settings['ip'], settings['port'])
headers = {'Content-Type': 'application/json'}
data=json.dumps({"jsonrpc":"2.0","method":"JSONRPC.Ping", "id": 1})
r = requests.post( url, data=data, headers=headers, auth=(settings['user'], settings['pssw']))
data=json.dumps({"jsonrpc":"2.0", "method":"GUI.ShowNotification", "params":{"title":"Kodi CLI Remote", "message":"Connected!"}, "id":1})
r = requests.post( url, data=data, headers=headers, auth=(settings['user'], settings['pssw']))
def redraw(stdscr):
stdscr.erase()
stdscr.refresh()
stdscr.addstr(2,0,">>> 'Arrow Keys' to navigate")
stdscr.addstr(3,0,">>> 'Enter' to select")
stdscr.addstr(4,0,">>> 'Backspace' or 'Esc' to navigate back")
stdscr.addstr(5,0,">>> 'c' for the context menu")
stdscr.addstr(6,0,">>> 'i' for info")
stdscr.addstr(7,0,">>> 'o' to toggle the OSD")
stdscr.addstr(8,0,">>> 's' to show codec info")
stdscr.addstr(9,0,">>> '[' and ']' volume up and down")
stdscr.addstr(10,0,">>> 'm' to toggle mute")
stdscr.addstr(11,0,">>> 'k' to enter keyboard mode (send text to Kodi's keyboard)")
stdscr.addstr(12,0,">>> 'd' debugger on, 'f' debugger off")
stdscr.addstr(13,0,">>> 'q' to quit")
stdscr.refresh()
key_map = {
curses.KEY_UP : {'name' : 'Up', 'action' : 'Input.Up'},
curses.KEY_DOWN : {'name' : 'Down', 'action' : 'Input.Down'},
curses.KEY_LEFT : {'name' : 'Left', 'action' : 'Input.Left'},
curses.KEY_RIGHT : {'name' : 'Right', 'action' : 'Input.Right'},
curses.KEY_BACKSPACE : {'name' : 'Back', 'action' : 'Input.Back'},
27 : {'name' : 'Back', 'action' : 'Input.Back'}, # ESC
99 : {'name' : 'ContextMenu', 'action' : 'Input.ContextMenu'}, # c
13 : {'name' : 'Select', 'action' : 'Input.Select'}, # ENTER
105 : {'name' : 'Info', 'action' : 'Input.Info'}, # i
104 : {'name' : 'Home', 'action' : 'Input.Home'}, # h
111 : {'name' : 'ShowOSD', 'action' : 'Input.ShowOSD'}, # o
115 : {'name' : 'ShowCodec', 'action' : 'Input.ShowCodec'}, #s
91 : {'name' : 'VolDown', 'action' : 'Application.SetVolume', # [
"params": { "volume": "decrement" }},
93 : {'name' : 'VolUp', 'action' : 'Application.SetVolume', # ]
"params": { "volume": "increment" }},
100 : {'name' : 'Debugger On', 'action' : 'Settings.SetSettingValue', # d
"params": {"setting":"debug.showloginfo", "value":"true"}},
102 : {'name' : 'Debugger Off', 'action' : 'Settings.SetSettingValue', # f
"params": {"setting":"debug.showloginfo", "value":"false"}},
109 : {'name' : 'Toggle Mute', 'action' : 'Application.SetMute', # m
"params": {"mute":"toggle"}},
}
try:
test(settings)
except requests.ConnectionError:
print 'Failed to connect.'
print 'Ensure that Kodi is able to be controlled via HTTP'
print 'Open the Kodi settings, Service, Web Server, and Enable HTTP remote.'
sys.exit()
stdscr = curses.initscr()
curses.cbreak()
curses.nonl()
stdscr.keypad(1)
redraw(stdscr)
curses.noecho()
key = ''
name = ''
while key != ord('q'):
redraw(stdscr)
if name:
stdscr.addstr(0,0, name)
key = stdscr.getch()
stdscr.refresh()
action = key_map.get(key, {}).get('action', None)
params = key_map.get(key, {}).get('params', None)
name = key_map.get(key, {}).get('name' , None)
if action is not None:
curses.setsyx(0, 0)
call(settings, action, params)
continue
if key == ord('k'):
curses.echo()
redraw(stdscr)
stdscr.addstr(0,0,"<<< KEYBOARD MODE >>>")
text = stdscr.getstr(0,23)
call_keyboard(settings, text)
curses.noecho()
redraw(stdscr)
curses.endwin()

udapi/udapi-python | udapi/tool/udpipe.py | license: gpl-3.0

"""Wrapper for UDPipe (more pythonic than ufal.udpipe)."""
import io
import sys
from ufal.udpipe import Model, Pipeline, ProcessingError, Sentence # pylint: disable=no-name-in-module
from udapi.core.resource import require_file
from udapi.block.read.conllu import Conllu as ConlluReader
from udapi.core.root import Root
class UDPipe:
"""Wrapper for UDPipe (more pythonic than ufal.udpipe)."""
def __init__(self, model):
"""Create the UDPipe tool object."""
self.model = model
path = require_file(model)
self.tool = Model.load(path)
if not self.tool:
raise IOError("Cannot load model from file '%s'" % path)
self.error = ProcessingError()
self.conllu_reader = ConlluReader()
self.tokenizer = self.tool.newTokenizer(Model.DEFAULT)
def tag_parse_tree(self, root):
"""Tag (+lemmatize, fill FEATS) and parse a tree (already tokenized)."""
descendants = root.descendants
if not descendants:
return
pipeline = Pipeline(self.tool, 'horizontal', Pipeline.DEFAULT, Pipeline.DEFAULT, 'conllu')
in_data = " ".join([n.form for n in descendants])
out_data = pipeline.process(in_data, self.error)
if self.error.occurred():
raise IOError("UDPipe error " + self.error.message)
self.conllu_reader.files.filehandle = io.StringIO(out_data)
parsed_root = self.conllu_reader.read_tree()
nodes = [root] + descendants
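        # parsed_node.ord is 1-based; prepending the root aligns the indices,
        # so a parent ord of 0 (the technical root) maps back to `root`.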
for parsed_node in parsed_root.descendants:
node = nodes[parsed_node.ord]
node.parent = nodes[parsed_node.parent.ord]
for attr in 'upos xpos lemma feats deprel'.split():
setattr(node, attr, getattr(parsed_node, attr))
# TODO: benchmark which solution is the fastest one. E.g. we could also do
# for node, parsed_node in zip(root.descendants, parsed_root.descendants):
# parsed_node.misc = node.misc
# pylint: disable=protected-access
#root._children, root._descendants = parsed_root._children, parsed_root._descendants
def tokenize_tag_parse_tree(self, root, resegment=False, tag=True, parse=True):
"""Tokenize, tag (+lemmatize, fill FEATS) and parse the text stored in `root.text`.
If resegment=True, the returned list of Udapi trees may contain multiple trees.
"""
if root.children:
raise ValueError('Tree already contained nodes before tokenization')
# Tokenize and segment the text (segmentation cannot be turned off in older UDPipe versions).
self.tokenizer.setText(root.text)
is_another = True
u_sentences = []
while is_another:
u_sentence = Sentence()
is_another = self.tokenizer.nextSentence(u_sentence)
if is_another:
u_sentences.append(u_sentence)
# If resegmentation was not required, we need to join the segments.
if not resegment and len(u_sentences) > 1:
first_sent = u_sentences[0]
n_words = first_sent.words.size() - 1
for other_sent in u_sentences[1:]:
other_words = other_sent.words.size() - 1
for i in range(1, other_words + 1):
u_w = other_sent.words[i]
n_words += 1
u_w.id = n_words
first_sent.words.append(u_w)
u_sentences = [first_sent]
# tagging and parsing
if tag:
for u_sentence in u_sentences:
self.tool.tag(u_sentence, Model.DEFAULT)
if parse:
self.tool.parse(u_sentence, Model.DEFAULT)
elif parse:
raise ValueError('Combination parse=True tag=False is not allowed.')
# converting UDPipe nodes to Udapi nodes
new_root = root
trees = []
for u_sentence in u_sentences:
if not new_root:
new_root = Root()
new_root.text = u_sentence.getText() if resegment else root.text
heads, nodes = [], [new_root]
u_words = u_sentence.words
for i in range(1, u_words.size()):
u_w = u_words[i]
node = new_root.create_child(
form=u_w.form, lemma=u_w.lemma, upos=u_w.upostag,
xpos=u_w.xpostag, feats=u_w.feats, deprel=u_w.deprel, misc=u_w.misc,
)
if parse:
heads.append(u_w.head)
nodes.append(node)
if parse:
for node in nodes[1:]:
head = heads.pop(0)
node.parent = nodes[head]
trees.append(new_root)
new_root = None
return trees
def segment_text(self, text):
"""Segment the provided text into sentences."""
self.tokenizer.setText(text)
is_another = True
sentences = []
while is_another:
u_sentence = Sentence()
is_another = self.tokenizer.nextSentence(u_sentence)
if is_another:
sentences.append(u_sentence.getText())
return sentences
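
# Minimal usage sketch (the model path below is hypothetical):
#
#   tool = UDPipe('models/english-ud.udpipe')
#   root = Root()
#   root.text = 'Hello world. How are you?'
#   trees = tool.tokenize_tag_parse_tree(root, resegment=True)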

barrettd/Gimp-python-scripts | resize.py | license: mit

# --------------------------------------------------------------------------------------------
# Image collection generation.
# I used this python script to generate a number of scaled images for Apple device slices.
#
# Run from Gimp->Filters->Python-Fu console
#
# Assume that we have a collection of related images that are scaled the same way
# to the Apple device slices.
#
# We choose one image as the 'key image' that the other images use for resizing ratios.
# 8 Jan 2017 - Barrett Davis
# --------------------------------------------------------------------------------------------
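# NOTE: `pdb` and constants such as CLIP_TO_IMAGE are globals provided by the
# GIMP Python-Fu environment; this script is not runnable outside GIMP.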
def load_png( directory, filebase ):
filetype = '.png'
filename = filebase + filetype
filepath = directory + filename
return pdb.file_png_load(filepath, filename)
def export_png( img, width, height, directory, filebase, descriptor ):
filetype = '.png'
filename = filebase + '_' + descriptor + filetype
filepath = directory + filename
dupe = pdb.gimp_image_duplicate(img)
dupe.scale( width, height )
layer = pdb.gimp_image_merge_visible_layers(dupe, CLIP_TO_IMAGE)
# print 'saving ' + filepath
pdb.file_png_save2(dupe, layer, filepath, filename,1,9,1,1,1,1,1,0,1)
pdb.gimp_image_delete(dupe)
def generate_png( img, keySize, key1xSize, multiplier, directory, filebase, descriptor ):
ratio = (float(key1xSize) * float( multiplier )) / float(keySize)
width = int(round( float( img.width ) * ratio ))
height = int(round( float( img.height ) * ratio ))
export_png( img, width, height, directory, filebase, descriptor )
def generate_iphone( img, keySize, key1xSize, directory, filebase ):
descriptor = 'iPhone'
generate_png( img, keySize, key1xSize, 1.0, directory, filebase, descriptor + '1x')
generate_png( img, keySize, key1xSize, 2.0, directory, filebase, descriptor + '2x')
generate_png( img, keySize, key1xSize, 3.0, directory, filebase, descriptor + '3x')
def generate_ipad( img, keySize, key1xSize, directory, filebase ):
descriptor = 'iPad'
generate_png( img, keySize, key1xSize, 1.0, directory, filebase, descriptor + '1x')
generate_png( img, keySize, key1xSize, 2.0, directory, filebase, descriptor + '2x')
def generate_apple_tv( img, keySize, key1xSize, directory, filebase ):
descriptor = 'AppleTV'
generate_png( img, keySize, key1xSize, 1.0, directory, filebase, descriptor + '1x')
def generate_mac( img, keySize, key1xSize, directory, filebase ):
descriptor = 'Mac'
generate_png( img, keySize, key1xSize, 1.0, directory, filebase, descriptor + '1x')
generate_png( img, keySize, key1xSize, 2.0, directory, filebase, descriptor + '2x')
# Images
imageDir = '/Volumes/Data/Pictures/Games/tumble/master/'
# Bot - key image
botName = 'bot'
botDir = imageDir + botName + '/'
botImage = load_png( botDir, botName );
# Collar
collarName = 'collar'
collarDir = imageDir + collarName + '/'
collarImage = load_png( collarDir, collarName );
# Strut
strutName = 'strut'
strutDir = imageDir + strutName + '/'
strutImage = load_png( strutDir, strutName );
# Sizes should be float
keySize = float(botImage.height) # All resizing keys off of the bot height
iPhone1xSize = 64.0 # Bot height for iPhone 1x
iPad1xSize = 154.0 # Bot height for iPad 1x
tv1xSize = 154.0 # Bot height for Apple TV 1x
mac1xSize = 288.0 # Bot height for Mac 1x
# iPhone scale
generate_iphone( botImage, keySize, iPhone1xSize, botDir, botName )
generate_iphone( collarImage, keySize, iPhone1xSize, collarDir, collarName )
generate_iphone( strutImage, keySize, iPhone1xSize, strutDir, strutName )
# iPad scale
generate_ipad( botImage, keySize, iPad1xSize, botDir, botName )
generate_ipad( collarImage, keySize, iPad1xSize, collarDir, collarName )
generate_ipad( strutImage, keySize, iPad1xSize, strutDir, strutName )
# Apple TV scale
generate_apple_tv( botImage, keySize, tv1xSize, botDir, botName )
generate_apple_tv( collarImage, keySize, tv1xSize, collarDir, collarName )
generate_apple_tv( strutImage, keySize, tv1xSize, strutDir, strutName )
# Mac scale
generate_mac( botImage, keySize, mac1xSize, botDir, botName )
generate_mac( collarImage, keySize, mac1xSize, collarDir, collarName )
generate_mac( strutImage, keySize, mac1xSize, strutDir, strutName )

felipeadner/python-udacity | movie/entertainment_center.py | license: gpl-3.0

import fresh_tomatoes
import media
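# Assumed constructor (sketch): media.Movie(title, storyline, poster_url,
# trailer_youtube_url), defined in the local media module.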
the_matrix = media.Movie("The Matrix",
"A computer hacker learns from mysterious rebels about the true nature of his reality and his role in the war against its controllers.",
"https://images-na.ssl-images-amazon.com/images/M/MV5BMDMyMmQ5YzgtYWMxOC00OTU0LWIwZjEtZWUwYTY5MjVkZjhhXkEyXkFqcGdeQXVyNDYyMDk5MTU@._V1_SY1000_CR0,0,723,1000_AL_.jpg",
"https://www.youtube.com/watch?v=m8e-FF8MsqU")
#print (the_matrix.storyline)
#the_matrix.show_trailer()
the_matrix_reloaded = media.Movie("The Matrix Reloaded",
"Neo and the rebel leaders estimate that they have 72 hours until 250,000 probes discover Zion and destroy it and its inhabitants. During this, Neo must decide how he can save Trinity from a dark fate in his dreams.",
"https://images-na.ssl-images-amazon.com/images/M/MV5BMjA0NDM5MDY2OF5BMl5BanBnXkFtZTcwNzg5OTEzMw@@._V1_SY1000_CR0,0,674,1000_AL_.jpg",
"https://www.youtube.com/watch?v=kYzz0FSgpSU")
#print (the_matrix_reloaded.storyline)
#the_matrix_reloaded.show_trailer()
the_matrix_revolutions = media.Movie("The Matrix Revolutions",
"The human city of Zion defends itself against the massive invasion of the machines as Neo fights to end the war at another front while also opposing the rogue Agent Smith.",
"https://images-na.ssl-images-amazon.com/images/M/MV5BMTkyNjc4NTQzOV5BMl5BanBnXkFtZTcwNDYzMTQyMQ@@._V1_.jpg",
"https://www.youtube.com/watch?v=hMbexEPAOQI")
#print (the_matrix_revolutions.storyline)
#the_matrix_revolutions.show_trailer()
movies = [the_matrix, the_matrix_reloaded, the_matrix_revolutions]
#fresh_tomatoes.open_movies_page(movies)
print (media.Movie.VALID_RATINGS)

caronc/nzbget-subliminal | Subliminal/chared/detector.py | license: gpl-3.0

# Copyright (c) 2011-2012 Vit Suchomel and Jan Pomikalek
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Character encoding detection library."""
import os
import sys
import struct
ENCODE_REPLACEMENT_CHARACTER = '\x00'
MODEL_VERSION = '1.3'
def list_models():
"Returns a list of inbuilt models."
models = []
models_dir = os.path.join(
os.path.dirname(sys.modules['chared'].__file__), 'models')
for filename in os.listdir(models_dir):
if filename.endswith('.edm'):
models.append(filename.rsplit('.', 1)[0])
return sorted(models)
def get_model_path(model_id):
"""
Returns the full path to the model with given id or None if no model with
the ID exists.
"""
models_dir = os.path.join(
os.path.dirname(sys.modules['chared'].__file__), 'models')
filepath = os.path.join(models_dir, model_id + '.edm')
if os.path.isfile(filepath):
return filepath
else:
return None
def scalar_product(vec1, vec2):
    "Returns a scalar product of the two vectors."
    result = 0
    for key, value in vec1.iteritems():
        result += value * vec2.get(key, 0)
    return result
def replace_by_zero(error):
"""
Replaces unknown bytes while encoding/decoding.
The function has to be registered using codecs.register_error.
"""
if isinstance(error, UnicodeEncodeError):
return (unicode(ENCODE_REPLACEMENT_CHARACTER), error.end)
elif isinstance(error, UnicodeDecodeError):
return (u'\ufffd', error.end)
raise error
class EncodingDetector(object):
VECTOR_TUPLE_LENGTH = 3
def __init__(self, version=MODEL_VERSION, vectors={}, enc_order=()):
self._version = version
self._vectors = vectors
self._encodings_order = enc_order
def get_version(self):
return self._version
def save(self, path):
"""
Saves the model to the specified path.
File format:
general row: <verison><TAB><tuple length><TAB><encodings count>
for each encoding:
info row: <name><TAB><order><TAB><vector length>
vector row: <key><packed value>...
"""
with open(path, 'wb') as fp:
#basic attributes
fp.write('%s\t%d\t%d\n' %
(self._version, self.VECTOR_TUPLE_LENGTH, len(self._vectors)))
#vectors
for enc, vector in self._vectors.iteritems():
#encoding name, encoding order
vect_len = len(vector)
enc_order = self.get_encoding_order(enc)
fp.write('%s\t%d\t%d\n' % (enc, enc_order, vect_len))
#vector keys & values
for k, v in vector.iteritems():
fp.write('%s%s' % (k, struct.pack('=I', v)))
fp.write('\n')
@classmethod
def load(cls, path):
"""
Loads the model from the specified path.
Returns a new instance of EncodingDetector.
"""
version = ''
vectors = {}
enc_order = {}
with open(path, 'rb') as fp:
#basic attributes
version, vect_tuple_length, enc_count = fp.readline().split('\t')
if MODEL_VERSION != version:
sys.stderr.write('WARNING: Potentially incompatible model versions!\n')
sys.stderr.write('\t%s: %s\n\tthis module: %s\n' % (path, version, MODEL_VERSION))
vect_tuple_length = int(vect_tuple_length)
#vectors
for i in range(int(enc_count)):
#encoding name, encoding order
enc, order, vect_len = fp.readline().split('\t')
enc_order[int(order)] = enc
#vector keys & values
vectors[enc] = {}
for j in range(int(vect_len)):
key = fp.read(vect_tuple_length)
vectors[enc][key] = struct.unpack('=I', fp.read(4))[0]
fp.read(1)
return EncodingDetector(version, vectors, enc_order.values())
def vectorize(self, string):
"""
Transforms the input strings into a frequency vector of n-grams of
contained characters.
Omits vector keys containing the encoding replacement character.
"""
str_len = len(string)
if self.VECTOR_TUPLE_LENGTH > str_len:
return {}
vector = {}
for i in range(str_len - self.VECTOR_TUPLE_LENGTH + 1):
key = string[i:i + self.VECTOR_TUPLE_LENGTH]
if ENCODE_REPLACEMENT_CHARACTER not in key:
vector[key] = vector.get(key, 0) + 1
return vector
def train(self, string, encoding):
"Trains the detector. The input must be a string and its encoding."
self._vectors[encoding] = self.vectorize(string)
def set_encodings_order(self, encodings):
"""
Defines the order (importance / frequency of use) of the encodings
the classifier has been trained on. The input must be a list or a
tuple of encodings. The first is the most important and the last is
the least important.
"""
if not isinstance(encodings, (tuple, list)):
raise TypeError
self._encodings_order = tuple(encodings)
def get_encoding_order(self, encoding):
"""
Returns the order of the encoding or sys.maxint if no order is
defined for it.
"""
if encoding in self._encodings_order:
return self._encodings_order.index(encoding)
return sys.maxint
def classify(self, string):
"""
Returns the predicted character encoding(s) for the input string as
a list. The list may contain more than one element if there are
multiple equally likely candidates. In this case, the candidates are
returned in the order of importance (see set_encodings_order). Empty
list may be returned if there are no valid candidates.
"""
input_vector = self.vectorize(string)
classification = []
for clas, vector in self._vectors.iteritems():
score = scalar_product(input_vector, vector)
clas_info = {'clas': clas, 'score': score,
'order': self.get_encoding_order(clas)}
classification.append(clas_info)
if not classification:
return []
#order result classes
# 1.) by vector similarity score (higher score is better)
# 2.) by the encoding order (lower index is better)
classification.sort(lambda x, y:
cmp(y['score'], x['score']) or cmp(x['order'], y['order']))
#return a list of the top classes
# the top classes have the same score and order as the first one
first = classification[0]
result = []
for clas in classification:
if first['score'] == clas['score']:
result.append(clas['clas'])
return result
def reduce_vectors(self):
"""
Remove the common parts of all vectors. Should be called after all
training data has been loaded. Provided the training has been performed
on the same data for all encodings, reducing vectors increases both
efficiency and accuracy of the classification.
"""
#get frequencies of (key, value) pairs
key_value_count = {}
for vect in self._vectors.values():
for key, value in vect.iteritems():
key_value_count[(key, value)] = key_value_count.get(
(key, value), 0) + 1
#remove common parts of vectors (the (key, value) pairs with the
#frequency equal to the number of vectors)
encodings_count = len(self._vectors)
for (key, value), count in key_value_count.iteritems():
if count >= encodings_count:
for vect in self._vectors.values():
if vect.has_key(key):
del vect[key]
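
# Minimal usage sketch (the model id is hypothetical; see list_models()):
#
#   detector = EncodingDetector.load(get_model_path('czech'))
#   if detector is not None:
#       print detector.classify(raw_bytes)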

google/atheris | example_fuzzers/idna_fuzzer/idna_uts46_fuzzer.py | license: apache-2.0

#!/usr/bin/python3
# coding=utf-8
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" IDNA encoding/decoding differential fuzzer for Python idna vs libidn2
This is a differential fuzzer that compares the Python `idna` package with the
`libidn2` package. It only considers situations where both libraries consider a
domain to be valid, but produce different results. libidn2 is called via a thin
wrapper that defines libidn2 Python bindings.
This fuzzer enables UTS#46 translation (a feature that transforms certain
invalid characters into valid ones), and fuzzes against other encoding options.
To run this fuzzer, you'll need to install a thin wrapper to make libidn2
callable from Python; install libidn2, then cd to `libidn2_wrapper/` and run
`pip3 install .`.
This fuzzer found a number of domains which encode differently in Python `idna`
vs. `libidn2`. The fuzzer was designed to find mistakes in the Python idna
package, but actually found problems with libidn2.
As an example, `a.İ᷹` (codepoints `['61', '2e', '130', '1df9']`) encodes to the
Punycode `a.xn--i-9bb708r` in Python, but `a.xn--i-9bb808r` in libidn2. This
error occurs because libidn2 supports Unicode 11 and therefore accepts the
domain as valid; but it relies on `libunistring`, which only supports
Unicode 9 and therefore produces incorrect metadata about Unicode 11 characters.
"""
import atheris
import idna
import sys
import unicodedata
import libidn2
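# `libidn2` is the thin local wrapper installed from libidn2_wrapper/ (see
# the module docstring), not an upstream PyPI package.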
def TestOneInput(input_bytes):
fdp = atheris.FuzzedDataProvider(input_bytes)
transitional = fdp.ConsumeBool()
std3 = fdp.ConsumeBool()
original = "a." + fdp.ConsumeUnicode(253)
try:
nfc_original = unicodedata.normalize("NFC", original)
libidn2_encoded = libidn2.encode(
original,
uts46=True,
transitional=transitional,
nfc=True,
std3=std3)
idna_encoded = idna.encode(
original,
strict=False,
uts46=True,
transitional=transitional,
std3_rules=std3).lower()
except Exception as e:
return
if idna_encoded != libidn2_encoded:
sys.stderr.write("Transitional=%s, std3=%s\n" % (transitional, std3))
sys.stderr.write("Input codepoints: %s\n" %
[hex(ord(x))[2:] for x in original])
raise RuntimeError(
"IDNA encoding disagrees with libidn2 encoding.\nInput: %s\nIDNA encoding: %s\nlibidn2 encoding: %s\n"
% (original, idna_encoded, libidn2_encoded))
idna_decoded = idna.decode(idna_encoded, uts46=True, std3_rules=std3)
libidn2_decoded = libidn2.decode(idna_encoded, uts46=True, std3=std3)
if idna_decoded != libidn2_decoded:
raise RuntimeError(
"IDNA decoding disagrees with libidn2 decoding.\nInput: %s\nEncoding: %s\nIDNA decoding: %s\nlibidn2 decoding: %s"
% (original, idna_encoded, idna_decoded, libidn2_decoded))
def main():
atheris.Setup(sys.argv, TestOneInput)
atheris.Fuzz()
if __name__ == "__main__":
main()

lduarte1991/edx-platform | pavelib/utils/envs.py | license: agpl-3.0

"""
Helper functions for loading environment settings.
"""
from __future__ import print_function
import json
import os
import sys
from time import sleep
import memcache
from lazy import lazy
from path import Path as path
from paver.easy import sh
from pavelib.utils.cmd import django_cmd
def repo_root():
"""
Get the root of the git repository (edx-platform).
This sometimes fails on Docker Devstack, so it's been broken
down with some additional error handling. It usually starts
working within 30 seconds or so; for more details, see
https://openedx.atlassian.net/browse/PLAT-1629 and
https://github.com/docker/for-mac/issues/1509
"""
file_path = path(__file__)
attempt = 1
while True:
try:
absolute_path = file_path.abspath()
break
except OSError:
print('Attempt {}/180 to get an absolute path failed'.format(attempt))
if attempt < 180:
attempt += 1
sleep(1)
else:
print('Unable to determine the absolute path of the edx-platform repo, aborting')
raise
return absolute_path.parent.parent.parent
class Env(object):
"""
Load information about the execution environment.
"""
# Root of the git repository (edx-platform)
REPO_ROOT = repo_root()
# Reports Directory
REPORT_DIR = REPO_ROOT / 'reports'
METRICS_DIR = REPORT_DIR / 'metrics'
# Generic log dir
GEN_LOG_DIR = REPO_ROOT / "test_root" / "log"
# Python unittest dirs
PYTHON_COVERAGERC = REPO_ROOT / ".coveragerc"
# Bok_choy dirs
BOK_CHOY_DIR = REPO_ROOT / "common" / "test" / "acceptance"
BOK_CHOY_LOG_DIR = GEN_LOG_DIR
BOK_CHOY_REPORT_DIR = REPORT_DIR / "bok_choy"
BOK_CHOY_A11Y_REPORT_DIR = REPORT_DIR / "a11y"
BOK_CHOY_COVERAGERC = BOK_CHOY_DIR / ".coveragerc"
BOK_CHOY_A11Y_COVERAGERC = BOK_CHOY_DIR / ".a11ycoveragerc"
BOK_CHOY_A11Y_CUSTOM_RULES_FILE = (
REPO_ROOT / "node_modules" / "edx-custom-a11y-rules" /
"lib" / "custom_a11y_rules.js"
)
PA11YCRAWLER_REPORT_DIR = REPORT_DIR / "pa11ycrawler"
PA11YCRAWLER_COVERAGERC = BOK_CHOY_DIR / ".pa11ycrawlercoveragerc"
# If set, put reports for run in "unique" directories.
# The main purpose of this is to ensure that the reports can be 'slurped'
# in the main jenkins flow job without overwriting the reports from other
# build steps. For local development/testing, this shouldn't be needed.
if os.environ.get("SHARD", None):
shard_str = "shard_{}".format(os.environ.get("SHARD"))
BOK_CHOY_REPORT_DIR = BOK_CHOY_REPORT_DIR / shard_str
BOK_CHOY_LOG_DIR = BOK_CHOY_LOG_DIR / shard_str
# For the time being, stubs are used by both the bok-choy and lettuce acceptance tests
# For this reason, the stubs package is currently located in the Django app called "terrain"
# where other lettuce configuration is stored.
BOK_CHOY_STUB_DIR = REPO_ROOT / "common" / "djangoapps" / "terrain"
# Directory that videos are served from
VIDEO_SOURCE_DIR = REPO_ROOT / "test_root" / "data" / "video"
# Detect if in a Docker container, and if so which one
SERVER_HOST = os.environ.get('BOK_CHOY_HOSTNAME', '0.0.0.0')
USING_DOCKER = SERVER_HOST != '0.0.0.0'
SETTINGS = 'bok_choy_docker' if USING_DOCKER else 'bok_choy'
DEVSTACK_SETTINGS = 'devstack_docker' if USING_DOCKER else 'devstack'
TEST_SETTINGS = 'test'
BOK_CHOY_SERVERS = {
'lms': {
'host': SERVER_HOST,
'port': os.environ.get('BOK_CHOY_LMS_PORT', '8003'),
'log': BOK_CHOY_LOG_DIR / "bok_choy_lms.log"
},
'cms': {
'host': SERVER_HOST,
'port': os.environ.get('BOK_CHOY_CMS_PORT', '8031'),
'log': BOK_CHOY_LOG_DIR / "bok_choy_studio.log"
}
}
BOK_CHOY_STUBS = {
'xqueue': {
'port': 8040,
'log': BOK_CHOY_LOG_DIR / "bok_choy_xqueue.log",
'config': 'register_submission_url=http://0.0.0.0:8041/test/register_submission',
},
'ora': {
'port': 8041,
'log': BOK_CHOY_LOG_DIR / "bok_choy_ora.log",
'config': '',
},
'comments': {
'port': 4567,
'log': BOK_CHOY_LOG_DIR / "bok_choy_comments.log",
},
'video': {
'port': 8777,
'log': BOK_CHOY_LOG_DIR / "bok_choy_video_sources.log",
'config': "root_dir={}".format(VIDEO_SOURCE_DIR),
},
'youtube': {
'port': 9080,
'log': BOK_CHOY_LOG_DIR / "bok_choy_youtube.log",
},
'edxnotes': {
'port': 8042,
'log': BOK_CHOY_LOG_DIR / "bok_choy_edxnotes.log",
},
'ecommerce': {
'port': 8043,
'log': BOK_CHOY_LOG_DIR / "bok_choy_ecommerce.log",
},
'catalog': {
'port': 8091,
'log': BOK_CHOY_LOG_DIR / "bok_choy_catalog.log",
},
}
# Mongo databases that will be dropped before/after the tests run
MONGO_HOST = 'edx.devstack.mongo' if USING_DOCKER else 'localhost'
BOK_CHOY_MONGO_DATABASE = "test"
BOK_CHOY_CACHE_HOST = 'edx.devstack.memcached' if USING_DOCKER else '0.0.0.0'
BOK_CHOY_CACHE = memcache.Client(['{}:11211'.format(BOK_CHOY_CACHE_HOST)], debug=0)
# Test Ids Directory
TEST_DIR = REPO_ROOT / ".testids"
# Configured browser to use for the js test suites
SELENIUM_BROWSER = os.environ.get('SELENIUM_BROWSER', 'firefox')
if USING_DOCKER:
KARMA_BROWSER = 'ChromeDocker' if SELENIUM_BROWSER == 'chrome' else 'FirefoxDocker'
else:
KARMA_BROWSER = 'FirefoxNoUpdates'
# Files used to run each of the js test suites
# TODO: Store this as a dict. Order seems to matter for some
# reason. See issue TE-415.
KARMA_CONFIG_FILES = [
REPO_ROOT / 'cms/static/karma_cms.conf.js',
REPO_ROOT / 'cms/static/karma_cms_squire.conf.js',
REPO_ROOT / 'lms/static/karma_lms.conf.js',
REPO_ROOT / 'lms/static/karma_lms_coffee.conf.js',
REPO_ROOT / 'common/lib/xmodule/xmodule/js/karma_xmodule.conf.js',
REPO_ROOT / 'common/static/karma_common.conf.js',
REPO_ROOT / 'common/static/karma_common_requirejs.conf.js',
]
JS_TEST_ID_KEYS = [
'cms',
'cms-squire',
'lms',
'lms-coffee',
'xmodule',
'common',
'common-requirejs'
]
JS_REPORT_DIR = REPORT_DIR / 'javascript'
# Directories used for common/lib/tests
IGNORED_TEST_DIRS = ('__pycache__', '.cache')
LIB_TEST_DIRS = []
for item in (REPO_ROOT / "common/lib").listdir():
dir_name = (REPO_ROOT / 'common/lib' / item)
if dir_name.isdir() and not dir_name.endswith(IGNORED_TEST_DIRS):
LIB_TEST_DIRS.append(path("common/lib") / item.basename())
LIB_TEST_DIRS.append(path("pavelib/paver_tests"))
# Directory for i18n test reports
I18N_REPORT_DIR = REPORT_DIR / 'i18n'
# Service variant (lms, cms, etc.) configured with an environment variable
# We use this to determine which envs.json file to load.
SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)
# If service variant not configured in env, then pass the correct
# environment for lms / cms
    if not SERVICE_VARIANT:  # intentionally also catches the empty string
if any(i in sys.argv[1:] for i in ('cms', 'studio')):
SERVICE_VARIANT = 'cms'
else:
SERVICE_VARIANT = 'lms'
@classmethod
    def get_django_setting(cls, django_setting, system, settings=None):
"""
Interrogate Django environment for specific settings values
:param django_setting: the django setting to get
:param system: the django app to use when asking for the setting (lms | cms)
:param settings: the settings file to use when asking for the value
:return: unicode value of the django setting
"""
if not settings:
settings = os.environ.get("EDX_PLATFORM_SETTINGS", "aws")
value = sh(
django_cmd(
system,
settings,
"print_setting {django_setting} 2>/dev/null".format(
django_setting=django_setting
)
),
capture=True
)
return unicode(value).strip()
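
    # Example (sketch): Env.get_django_setting("STATIC_ROOT", "lms") shells
    # out to the `print_setting` management command for the LMS and returns
    # its output as unicode.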
@lazy
def env_tokens(self):
"""
Return a dict of environment settings.
If we couldn't find the JSON file, issue a warning and return an empty dict.
"""
# Find the env JSON file
if self.SERVICE_VARIANT:
env_path = self.REPO_ROOT.parent / "{service}.env.json".format(service=self.SERVICE_VARIANT)
else:
env_path = path("env.json").abspath()
# If the file does not exist, here or one level up,
# issue a warning and return an empty dict
if not env_path.isfile():
env_path = env_path.parent.parent / env_path.basename()
if not env_path.isfile():
print(
"Warning: could not find environment JSON file "
"at '{path}'".format(path=env_path),
file=sys.stderr,
)
return dict()
# Otherwise, load the file as JSON and return the resulting dict
try:
with open(env_path) as env_file:
return json.load(env_file)
except ValueError:
print(
"Error: Could not parse JSON "
"in {path}".format(path=env_path),
file=sys.stderr,
)
sys.exit(1)
@lazy
def feature_flags(self):
"""
Return a dictionary of feature flags configured by the environment.
"""
return self.env_tokens.get('FEATURES', dict())

pagepart/chinup | chinup/allauth.py | license: mit

from __future__ import absolute_import, unicode_literals
from allauth.socialaccount.models import SocialToken
from django.conf import settings as django_settings
from django.contrib.auth import get_user_model
from django.db.models import Q
from . import chinup, exceptions
class NoSuchUser(exceptions.ChinupError):
pass
exceptions.NoSuchUser = NoSuchUser
class MissingToken(exceptions.ChinupError):
pass
exceptions.MissingToken = MissingToken
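
# The two assignments above re-export the exceptions on the shared
# chinup.exceptions module, so callers can catch them from either place.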
class Chinup(chinup.Chinup):
def __init__(self, **kwargs):
self.user = kwargs.pop('user', None)
super(Chinup, self).__init__(**kwargs)
def __unicode__(self, extra=''):
extra = '{}user={}'.format(extra and extra + ' ', self.user)
return super(Chinup, self).__unicode__(extra=extra)
def __getstate__(self):
return dict(super(Chinup, self).__getstate__(),
user=getattr(self.user, 'pk', self.user))
@classmethod
def prepare_batch(cls, chinups):
# Populate user tokens into chinups. This also immediately "completes"
# any chinups which require a token that isn't available, by setting
# chinup.exception.
cls._fetch_users(chinups)
cls._fetch_user_tokens(chinups)
# Weed out any chinups that didn't pass token stage.
chinups = [c for c in chinups if not c.completed]
return super(Chinup, cls).prepare_batch(chinups)
@classmethod
def _fetch_users(cls, chinups):
chinups = [c for c in chinups if not c.completed and not c.token
and isinstance(c.user, (int, basestring))]
if chinups:
users = cls._users_dict(chinups)
for c in chinups:
user = users.get(c.user)
if user:
c.user = user
else:
c.exception = NoSuchUser("No user %r" % c.user)
@classmethod
def _users_dict(cls, chinups):
User = get_user_model()
db_users = User.objects.filter(
Q(pk__in=set(c.user for c in chinups if isinstance(c.user, int))) |
Q(username__in=set(c.user for c in chinups if isinstance(c.user, basestring))))
users = {u.pk: u for u in db_users}
users.update({u.username: u for u in db_users})
return users
@classmethod
def _fetch_user_tokens(cls, chinups):
chinups = [c for c in chinups if not c.completed and not c.token
and c.user]
if chinups:
social_tokens = cls._social_token_queryset(chinups)
social_tokens = social_tokens.select_related('account')
assert (len(set(st.account.user_id for st in social_tokens)) ==
len(social_tokens))
tokens = {st.account.user_id: st.token for st in social_tokens}
for c in chinups:
token = tokens.get(c.user.pk)
if token:
c.token = token
else:
c.exception = MissingToken("No token for %r" % c.user)
@classmethod
def _social_token_queryset(cls, chinups, **kwargs):
site_id = getattr(django_settings, 'SITE_ID', None)
if site_id:
kwargs.setdefault('app__sites__id', site_id)
return SocialToken.objects.filter(
account__provider='facebook',
account__user__in=set(c.user for c in chinups),
**kwargs)
class ChinupBar(chinup.ChinupBar):
chinup_class = Chinup
def __init__(self, **kwargs):
self.user = kwargs.pop('user', None)
super(ChinupBar, self).__init__(**kwargs)
def _get_chinup(self, **kwargs):
return super(ChinupBar, self)._get_chinup(
user=self.user, **kwargs)
def __getstate__(self):
return dict(super(ChinupBar, self).__getstate__(),
user=getattr(self.user, 'pk', self.user))
__all__ = ['Chinup', 'ChinupBar', 'NoSuchUser', 'MissingToken']

PAIR-code/lit | lit_nlp/examples/lm_demo.py | license: apache-2.0

# Lint as: python3
r"""Example demo loading pre-trained language models.
Currently supports the following model types:
- BERT (bert-*) as a masked language model
- GPT-2 (gpt2* or distilgpt2) as a left-to-right language model
To run locally:
python -m lit_nlp.examples.pretrained_lm_demo \
--models=bert-base-uncased --top_k 10 --port=5432
Then navigate to localhost:5432 to access the demo UI.
"""
import os
import sys
from absl import app
from absl import flags
from absl import logging
from lit_nlp import dev_server
from lit_nlp import server_flags
from lit_nlp.api import dtypes as lit_dtypes
from lit_nlp.components import word_replacer
from lit_nlp.examples.datasets import classification
from lit_nlp.examples.datasets import glue
from lit_nlp.examples.datasets import lm
from lit_nlp.examples.models import pretrained_lms
# NOTE: additional flags defined in server_flags.py
FLAGS = flags.FLAGS
flags.DEFINE_list(
"models", ["bert-base-uncased", "gpt2"],
"Models to load. Currently supports variants of BERT and GPT-2.")
flags.DEFINE_integer("top_k", 10,
"Rank to which the output distribution is pruned.")
flags.DEFINE_integer(
"max_examples", 1000,
"Maximum number of examples to load from each evaluation set. Set to None to load the full set."
)
flags.DEFINE_bool(
"load_bwb", False,
"If true, will load examples from the Billion Word Benchmark dataset. This may download a lot of data the first time you run it, so disable by default for the quick-start example."
)
# Custom frontend layout; see client/lib/types.ts
LM_LAYOUT = lit_dtypes.LitComponentLayout(
components={
"Main": [
"embeddings-module",
"data-table-module",
"datapoint-editor-module",
"lit-slice-module",
"color-module",
],
"Predictions": [
"lm-prediction-module",
"confusion-matrix-module",
],
"Counterfactuals": ["generator-module"],
},
description="Custom layout for language models.",
)
CUSTOM_LAYOUTS = {"lm": LM_LAYOUT}
# You can also change this via URL param e.g. localhost:5432/?layout=default
FLAGS.set_default("default_layout", "lm")
def get_wsgi_app():
FLAGS.set_default("server_type", "external")
FLAGS.set_default("demo_mode", True)
# Parse flags without calling app.run(main), to avoid conflict with
# gunicorn command line flags.
unused = flags.FLAGS(sys.argv, known_only=True)
return main(unused)
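
# With get_wsgi_app() the demo can be hosted by a WSGI server, e.g. (sketch):
#   gunicorn --workers 1 "lit_nlp.examples.lm_demo:get_wsgi_app()"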
def main(_):
##
# Load models, according to the --models flag.
models = {}
for model_name_or_path in FLAGS.models:
# Ignore path prefix, if using /path/to/<model_name> to load from a
# specific directory rather than the default shortcut.
model_name = os.path.basename(model_name_or_path)
if model_name.startswith("bert-"):
models[model_name] = pretrained_lms.BertMLM(
model_name_or_path, top_k=FLAGS.top_k)
elif model_name.startswith("gpt2") or model_name in ["distilgpt2"]:
models[model_name] = pretrained_lms.GPT2LanguageModel(
model_name_or_path, top_k=FLAGS.top_k)
else:
raise ValueError(
f"Unsupported model name '{model_name}' from path '{model_name_or_path}'"
)
datasets = {
# Single sentences from movie reviews (SST dev set).
"sst_dev": glue.SST2Data("validation").remap({"sentence": "text"}),
# Longer passages from movie reviews (IMDB dataset, test split).
"imdb_train": classification.IMDBData("test"),
# Empty dataset, if you just want to type sentences into the UI.
"blank": lm.PlaintextSents(""),
}
# Guard this with a flag, because TFDS will download and process 1.67 GB
# of data if you haven't loaded `lm1b` before.
if FLAGS.load_bwb:
# A few sentences from the Billion Word Benchmark (Chelba et al. 2013).
datasets["bwb"] = lm.BillionWordBenchmark(
"train", max_examples=FLAGS.max_examples)
for name in datasets:
datasets[name] = datasets[name].slice[:FLAGS.max_examples]
logging.info("Dataset: '%s' with %d examples", name, len(datasets[name]))
generators = {"word_replacer": word_replacer.WordReplacer()}
lit_demo = dev_server.Server(
models,
datasets,
generators=generators,
layouts=CUSTOM_LAYOUTS,
**server_flags.get_flags())
return lit_demo.serve()
if __name__ == "__main__":
app.run(main)

bayazee/flacon | setup.py | license: bsd-3-clause

"""
Flacon
-------------
Flask application manager
"""
from setuptools import setup
setup(
name='Flacon',
version='0.0.1',
url='',
license='BSD',
author='Mehdi Bayazee, Mostafa Rokooie',
author_email='bayazee@gmail.com, mostafa.rokooie@gmail.com',
description='Flask based web framework',
long_description=__doc__,
packages=['flacon', 'flacon.commands'],
include_package_data=True,
package_data={'flacon': ['flacon/actions/project_template/*']},
namespace_packages=['flacon'],
zip_safe=False,
platforms='any',
install_requires=[
'flask>=0.9'
],
# scripts=['flacon/actions/flacon.py'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)

bhtucker/agents | abm/xypops.py | license: mit

# -*- coding: utf-8 -*-
"""
abm.xypops
~~~~~~~~~~
Environments not backed by networkx whose x, y traits are used in visualization
"""
from scipy.stats.distributions import norm
from scipy.stats.distributions import uniform
from sklearn.metrics.pairwise import euclidean_distances
from abm.viz import display_network
from abm.pops import Environment
from abm.entities import XyEntity
import numpy as np
from random import choice
Y_DIST = norm(300, 10)
CLUSTER_X_DIST_MAP = {
'A': uniform(0, 50),
'B': uniform(30, 50),
'C': uniform(60, 50)
}
CLUSTER_SIZES = {
'A': 8,
'B': 10,
'C': 8
}
def make_points(cluster, size, y_dist, x_dist):
"""Creates a set of points using y_dist and x_dist to draw the location."""
ys = y_dist.rvs(size)
xs = x_dist.rvs(size)
return list(zip(xs, ys, [cluster] * size))
class XyEnvironment(Environment):
"""
A set of connected Entities. Handles message passing and displaying.
Entities are connected randomly.
"""
def __init__(self, y_pos_dist=Y_DIST, cluster_x_dists=CLUSTER_X_DIST_MAP,
cluster_sizes=CLUSTER_SIZES, single_component=True,
entity_class=XyEntity, **kwargs):
super(XyEnvironment, self).__init__(**kwargs)
self.population = []
self.connectivity_matrix = None
self.connected_components = []
self.node_component_map = {}
self.entity_class = entity_class
self._set_entities(y_pos_dist, cluster_x_dists, cluster_sizes)
self._set_connectivity_matrix()
self._set_connections()
if single_component:
self._ensure_single_component()
def _set_entities(self, y_pos_dist, cluster_x_dists, cluster_sizes):
point_args = []
for cluster, size in cluster_sizes.iteritems():
point_args += make_points(cluster, size,
y_pos_dist, cluster_x_dists[cluster])
for ix, (x, y, cluster) in enumerate(point_args):
pt = self.entity_class(environment=self, index=ix, x=x, y=y, cluster=cluster)
self.population.append(pt)
self.size = len(self.population)
def _set_connections(self, track_components=True):
"""Initializes each Entity's adjacency list.
:param track_components: Flag for tracking connected components during graph construction
"""
for index, point in enumerate(self.population):
# make set of connections to indices; np.where returns a tuple
adjacencies = set(np.where(self.connectivity_matrix[index] > 0)[0])
adjacencies.discard(index)
# pass adjacency information down to agent
point.set_adjacencies(adjacencies)
if track_components:
# track connected components as we construct edges
if index in self.node_component_map:
component = self.node_component_map[index]
else:
component = set([index])
self.node_component_map[index] = component
self.connected_components.append(component)
# update the component in place with potential new members
component.update(adjacencies)
# update the node - component map so we can fetch this object
# for adjacencies
self.node_component_map.update(
{a: component for a in adjacencies})
# resolve potential component connections
self._resolve_components(component)
n = float(len(self.population))
k = float(np.sum(self.connectivity_matrix)) / 2
self.edge_density = k / (n * (n - 1) / 2)
def _ensure_single_component(self):
"""
Iterate through disjoint component list, adding connections between sequential components
Update other datastructures to reflect the new connections
"""
for ix, component in enumerate(self.connected_components[:-1]):
start, end = (choice(list(component)), choice(
list(self.connected_components[ix + 1])))
self.population[start].adjacencies.append(end)
self.population[end].adjacencies.append(start)
self.connectivity_matrix[start][end] = True
self.connectivity_matrix[end][start] = True
self.connected_components[ix].add(end)
self.connected_components[ix + 1].add(start)
self._resolve_components(self.connected_components[0])
def _resolve_components(self, component):
"""
Find components thought to be separate that now have intersections
Condense these and set self.connected_components to be a list of disjoint sets
"""
resolved_components = [component]
for other_component in self.connected_components:
if other_component.intersection(component) or other_component is component:
component.update(other_component)
self.node_component_map.update(
{a: component for a in other_component})
else:
resolved_components.append(other_component)
self.connected_components = resolved_components
def _set_connectivity_matrix(self):
"""
Computes the connectivity matrix of this Environment. Each point is
connected to each other within a radius.
"""
if self.connectivity_matrix is not None:
return
# generate a random symmetric matrix
point_count = len(self.population)
matrix = np.random.randint(
0, 2, point_count ** 2).reshape(point_count, point_count)
matrix = (matrix + matrix.T) / 2
for i in range(point_count):
matrix[i][i] = 0
self.connectivity_matrix = matrix
def display(self, current=None, target=None):
"""
Plots the state of the task. If <show> = False, doesn't plot
anything and the simulation can run faster.
"""
if not self.show:
return
display_network(self.population, self.connectivity_matrix,
current=current, target=target)
class CappedPreferentialEnvironment(XyEnvironment):
"""
A set of connected Entities. Handles message passing and displaying. Connections are laid
out such that entities of the same cluster are more likely to be tied together,
proportionally to a parameter alpha. The overall density of the network is controlled
by a parameter beta.
"""
def __init__(self, alpha=0.8, beta=0.4, *args, **kwargs):
self.alpha = alpha
self.beta = beta
super(CappedPreferentialEnvironment, self).__init__(*args, **kwargs)
def _set_connectivity_matrix(self):
"""
Computes the connectivity matrix of this Environment. Each point is
connected to each other within a radius.
"""
if self.connectivity_matrix is not None:
return
def decide_connection(point1, point2):
# A point is connected to another point of its same cluster
# with high probability proportional to alpha, and to
# another point of a different clluester with probability
# proportional to 1 - alpha.
# Moreover, the edge density of a network is capped at a value
# beta. That's why we choose a 0 with probability 1-beta,
# and partition beta into alpha and 1-alpha.
alpha = self.alpha
beta = self.beta
if point1.cluster == point2.cluster:
tie = np.random.choice(
[0, 0, 1], p=[1 - beta, beta * (1 - alpha), beta * alpha])
else:
tie = np.random.choice(
[0, 0, 1], p=[1 - beta, beta * alpha, beta * (1 - alpha)])
return tie
matrix = np.array([[0] * len(self.population)
for _ in range(len(self.population))])
# since the graph is undirected, the matrix is symmetric,
# which in turn means we need only compute the lower triangular
# elements and then copy them into the upper triangular elements
for i, point1 in enumerate(self.population):
for j, point2 in enumerate(self.population[:i]):
matrix[i][j] = decide_connection(point1, point2)
matrix[j][i] = matrix[i][j]
self.connectivity_matrix = matrix
class NearestNeighborsEnvironment(XyEnvironment):
"""
A set of connected Entities. Handles message passing and displaying. Connections laid
out geographically: each point is connected to some of its nearest neighbors.
"""
def _set_connectivity_matrix(self):
"""
Computes the connectivity matrix of this Environment. Each point is
connected to each other within a radius.
"""
if self.connectivity_matrix is not None:
return
points_arr = np.array([[p.x, p.y] for p in self.population])
distance_mat = euclidean_distances(points_arr, points_arr)
# Every point p will be connected to each other point whose distance
# to p is less than a cut-off value. This value is computed as the
# mean of {min_nonzero(dist_mat(p)) | p is a point}, times a factor
def min_nonzero(r):
return min(r[r > 0])
# apply_along_axis(f, axis=1, arr) applies f to each row
min_neighbor_distances = np.apply_along_axis(
min_nonzero, axis=1, arr=distance_mat)
factor = 2.2
neighbor_cutoff = np.mean(min_neighbor_distances) * factor
connectivity_matrix = distance_mat < neighbor_cutoff
self.connectivity_matrix = connectivity_matrix

phil-lidar1-fmc/hec-automation | run.py | license: gpl-3.0

'''
Copyright (c) 2013, Kenneth Langga (klangga@gmail.com)
All rights reserved.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import subprocess
import time
# Check if run dir exists
RUNDIR = os.path.abspath('run')
if os.path.isdir(RUNDIR):
while True:
count = 0.
# List folder contents
for c in os.listdir(RUNDIR):
# Check if it's a batch file
if c.endswith('.bat'):
# Run batch file
print '\n', '#' * 40, 'Running', c, '#' * 40, '\n'
run = subprocess.Popen(os.path.join(RUNDIR, c), cwd=RUNDIR)
run.wait()
count += 1
        # Sleep for a set interval (guard against an empty run dir, which
        # would otherwise divide by zero)
        dur = 60 / count * 60 if count else 3600
print '\n', '#' * 40, 'Sleeping for', dur, 'secs', '#' * 40, '\n'
time.sleep(dur)

wfwei/ReadWeibo | ranking/MultiRank.py | license: apache-2.0

#!/usr/bin/python
# -*- coding: utf-8 -*-
from ReadWeibo.mainapp.models import Category, Weibo, Comment
from ReadWeibo.account.models import Account
from main import Config
import DataUtil as du
from scipy.sparse import *
from scipy import *
import numpy as np
import logging
import operator
import math, random, sys, csv
class MultiRank:
def __init__(self, graph, topic_words=Config._ML_WORDS,
alpha=.0, beta=0.05, mu=0.3, eta=0.15, max_iter=40):
self.max_iter = max_iter
self.alpha = alpha
self.beta = beta
self.mu = mu
self.eta = eta
self.graph = graph
self.ranks = {}
self.topic_words = set(topic_words.lower().split("-"))
def _adj_mat(self, graph, topic_words):
'''
        Build the weibo adjacency matrix from shared user activity and a
        bipartite graph between weibos and keywords.
'''
# label id to each node
wb_id = 0; wd_id = 0
for key, info in graph.nodes(data=True):
if info['tp'] == 'weibo':
graph.node[key]['id'] = wb_id
wb_id += 1
elif info['tp'] == 'word':
graph.node[key]['id'] = wd_id
wd_id += 1
# make adj matrix
W = lil_matrix((wb_id, wb_id))
R = lil_matrix((wb_id, wd_id))
y_wb = np.zeros((wb_id, 1))
y_wd = np.zeros((wd_id, 1))
print 'wb_id:%s\twd_id:%s' % (wb_id, wd_id)
for key, info in graph.nodes(data=True):
if info['tp'] == 'weibo':
continue
if info['tp'] == 'user':
weight = 1.0
neis = graph.neighbors(key)
for i in range(len(neis)):
for j in range(len(neis))[i+1:]:
nod1 = graph.node[neis[i]]
nod2 = graph.node[neis[j]]
if nod1['tp']=='weibo' and nod2['tp']=='weibo':
W[nod1['id'], nod2['id']] += weight
W[nod2['id'], nod1['id']] += weight
elif info['tp'] == 'word':
for nod in graph.neighbors(key):
if graph.node[nod]['tp'] == 'weibo':
id1 = graph.node[nod]['id']
id2 = info['id']
R[id1, id2] += 1.0
if key in topic_words:
y_wd[graph.node[key]['id'], 0] = 1.0
return W, R, y_wb, y_wd
def rank(self):
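        """Run the label-propagation iteration; a sketch of the update used below:

            f <- (1/(1-beta)) * (mu*Sw + eta^2/(beta+eta) * Sr.Sr^T) . f
                 + alpha/(1-beta) * y_wb
                 + beta*eta/((1-beta)*(beta+eta)) * Sr . y_wd
        """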
W, R, y_wb, y_wd = self._adj_mat(self.graph, self.topic_words)
logging.info("make adjacent matrix over, and labeled %d words" % y_wd.sum())
D = lil_matrix(W.shape)
D_d = lil_matrix((R.shape[0], R.shape[0]))
D_t = lil_matrix((R.shape[1], R.shape[1]))
_sum = W.sum(1)
for _i in range(W.shape[0]):
if _sum[_i,0] != 0:
D[_i, _i] = _sum[_i,0]**(-0.5)
_sum = R.sum(1)
for _i in range(R.shape[0]):
if _sum[_i,0] != 0:
D_d[_i, _i] = _sum[_i,0]**(-0.5)
_sum = R.sum(0)
for _i in range(R.shape[1]):
if _sum[0, _i] != 0:
D_t[_i, _i] = _sum[0,_i]**(-0.5)
Sw = D.dot(W).dot(D)
Sr = D_d.dot(R).dot(D_t)
f = np.zeros(y_wb.shape)
alpha, beta, mu, eta = self.alpha, self.beta, self.mu, self.eta
for _iter in range(self.max_iter):
logging.info('iter : %d' % _iter)
f = (1.0/(1-beta))*(mu*Sw+eta*eta/(beta+eta)*Sr.dot(Sr.T)).dot(f) \
+ alpha/(1-beta)*y_wb + beta*eta/(1-beta)/(beta+eta)*Sr.dot(y_wd)
for key, node in self.graph.nodes(data=True):
if node['tp'] == 'weibo':
self.ranks[key] = f[node['id']]
def test(self, verbose=False):
sorted_r = sorted(self.ranks.iteritems(), key=operator.itemgetter(1), reverse=True)
found=0; tot=0; cost=.0
for w_id, weight in sorted_r:
wb = Weibo.objects.get(w_id=w_id)
tot += 1
if wb.real_category==1:
found += 1
cost += math.log(tot-found+1)
if verbose:
logging.info("%s\t%s\t%s" % (wb.real_category, weight, wb.text[:30]))
return cost
if __name__ == '__main__':
if len(sys.argv)<2:
print '''Expected input format: %s graph [-t topic] [-m max_iter]
graph: graph file path
-t: specify topic words
topic: topic words seperated by '-', default with ML words
-m: specify max iter count
max_iter: max iter count, default with 20
''' % sys.argv[0]
sys.exit(1)
load_path = sys.argv[1]
topic_words=Config._ML_WORDS
max_iter=20
_id = 2
while _id<len(sys.argv)-1:
if sys.argv[_id]=='-t':
topic_words = sys.argv[_id+1].decode('utf-8')
elif sys.argv[_id]=='-m':
max_iter = int(sys.argv[_id+1])
_id += 2
G = du.load_graph(load_path)
for mu, eta, beta in [(.1,.1,.8), (.1,.3,.6), (.3,.1,.6), (.2,.4,.4), (.4,.2,.4)]:
mr = MultiRank(G, topic_words=topic_words, max_iter=max_iter,
alpha=.0, beta=beta, mu=mu, eta=eta)
mr.rank()
cost = mr.test(verbose=False)
logging.info("cost=%s \t mu=%s, eta=%s, beta=%s" % (cost, mu, eta, beta))
| apache-2.0 | 8,863,134,820,167,962,000 | 32.306748 | 91 | 0.471173 | false | 3.163753 | false | false | false |
markgw/jazzparser | bin/models/ngram/ngram_model_suite.py | 1 | 7424 | #!/usr/bin/env ../../jazzshell
from subprocess import PIPE, Popen, STDOUT
from optparse import OptionParser
import os, csv
from jazzparser.utils.config import ConfigFile
BASE_TRAINING_OPTIONS = """
# Model type
%% ARG 0 ngram-multi
# Input data
%% ARG 2 %{PROJECT_ROOT}/input/fullseqs
# Input type specification
filetype = bulk-db-annotated
# Train for cross-evaluation
partitions = 10
# Don't use a cutoff on any backoff models
opts = backoff_cutoff=0
"""
BASE_TEST_OPTIONS = """
%% ARG 0 ngram-multi
%% ARG 2 %{PROJECT_ROOT}/input/fullseqs
partitions = 10
"""
BASE_ENTROPY_OPTIONS = BASE_TEST_OPTIONS + "+entropy\n"
BASE_ACCURACY_OPTIONS = BASE_TEST_OPTIONS + "+agreement\n"
def output_proc(proc):
output = ""
line = proc.stdout.readline()
while line:
output += line
print line.strip("\n")
line = proc.stdout.readline()
return output
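# The option blocks above use jazzparser's ConfigFile syntax: "%% ARG n value"
# lines supply positional arguments and "key = value" lines supply options.
# Only from_string() and get_strings() are relied on here, e.g.:
#
#   conf = ConfigFile.from_string(BASE_ENTROPY_OPTIONS + "%% ARG 1 mymodel")
#   args = conf.get_strings()   # argv-style list handed to Popen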
def main():
usage = "%prog [options]"
description = "Trains a suite of ngram models and tests them all"
parser = OptionParser(usage=usage, description=description)
parser.add_option('-n', '--no-train', dest="no_train", action="store_true", help="don't train the models. Only do this if you've previously used this script to train all the models")
parser.add_option('--train', '--only-train', dest="only_train", action="store_true", help="only train the models, don't do the experiments")
parser.add_option('--bt', '--bigram-trigram', dest="bigram_trigram", action="store_true", help="only include bigram and trigram models")
parser.add_option('-t', '--trigram', dest="trigram", action="store_true", help="only include trigram models")
parser.add_option('--wb', '--witten-bell', dest="witten_bell", action="store_true", help="only use witten-bell smoothing (skip laplace)")
parser.add_option('--lap', '--laplace', dest="laplace", action="store_true", help="only use laplace smoothing (skip witten-bell)")
parser.add_option('-v', '--viterbi', dest="viterbi", action="store_true", help="use Viterbi decoding")
parser.add_option('-4', '--4grams', dest="fourgrams", action="store_true", help="run experiments for 4-gram models")
parser.add_option('-c', '--cutoff', dest="cutoff", action="store", type="int", help="custom cutoff to use, instead of trying several")
parser.add_option('--gt', '--good-turing', dest="good_turing", action="store_true", help="only use Good-Turing smoothing (not usually included)")
options, arguments = parser.parse_args()
cmd_dir = os.path.abspath("..")
train_cmd = "./train.py"
tageval_cmd = "./tageval.py"
if options.bigram_trigram:
orders = [2, 3]
elif options.trigram:
orders = [3]
elif options.fourgrams:
orders = [4]
else:
orders = [1, 2, 3]
if options.witten_bell:
smoothings = [("witten-bell", "wb")]
elif options.laplace:
smoothings = [("laplace", "lap")]
elif options.good_turing:
smoothings = [("simple-good-turing", "gt")]
else:
smoothings = [("witten-bell", "wb"), ("laplace", "lap")]
if options.cutoff is None:
cutoffs = [0, 2, 5]
else:
cutoffs = [options.cutoff]
# Open a CSV file to write the results to
with open("test_suite.csv", "w") as result_file:
results = csv.writer(result_file)
results.writerow(["Order", "Cutoff", "Smoothing", "Entropy", "Agreement"])
for model_order in orders:
for cutoff in cutoffs:
for smoothing,smoothing_short in smoothings:
#for chord_map in ["none", "small", "big"]:
print "\n#####################################################"
print "### Order %d, cutoff %d, smoothing %s ###" % (model_order, cutoff, smoothing)
# Build a unique name for the model
model_name = "suite_n%d_c%d_%s" % (model_order, cutoff, smoothing_short)
# Train the model
if not options.no_train:
# Prepare options to train the model
model_options = "n=%d:cutoff=%d:backoff=%d:estimator=%s" % \
(model_order, cutoff, model_order-1, smoothing)
training_opts = BASE_TRAINING_OPTIONS + \
"opts = %s\n%%%% ARG 1 %s" % (model_options, model_name)
# Turn these nice option specifications into command-line args
conf = ConfigFile.from_string(training_opts)
# Train this model
#train_output = check_output([train_cmd]+conf.get_strings(), cwd=cmd_dir)
train_proc = Popen([train_cmd]+conf.get_strings(),
cwd=cmd_dir, stdout=PIPE, stderr=STDOUT)
output_proc(train_proc)
if not options.only_train:
# Entropy doesn't tell us much for Viterbi decoding
if not options.viterbi:
# Test the model's entropy
print "### Entropy ###"
entropy_opts = BASE_ENTROPY_OPTIONS + "%%%% ARG 1 %s" % model_name
conf = ConfigFile.from_string(entropy_opts)
entropy_proc = Popen([tageval_cmd]+conf.get_strings(),
cwd=cmd_dir, stdout=PIPE, stderr=STDOUT)
# Output as we go
output = output_proc(entropy_proc)
# Get the last line and pull out the entropy value
last_line = output.strip("\n").rpartition("\n")[2]
entropy = float(last_line.split()[0])
else:
entropy = 0.0
# Test the model's top tag accuracy
print "\n### Agreement ###"
accuracy_opts = BASE_ACCURACY_OPTIONS + "%%%% ARG 1 %s" % model_name
if options.viterbi:
accuracy_opts += "\ntopt = decode=viterbi"
conf = ConfigFile.from_string(accuracy_opts)
accuracy_proc = Popen([tageval_cmd]+conf.get_strings(),
cwd=cmd_dir, stdout=PIPE, stderr=STDOUT)
# Output as we go
output = output_proc(accuracy_proc)
# Get the last line and pull out the agreement value
last_line = output.strip("\n").rpartition("\n")[2]
agreement = float(last_line.split()[-1].strip("()%"))
results.writerow(["%d" % model_order,
"%d" % cutoff,
"%s" % smoothing,
"%f" % entropy,
"%f" % agreement])
# Flush the file object so each result appears in the
# file immediately
result_file.flush()
if __name__ == "__main__":
main()
| gpl-3.0 | -4,326,134,430,385,559,600 | 46.896774 | 186 | 0.517376 | false | 4.225384 | true | false | false |
wkentaro/termsaver | termsaverlib/screen/base/__init__.py | 1 | 12282 | ###############################################################################
#
# file: __init__.py
#
# Purpose: refer to module documentation for details
#
# Note: This file is part of Termsaver application, and should not be used
# or executed separately.
#
###############################################################################
#
# Copyright 2012 Termsaver
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
###############################################################################
"""
This module holds base classes that are used by all screens within termsaver
application. Each individual "screen" represents a unique screensaver that can
be triggered by termsaver.
The base classes available in this package are:
* `ScreenBase`: the most basic screen class, which will handle simple
interaction with the terminal.
* `filereader.FileReaderBase`: handles screens that require recursive
directory scanning for files to be printed out
* `urlfetcher.UrlFetcherBase`: handles screens that require Internet
connectivity.
* `urlfetcher.SimpleUrlFetcherBase`: similar as `UrlFetcherBase`,
with simpler options (to avoid overhead to build your own argument
parsing, and usage documentation)
* `rssfeed.RSSFeedScreenBase`: handles RSS parsing from Internet.
* `rssfeed.SimpleRSSFeedScreenBase`: similar as `RSSFeedScreenBase`,
with simpler options (to avoid overhead to build your own argument
parsing, and usage documentation)
Build your own screen
=====================
It is very simple to inherit from these base classes and create your own
screen. See some of the examples implemented here already. Basically, you will
need to:
* define a name and description for your screen (class instantiation)
and keep them as short as possible (avoid too much typing)
* if applicable, define your command-line usage guidelines and options
      (see `cli_opts`) if appropriate, and override the `_parse_args` method.
Create your help/usage text by overriding the `_usage_options_example`
method.
* build your action by overriding the `_run_cycle` method, if applicable
(the base class will be triggered by the `autorun` method that loops
indefinitely or until there is a keyboard interruption (ctrl+C).
Before you start, though, I strongly advise you to check out the code here
thoroughly, to avoid reinventing the wheel in parts that are already covered.
Additionally, consistency is important, so try to keep the same concept of how
things are done here... Well, if you have a better idea, I am very open to
adapting (but then, instead of making a mess, we would change it all to stay
consistent).
"""
#
# Python built-in modules
#
import os
import getopt
import sys
#
# Internal modules
#
from termsaverlib import common, constants, exception
from termsaverlib.screen.helper import ScreenHelperBase
from termsaverlib.i18n import _
class ScreenBase(ScreenHelperBase):
"""
This is the main screen that all screens must inherit in order to be part
of the screensaver list, accessible with termsaver command-line options.
When inheriting this to your own screen, remember to override the
following methods:
* `_run_cycle`: define here the algorithm to display a text-based
look-alike screensaver. See other classes for example on how to
use this.
* `_usage_options_example`: print out here the options and examples
on how to use your screen. See other classes for examples on how
to use this.
* `_parse_args`: from a properly parsed (using getopt) argument list,
customize the configuration of your screen accordingly
That's all you need to do!
Additionally, you can also call the following helper methods:
* `screen_exit`: if by any reason you need to close the application
(remember, in most cases, you can just rely on throwing exceptions
that are understood by termsaver application, available in
`termsaverlib.exception` module)
* `log` : if you need to write anything on screen before or after a
screen cycle, you can do it in style by calling this method, which
will inform the screen as a prefix to the message being displayed
on screen.
You can also use the following optional property:
* `cleanup_per_cycle`: Defines if the screen should be cleaned up for
every rotation cycle (new file).
IMPORTANT:
    All other methods are not to be tampered with!
"""
name = ''
"""
Defines the name of the screen.
"""
description = ''
"""
Defines the description (short) of the screen.
"""
cli_opts = {}
"""
Defines the getopt format command-line options of the screen. It should be
an object in the following structure:
cli_opts = {
'opts': 'h',
'long_opts': ['help',],
}
"""
cleanup_per_cycle = False
"""
Defines if the screen should be cleaned up for every rotation cycle
(new file).
"""
def __init__(self, name, description, cli_opts):
"""
The basic constructor of this class. You need to inform basic
information about your screen:
* `name`: describes the name of the screen (try to keep it short,
and/or abbreviated, as much as possible)
* `description`: a brief (very brief) description of what the screen
does (if you need to write more documentation about
it, you can rely on man docs for that)
* `cli_opts`: the command line options that will be available for
your screen (use getopt formatting)
"""
self.name = name
self.description = description
self.cli_opts = cli_opts
def autorun(self, args, loop=True):
"""
The accessible method for dynamically running a screen.
This method will basically parse the arguments, prepare them with
the method `_parse_args` that is inherited in sub-classes, and with
the property `cli_opts` that holds the formatting of the arguments.
Once all is ready to go, this will call the `_run_cycle` method, which
is filled in the sub-classes with the algorithms to display text on
screen to behave as a screensaver.
The arguments of this method are:
* args: (MANDATORY) the arguments passed when termsaver is executed
from command-line. See `termsaver` script for details.
* loop: (OPTIONAL) defines if termsaver should be executing on an
infinite looping (goes on until the keyboard interrupt
(Ctrl+C) is pressed), or not. This is up to the screen
action (or end-user through configuable setting) to decide.
"""
        # prepare values and validate
        if not args:
            args = ''
        if not self.cli_opts:
            self.cli_opts = {}
        if not self.cli_opts.get('opts'):
            self.cli_opts['opts'] = ''
        if not self.cli_opts.get('long_opts'):
            self.cli_opts['long_opts'] = []
        else:
            if not isinstance(self.cli_opts['long_opts'], list) or \
                    not all(isinstance(i, str) for i in self.cli_opts['long_opts']):
                #
                # Don't worry too much about errors here. This is supposed to
                # help developers while programming screens for this app.
                #
                raise Exception("Value of 'long_opts' in cli_opts dict MUST "\
                        "be a list of strings.")
try:
self._parse_args(getopt.getopt(args, self.cli_opts['opts'],
self.cli_opts['long_opts']))
except getopt.GetoptError, e:
raise exception.InvalidOptionException("", str(e))
# execute the cycle
self.clear_screen()
        while loop:
try:
self._run_cycle()
except KeyboardInterrupt, e:
#
# do some cleanup if applicable
#
self._on_keyboard_interrupt()
raise e
# Clear screen if appropriate
if self.cleanup_per_cycle:
self.clear_screen()
def _run_cycle(self):
"""
Executes a cycle of this screen. This base class actually does not hold
any special actions to begin with, but executing it from inheriting
classes is also a good practice, to allow future implementations that
must be taken from a base class.
"""
pass
@staticmethod
def usage_header():
"""
Simply prints a header information, used with the `usage` method.
See also `usage` method for details.
"""
print """%(app_title)s v.%(app_version)s - %(app_description)s.
""" % {
'app_title': constants.App.TITLE,
'app_version': constants.App.VERSION,
'app_description': constants.App.DESCRIPTION,
}
@staticmethod
def usage_footer():
"""
Simply prints a footer information, used with the `usage` method.
See also `usage` method for details.
"""
print """--
See more information about this project at:
%(url)s
Report bugs to authors at:
%(source_url)s
""" % {
'url': constants.App.URL,
'source_url': constants.App.SOURCE_URL,
}
def _usage_options_example(self):
"""
Describe here the options and examples of your screen.
See some examples of already implemented base screens so you can
write similar stuff on your own, and keep consistency.
"""
pass
def usage(self):
"""
Defines the usage information that is presented when a user hits the
help option.You should not directly override this method, instead, just
override the protected method `_usage_options_example`, created for
this purpose. All other stuff will be defined by the `usage_header` and
`usage_footer` methods.
"""
# header
self.usage_header()
print _("""Screen: %(screen)s
Description: %(description)s
Usage: %(app_name)s %(screen)s [options]""") % {
'app_name': constants.App.NAME,
'screen': self.name,
'description': self.description,
}
# any additional info in between (see other classes for reference)
self._usage_options_example()
#footer
self.usage_footer()
def _parse_args(self, prepared_args):
"""
(protected) MUST be overriden in inheriting classes, to deal with
special arguments that will customize values for them.
"""
pass
def screen_exit(self, error=0):
"""
Exits the screen (and finishes the application) with a specific error.
If none is informed, it exits as successful (error 0).
"""
sys.exit(error)
def log(self, text):
"""
Prints a log message on screen in the format:
%(app_name)s.%(screen)s: %(message)s
"""
print "%s.%s: %s" % (constants.App.NAME, self.name, text)
def _on_keyboard_interrupt(self):
"""
Executes extra commands if the keyboard interrupt exception happened
while running a cycle.
"""
pass | apache-2.0 | -7,446,971,719,490,099,000 | 33.994302 | 79 | 0.612522 | false | 4.593119 | false | false | false |
coburnw/hp5334-ivi | local/agilentBase5334.py | 1 | 17724 | """
Python Interchangeable Virtual Instrument Driver
Copyright (c) 2017 Coburn Wightman
derived from agilent436a.py driver by:
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .. import ivi
from .. import counter
from .. import vxi11
#import time
# Parameter Values
ChanNameMap = {0 : 'A', 1 : 'B', 2 : 'C'}
Units = set(['Sec', 'Hz', 'Volts'])
Operator = set(['none', 'difference', 'sum', 'quotient'])
RangeType = set(['in_range', 'under_range', 'over_range'])
OperationState = set(['complete', 'in_progress', 'unknown'])
MeasurementFunction = set(['frequency',
'period',
'pulse_width',
'duty_cycle',
'edge_time',
'frequency_ratio',
'time_interval',
'totalize_continuous',
'totalize_gated',
'totalize_timed',
'invalid'])
MeasurementFunctionMap = {'frequency' : 'FN', # fn1, fn2, fn3 is a, b, and c channel
'period': 'FN4',
'time_interval' : 'FN5',
'time_interval_delay' : 'FN6',
'frequency_ratio' : 'FN7',
'total_stop' : 'FN8', # non standard
'total_start' : 'FN9', # non standard
'pulse_width' : 'FN10',
'edge_time' : 'FN11',
#'dc_voltage' : 'FN12',
#'trigger_voltage' : 'FN13',
#'peak_to_peak_voltage' : 'FN14',
#'totalize_timed' : 'x',
#'totalize_gated' : 'xx',
'invalid' : 'inv'}
ErrorMessages = { 0 : 'No error', # to accurately reflect error codes of device, divide by 10
10 : 'Parameter disallowed in present mode',
11 : 'Attenuators controlled by AUTO TRIG',
12 : '50-ohm B, AC B settings preset by COM A',
13 : 'Slope B set by Slope A in Rise/Fall mode',
14 : 'Parameter disallowed in High Speed mode',
15 : 'Calibration data unaccessible in present mode',
20 : 'Invalid key entry',
21 : 'Data outside valid range',
22 : 'Data exceeds maximum resolution',
23 : 'Mantissa digit buffer full',
24 : 'Decimal point previously entered',
30 : 'Multiple key closures',
40 : 'Mnemonic not recognizable',
41 : 'Numeric syntax error',
42 : 'Alpha character expected',
43 : 'Data exceeds valid range',
44 : 'Attention (ATN) asserted in Talk-Only mode',
50 : 'Store instrument setup operation failed', #50.X where x is the register number: 0-9
51 : 'Recall instrument setup operation failed', #51.X
52 : 'HP-IB address cannot be recalled at power up; address default to 03'}
class agilentBase5334(ivi.Driver, counter.Base):
"Agilent HP5334 Series IVI Universal Counter driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '')
super(agilentBase5334, self).__init__(*args, **kwargs)
self._identity_description = "Agilent HP5334 Universal Counter driver"
self._identity_identifier = ""
self._identity_revision = ""
self._identity_vendor = ""
self._identity_instrument_manufacturer = "Agilent Technologies"
self._identity_instrument_model = ""
self._identity_instrument_firmware_revision = ""
self._identity_specification_major_version = 1
self._identity_specification_minor_version = 0
self._identity_supported_instrument_models = ['HP5334A','HP5334B']
self._init_defaults()
self._init_channels()
def _initialize(self, resource = None, id_query = False, reset = False, **keywargs):
"Opens an I/O session to the instrument."
super(agilentBase5334, self)._initialize(resource, id_query, reset, **keywargs)
# configure interface
if self._interface is not None:
self._interface.term_char = '\n'
# interface clear
if not self._driver_operation_simulate:
self._clear()
# verify instrument model matches
if id_query and not self._driver_operation_simulate:
id = self.identity.instrument_model
id_check = self._instrument_id
id_short = id[:len(id_check)]
if id_short != id_check:
raise Exception("Instrument ID mismatch, expecting %s, got %s", id_check, id_short)
# reset
if reset:
self.utility_reset()
def _load_id_string(self):
self._set_cache_valid(False, 'identity_instrument_manufacturer')
self._set_cache_valid(False, 'identity_instrument_model')
self._set_cache_valid(False, 'identity_instrument_firmware_revision')
idstr = "HP5334S"
if not self._driver_operation_simulate:
idstr = self._ask("ID")
if idstr.find('HP') == 0:
self._identity_instrument_manufacturer = 'Agilent'
self._set_cache_valid(True, 'identity_instrument_manufacturer')
self._identity_instrument_model = idstr
self._identity_instrument_firmware_revision = 'Cannot query from instrument'
self._set_cache_valid(True, 'identity_instrument_model')
self._set_cache_valid(True, 'identity_instrument_firmware_revision')
def _get_identity_instrument_manufacturer(self):
if self._get_cache_valid('identity_instrument_manufacturer'):
return self._identity_instrument_manufacturer
self._load_id_string()
return self._identity_instrument_manufacturer
def _get_identity_instrument_model(self):
if self._get_cache_valid('identity_instrument_model'):
return self._identity_instrument_model
self._load_id_string()
return self._identity_instrument_model
def _get_identity_instrument_firmware_revision(self):
if self._get_cache_valid():
return self._identity_instrument_firmware_revision
self._load_id_string()
return self._identity_instrument_firmware_revision
def _utility_disable(self):
pass
def _utility_error_query(self):
error_code = 0
error_message = "No error"
try:
error_code = self._ask("TE")
error_code = float(error_code) * 10
            if error_code < 50 or error_code >= 52:
                error_message = ErrorMessages[int(error_code)]
            elif error_code < 51:
                # 50.X, where X is the register number
                regnum = int((error_code % 50) * 10.01)
                error_code = int(error_code)
                error_message = "Register " + str(regnum) + ' ' + ErrorMessages[error_code]
            else:
                # 51.X, where X is the register number
                regnum = int((error_code % 51) * 10.01)
                error_code = int(error_code)
                error_message = "Register " + str(regnum) + ' ' + ErrorMessages[error_code]
except vxi11.vxi11.Vxi11Exception as err:
error_message = err.msg
error_code = -1
except ValueError:
error_message = "bad error code: " + str(error_code)
error_code = -1
except KeyError:
error_message = "undefined error code: " + str(error_code)
error_code = -1
return (int(error_code), error_message)
def _utility_lock_object(self):
pass
def _utility_unlock_object(self):
pass
def _utility_reset(self):
#if not self._driver_operation_simulate:
self._write("IN")
self._clear()
self.driver_operation.invalidate_all_attributes()
self._init_defaults()
def _utility_reset_with_defaults(self):
self._utility_reset()
def _utility_self_test(self):
raise ivi.OperationNotSupportedException()
def _init_defaults(self):
self._measurement_function = 'frequency'
self.driver_operation.invalidate_all_attributes()
        # these attribute names match what _measurement_initiate reads below
        self._frequency_aperture_time = 0.3
        self._period_aperture_time = 0.3
        self._time_interval_resolution = 1e-9
def _init_channels(self):
try:
super(agilentBase5334, self)._init_channels()
except AttributeError:
pass
self._channel_name = list()
self._channel_impedance = list()
self._channel_coupling = list()
self._channel_attenuation = list()
self._channel_level = list()
self._channel_hysteresis = list()
self._channel_slope = list()
self._channel_filter_enabled = list()
self._channel_count = 3
for i in range(self._channel_count):
self._channel_name.append(ChanNameMap[i])
self._channel_impedance.append(1e6)
self._channel_coupling.append('dc')
self._channel_attenuation.append(1)
self._channel_level.append(-50)
self._channel_hysteresis.append(0)
self._channel_slope.append('positive')
self._channel_filter_enabled.append(False)
self.channels._set_list(self._channel_name)
# Chan C not settable, override defaults
self._channel_impedance[2] = 50
self._channel_coupling[2] = 'ac'
def _get_channel_impedance(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_impedance[index]
def _set_channel_impedance(self, index, value):
if index > 1:
raise ivi.SelectorNameException()
index = ivi.get_index(self._channel_name, index)
value = float(value)
#if not self._driver_operation_simulate:
if value > 99:
self._write(ChanNameMap[index] + "Z0") # set to 1meg
self._channel_impedance[index] = 1e6
else:
self._write(ChanNameMap[index] + "Z1") # set to 50ohm
self._channel_impedance[index] = 50
def _get_channel_coupling(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_coupling[index]
def _set_channel_coupling(self, index, value):
if index > 1:
raise ivi.SelectorNameException()
index = ivi.get_index(self._channel_name, index)
if value not in counter.Coupling:
raise ivi.ValueNotSupportedException()
if value == "ac":
self._write(ChanNameMap[index] + "A1") # ac
else:
self._write(ChanNameMap[index] + "A0") # dc
self._channel_coupling[index] = value
def _get_channel_attenuation(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_attenuation[index]
def _set_channel_attenuation(self, index, value):
if index > 1:
raise ivi.SelectorNameException()
index = ivi.get_index(self._channel_name, index)
value = float(value)
if value == 1:
self._write(ChanNameMap[index] + "X0") # x1
elif value == 10:
self._write(ChanNameMap[index] + "X1") # x10
else:
raise ivi.ValueNotSupportedException("attenuation must be '1' or '10'")
self._channel_attenuation[index] = value
def _get_channel_level(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_level[index]
def _set_channel_level(self, index, value):
if index > 1:
raise ivi.SelectorNameException()
index = ivi.get_index(self._channel_name, index)
value = float(value)
max_atten = 10
if value > 4.999 * max_atten:
# set instrument to manual trigger (front panel knobs)
self._write('AU0')
elif value < -4.999 * max_atten:
# set instrument to automatic trigger
self._write('AU1')
elif self._get_identity_instrument_model() == 'HP5334A':
# set A instrument trigger dac values
            self._write(ChanNameMap[index] + "T" + str(value))
else:
# B instrument has no dac. ignore for now.
pass
self._channel_level[index] = value
def _get_channel_hysteresis(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_level[index]
def _set_channel_hysteresis(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = float(value)
self._channel_hysteresis[index] = value
def _get_channel_slope(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_slope[index]
def _set_channel_slope(self, index, value):
if index > 1:
raise ivi.SelectorNameException()
index = ivi.get_index(self._channel_name, index)
if value not in counter.Slope:
raise ivi.ValueNotSupportedException()
if value == "positive":
self._write(ChanNameMap[index] + "S0") # positive
else:
self._write(ChanNameMap[index] + "S1") # negative
self._channel_slope[index] = value
def _get_channel_filter_enabled(self, index):
index = ivi.get_index(self._channel_name, index)
if index != 0:
raise ivi.ValueNotSupportedException()
return self._channel_filter_enabled[index]
def _set_channel_filter_enabled(self, index, value):
if index != 0:
raise ivi.SelectorNameException()
index = ivi.get_index(self._channel_name, index)
if value == True:
self._write("FI1") # 100khz filter on (a channel only)
else:
self._write("FI0") # filter off.
self._channel_filter_enabled[index] = value
# totalize
def _totalize_continuous_configure(self, channel):
if channel != 0:
raise ivi.SelectorNameException()
else:
self._totalize_continuous.channel = channel
def _totalize_continuous_fetch_count(self):
return self._measurement_fetch()
def _totalize_continuous_start(self):
cmd = 'RE FN9'
self._write(cmd)
def _totalize_continuous_stop(self):
cmd = 'FN8'
self._write(cmd)
# measurement
def _set_measurement_function(self, value): # override to limit functionality
if value not in MeasurementFunction:
raise ivi.ValueNotSupportedException()
self._measurement_function = value
def _measurement_is_measurement_complete(self): # counter.py version of get_state?
return True
def _measurement_abort(self):
self._write("RE")
#self._clear()
def _measurement_fetch(self):
val = self._read()
if val[0] == 'O':
return float("inf")
f = float(val[1:19])
return f
def _measurement_initiate(self):
if self._measurement_function == 'frequency' :
func = MeasurementFunctionMap[self._measurement_function] + repr(self._frequency_channel + 1)
gate = 'GA' + str(self._frequency_aperture_time)
cmd = func + gate
elif self._measurement_function == 'period' :
func = MeasurementFunctionMap[self._measurement_function]
gate = 'GA' + str(self._period_aperture_time)
cmd = func + gate
elif self._measurement_function == 'time_interval' :
func = MeasurementFunctionMap[self._measurement_function]
if self._time_interval_resolution == 1e-10:
gate = 'GV1'
else:
gate = 'GV0'
cmd = func + gate
elif self._measurement_function == 'frequency_ratio' :
cmd = MeasurementFunctionMap[self._measurement_function]
        elif self._measurement_function == 'invalid' :
            cmd = MeasurementFunctionMap['invalid']
        else:
            raise ivi.ValueNotSupportedException()
self._write(cmd)
def _measurement_read(self, maximum_time):
self._measurement_initiate()
return self._measurement_fetch()
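# Illustrative session (the resource string is an assumption; in practice a
# concrete model driver built on this base class would be instantiated):
#
#   counter = agilentBase5334("TCPIP::192.168.1.50::gpib0,3::INSTR")
#   counter._set_measurement_function('frequency')
#   counter._measurement_initiate()
#   print counter._measurement_fetch()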
| mit | 4,786,143,389,363,991,000 | 35.695652 | 114 | 0.572726 | false | 4.128581 | false | false | false |
tudarmstadt-lt/taxi | graph_pruning/methods/m_mfas.py | 1 | 6788 | import networkx as nx
import numpy as np
import methods.util.write_graph as write_graph
import methods.util.util as util
g = nx.DiGraph()
def prepare(line):
g.add_edge(line[1], line[2])
def do(filename_out, delimiter, mode, gephi_out):
edges_to_be_removed = remove_cycle_edges_by_mfas()
cycles_removed = util.remove_edges_from_network_graph(g, edges_to_be_removed)
write_graph.network_graph(filename_out, g, gephi_out=gephi_out, delimiter=delimiter)
return cycles_removed
def pick_from_dict(d, order="max"):
min_k, min_v = 0, 10000
min_items = []
max_k, max_v = 0, -10000
max_items = []
for k, v in d.iteritems():
if v > max_v:
max_v = v
max_items = [(k, max_v)]
elif v == max_v:
max_items.append((k, v))
if v < min_v:
min_v = v
min_items = [(k, min_v)]
elif v == min_v:
min_items.append((k, v))
max_k, max_v = pick_randomly(max_items)
min_k, min_v = pick_randomly(min_items)
if order == "max":
return max_k, max_v
if order == "min":
return min_k, min_v
else:
return max_k, max_v, min_k, min_v
def pick_randomly(source):
np.random.shuffle(source)
np.random.shuffle(source)
np.random.shuffle(source)
return source[0]
def filter_big_scc(g, edges_to_be_removed):
# Given a graph g and edges to be removed
# Return a list of big scc subgraphs (# of nodes >= 2)
g.remove_edges_from(edges_to_be_removed)
sub_graphs = filter(lambda scc: scc.number_of_nodes() >= 2, nx.strongly_connected_component_subgraphs(g))
return sub_graphs
def get_big_sccs(g):
    g.remove_edges_from(g.selfloop_edges())
num_big_sccs = 0
edges_to_be_removed = []
big_sccs = []
for sub in nx.strongly_connected_component_subgraphs(g):
number_of_nodes = sub.number_of_nodes()
if number_of_nodes >= 2:
# strongly connected components
num_big_sccs += 1
big_sccs.append(sub)
# print(" # big sccs: %d" % (num_big_sccs))
return big_sccs
def nodes_in_scc(sccs):
scc_nodes = []
scc_edges = []
for scc in sccs:
scc_nodes += list(scc.nodes())
scc_edges += list(scc.edges())
# print("# nodes in big sccs: %d" % len(scc_nodes))
# print("# edges in big sccs: %d" % len(scc_edges))
return scc_nodes
def scc_nodes_edges(g):
scc_nodes = set()
scc_edges = set()
num_big_sccs = 0
num_nodes_biggest_scc = 0
biggest_scc = None
for sub in nx.strongly_connected_component_subgraphs(g):
number_nodes = sub.number_of_nodes()
if number_nodes >= 2:
scc_nodes.update(sub.nodes())
scc_edges.update(sub.edges())
num_big_sccs += 1
if num_nodes_biggest_scc < number_nodes:
num_nodes_biggest_scc = number_nodes
biggest_scc = sub
nonscc_nodes = set(g.nodes()) - scc_nodes
nonscc_edges = set(g.edges()) - scc_edges
print("num nodes biggest scc: %d" % num_nodes_biggest_scc)
print("num of big sccs: %d" % num_big_sccs)
    if biggest_scc is None:
return scc_nodes, scc_nodes, nonscc_nodes, nonscc_edges
print("# nodes in biggest scc: %d, # edges in biggest scc: %d" % (
biggest_scc.number_of_nodes(), biggest_scc.number_of_edges()))
print("# nodes,edges in scc: (%d,%d), # nodes, edges in non-scc: (%d,%d) " % (
len(scc_nodes), len(scc_edges), len(nonscc_nodes), len(nonscc_edges)))
num_of_nodes = g.number_of_nodes()
num_of_edges = g.number_of_edges()
print(
"# nodes in graph: %d, # of edges in graph: %d, percentage nodes, edges in scc: (%0.4f,%0.4f), percentage nodes, edges in non-scc: (%0.4f,%0.4f)" % (
num_of_nodes, num_of_edges, len(scc_nodes) * 1.0 / num_of_nodes, len(scc_edges) * 1.0 / num_of_edges,
len(nonscc_nodes) * 1.0 / num_of_nodes, len(nonscc_edges) * 1.0 / num_of_edges))
return scc_nodes, scc_edges, nonscc_nodes, nonscc_edges
def get_nodes_degree_dict(g, nodes):
# get nodes degree dict: key = node, value = (max(d(in)/d(out),d(out)/d(in),"in" or "out")
in_degrees = g.in_degree(nodes)
out_degrees = g.out_degree(nodes)
degree_dict = {}
for node in nodes:
in_d = in_degrees[node]
out_d = out_degrees[node]
if in_d >= out_d:
try:
value = in_d * 1.0 / out_d
except Exception as e:
value = 0
f = "in"
else:
try:
value = out_d * 1.0 / in_d
except Exception as e:
value = 0
f = "out"
degree_dict[node] = (value, f)
# print("node: %d: %s" % (node,degree_dict[node]))
return degree_dict
def greedy_local_heuristic(sccs, degree_dict, edges_to_be_removed):
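    # Repeatedly pick the node whose in/out degree ratio is most skewed,
    # cut the smaller side of its edges, and recurse on whatever big SCCs
    # remain until none are left.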
while True:
graph = sccs.pop()
temp_nodes_degree_dict = {}
for node in graph.nodes():
temp_nodes_degree_dict[node] = degree_dict[node][0]
max_node, _ = pick_from_dict(temp_nodes_degree_dict)
max_value = degree_dict[max_node]
# degrees = [(node,degree_dict[node]) for node in list(graph.nodes())]
# max_node,max_value = max(degrees,key = lambda x: x[1][0])
if max_value[1] == "in":
# indegree > outdegree, remove out-edges
edges = [(max_node, o) for o in graph.neighbors(max_node)]
else:
# outdegree > indegree, remove in-edges
edges = [(i, max_node) for i in graph.predecessors(max_node)]
edges_to_be_removed += edges
sub_graphs = filter_big_scc(graph, edges_to_be_removed)
if sub_graphs:
for index, sub in enumerate(sub_graphs):
sccs.append(sub)
if not sccs:
return
def remove_self_loops_from_graph(g):
self_loops = list(g.selfloop_edges())
g.remove_edges_from(self_loops)
return self_loops
def remove_cycle_edges_by_mfas():
self_loops = remove_self_loops_from_graph(g)
scc_nodes, _, _, _ = scc_nodes_edges(g)
degree_dict = get_nodes_degree_dict(g, scc_nodes)
sccs = get_big_sccs(g)
if len(sccs) == 0:
print("After removal of self loop edgs: %s" % nx.is_directed_acyclic_graph(g))
return self_loops
edges_to_be_removed = []
import timeit
t1 = timeit.default_timer()
greedy_local_heuristic(sccs, degree_dict, edges_to_be_removed)
t2 = timeit.default_timer()
print("mfas time usage: %0.4f s" % (t2 - t1))
edges_to_be_removed = list(set(edges_to_be_removed))
# g.remove_edges_from(edges_to_be_removed)
edges_to_be_removed += self_loops
return edges_to_be_removed
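# Typical driver flow for this module (illustrative; the pruning pipeline
# feeds edges line by line, with line[1] and line[2] holding the endpoints):
#
#   for line in csv.reader(open('taxonomy.csv'), delimiter='\t'):
#       prepare(line)
#   removed = do('pruned.csv', '\t', mode=None, gephi_out=False)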
| apache-2.0 | 6,720,313,174,056,933,000 | 31.792271 | 165 | 0.580141 | false | 2.982425 | false | false | false |
Bladrak/thumbor | thumbor/handlers/upload.py | 6 | 2585 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
import uuid
import mimetypes
from thumbor.handlers import ImageApiHandler
from thumbor.engines import BaseEngine
##
# Handler to upload images.
# This handler support only POST method, but images can be uploaded :
# - through multipart/form-data (designed for forms)
# - or with the image content in the request body (rest style)
##
class ImageUploadHandler(ImageApiHandler):
def post(self):
# Check if the image uploaded is a multipart/form-data
if self.multipart_form_data():
file_data = self.request.files['media'][0]
body = file_data['body']
# Retrieve filename from 'filename' field
filename = file_data['filename']
else:
body = self.request.body
# Retrieve filename from 'Slug' header
filename = self.request.headers.get('Slug')
# Check if the image uploaded is valid
if self.validate(body):
# Use the default filename for the uploaded images
if not filename:
content_type = self.request.headers.get('Content-Type', BaseEngine.get_mimetype(body))
extension = mimetypes.guess_extension(content_type.split(';', 1)[0], False)
if extension is None: # Content-Type is unknown, try with body
extension = mimetypes.guess_extension(BaseEngine.get_mimetype(body), False)
if extension == '.jpe':
extension = '.jpg' # Hack because mimetypes return .jpe by default
                if extension is None:  # Even body is unknown, return an empty string to concatenate
extension = ''
filename = self.context.config.UPLOAD_DEFAULT_FILENAME + extension
# Build image id based on a random uuid (32 characters)
image_id = str(uuid.uuid4().hex)
self.write_file(image_id, body)
self.set_status(201)
self.set_header('Location', self.location(image_id, filename))
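    # Example uploads this handler accepts (host and route are illustrative):
    #
    #   curl -F "media=@image.jpg" http://localhost:8888/image
    #   curl -H "Slug: image.jpg" --data-binary @image.jpg http://localhost:8888/image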
    def multipart_form_data(self):
        return 'media' in self.request.files and bool(self.request.files['media'])
def location(self, image_id, filename):
base_uri = self.request.uri
return '%s/%s/%s' % (base_uri, image_id, filename)
| mit | -7,515,603,351,654,393,000 | 36.463768 | 102 | 0.622824 | false | 4.162641 | false | false | false |
CG-F16-24-Rutgers/steersuite-rutgers | steerstats/tools/gameobjects/gametime.py | 8 | 4870 |
class GameClock(object):
"""Manages time in a game."""
def __init__(self, game_ticks_per_second=20):
"""Create a Game Clock object.
game_ticks_per_second -- The number of logic frames a second.
"""
self.game_ticks_per_second = float(game_ticks_per_second)
self.game_tick = 1. / self.game_ticks_per_second
self.speed = 1.
self.clock_time = 0.
self.virtual_time = 0.
self.game_time = 0.
self.game_frame_count = 0
self.real_time_passed = 0.
self.real_time = self.get_real_time()
self.started = False
self.paused = False
self.between_frame = 0.0
self.fps_sample_start_time = 0.0
self.fps_sample_count = 0
self.average_fps = 0
def start(self):
"""Starts the Game Clock. Must be called once."""
if self.started:
return
self.clock_time = 0.
self.virtual_time = 0.
self.game_time = 0.
self.game_frame_count = 0
self.real_time_passed = 0.
self.real_time = self.get_real_time()
self.started = True
self.fps = 0.0
self.fps_sample_start_time = self.real_time
self.fps_sample_count = 0
def set_speed(self, speed):
"""Sets the speed of the clock.
speed -- A time factor (1 is normal speed, 2 is twice normal)
"""
assert isinstance(speed, float), "Must be a float"
if speed < 0.0:
raise ValueError("Negative speeds not supported")
self.speed = speed
    def pause(self):
        """Pauses the Game Clock."""
        self.paused = True
    def unpause(self):
        """Un-pauses the Game Clock."""
        self.paused = False
def get_real_time(self):
"""Returns the real time, as reported by the system clock.
This method may be overriden."""
import time
return time.clock()
def get_fps(self):
"""Retrieves the current frames per second as a tuple containing
the fps and average fps over a second."""
return self.fps, self.average_fps
def get_between_frame(self):
"""Returns the interpolant between the previous game tick and the
next game tick."""
return self.between_frame
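    # Typical render-side use of the interpolant (illustrative):
    #
    #   tween = clock.get_between_frame()
    #   draw_x = prev_x + (cur_x - prev_x) * tween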
def update(self, max_updates = 0):
"""Advances time, must be called once per frame. Yields tuples of
game frame count and game time.
max_updates -- Maximum number of game time updates to issue.
"""
assert self.started, "You must call 'start' before using a GameClock."
real_time_now = self.get_real_time()
self.real_time_passed = real_time_now - self.real_time
self.real_time = real_time_now
self.clock_time += self.real_time_passed
if not self.paused:
self.virtual_time += self.real_time_passed * self.speed
            update_count = 0
            while self.game_time + self.game_tick < self.virtual_time:
                self.game_frame_count += 1
                self.game_time = self.game_frame_count * self.game_tick
                yield (self.game_frame_count, self.game_time)
                update_count += 1
                if max_updates and update_count == max_updates:
                    break
self.between_frame = ( self.virtual_time - self.game_time ) / self.game_tick
if self.real_time_passed != 0:
self.fps = 1.0 / self.real_time_passed
else:
self.fps = 0.0
self.fps_sample_count += 1
if self.real_time - self.fps_sample_start_time > 1.0:
self.average_fps = self.fps_sample_count / (self.real_time - self.fps_sample_start_time)
self.fps_sample_start_time = self.real_time
self.fps_sample_count = 0
if __name__ == "__main__":
import time
t = GameClock(20) # AI is 20 frames per second
t.start()
while t.virtual_time < 2.0:
for (frame_count, game_time) in t.update():
print "Game frame #%i, %2.4f" % (frame_count, game_time)
virtual_time = t.virtual_time
print "\t%2.2f%% between game frame, time is %2.4f"%(t.between_frame*100., virtual_time)
time.sleep(0.2) # Simulate time to render frame
| gpl-3.0 | 8,472,696,090,217,853,000 | 27.150289 | 100 | 0.501232 | false | 4.21645 | false | false | false |
CauldronDevelopmentLLC/buildbot | buildbot/status/mail.py | 1 | 21677 | # -*- test-case-name: buildbot.test.test_status -*-
# the email.MIMEMultipart module is only available in python-2.2.2 and later
import re
from email.Message import Message
from email.Utils import formatdate
from email.MIMEText import MIMEText
try:
from email.MIMEMultipart import MIMEMultipart
canDoAttachments = True
except ImportError:
canDoAttachments = False
import urllib
from zope.interface import implements
from twisted.internet import defer
from twisted.python import log as twlog
from buildbot import interfaces, util
from buildbot.status import base
from buildbot.status.builder import FAILURE, SUCCESS, WARNINGS, Results
VALID_EMAIL = re.compile("[a-zA-Z0-9\.\_\%\-\+]+@[a-zA-Z0-9\.\_\%\-]+\.[a-zA-Z]{2,6}")
def message(attrs):
"""Generate a buildbot mail message and return a tuple of message text
and type.
This function can be replaced using the customMesg variable in MailNotifier.
A message function will *always* get a dictionary of attributes with
the following values:
builderName - (str) Name of the builder that generated this event.
projectName - (str) Name of the project.
mode - (str) Mode set in MailNotifier. (failing, passing, problem).
result - (str) Builder result as a string. 'success', 'warnings',
'failure', 'skipped', or 'exception'
buildURL - (str) URL to build page.
buildbotURL - (str) URL to buildbot main page.
buildText - (str) Build text from build.getText().
slavename - (str) Slavename.
reason - (str) Build reason from build.getReason().
responsibleUsers - (List of str) List of responsible users.
branch - (str) Name of branch used. If no SourceStamp exists branch
is an empty string.
revision - (str) Name of revision used. If no SourceStamp exists revision
is an empty string.
patch - (str) Name of patch used. If no SourceStamp exists patch
is an empty string.
changes - (list of objs) List of change objects from SourceStamp. A change
object has the following useful information:
who - who made this change
revision - what VC revision is this change
branch - on what branch did this change occur
when - when did this change occur
files - what files were affected in this change
                          comments - comments regarding the change.
The functions asText and asHTML return a list of strings with
the above information formatted.
logs - (List of Tuples) List of tuples that contain the log name, log url
and log contents as a list of strings.
"""
text = ""
if attrs['mode'] == "all":
text += "The Buildbot has finished a build"
elif attrs['mode'] == "failing":
text += "The Buildbot has detected a failed build"
elif attrs['mode'] == "passing":
text += "The Buildbot has detected a passing build"
else:
text += "The Buildbot has detected a new failure"
text += " of %s on %s.\n" % (attrs['builderName'], attrs['projectName'])
if attrs['buildURL']:
text += "Full details are available at:\n %s\n" % attrs['buildURL']
text += "\n"
if attrs['buildbotURL']:
text += "Buildbot URL: %s\n\n" % urllib.quote(attrs['buildbotURL'], '/:')
text += "Buildslave for this Build: %s\n\n" % attrs['slavename']
text += "Build Reason: %s\n" % attrs['reason']
    #
    # No source stamp information at all
    #
    if not (attrs['branch'] or attrs['revision'] or attrs['patch']):
        source = "unavailable"
    else:
        source = ""
        if attrs['branch']:
            source += "[branch %s] " % attrs['branch']
        if attrs['revision']:
            source += attrs['revision']
        else:
            source += "HEAD"
        if attrs['patch']:
            source += " (plus patch)"
text += "Build Source Stamp: %s\n" % source
text += "Blamelist: %s\n" % ",".join(attrs['responsibleUsers'])
text += "\n"
t = attrs['buildText']
if t:
t = ": " + " ".join(t)
else:
t = ""
if attrs['result'] == 'success':
text += "Build succeeded!\n"
elif attrs['result'] == 'warnings':
text += "Build Had Warnings%s\n" % t
else:
text += "BUILD FAILED%s\n" % t
text += "\n"
text += "sincerely,\n"
text += " -The Buildbot\n"
text += "\n"
return (text, 'plain')
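# A drop-in replacement for the function above, for illustration; any callable
# with the same (attrs) -> (text, type) contract can be passed to
# MailNotifier(customMesg=...):
#
#   def html_message(attrs):
#       body = "<h1>%s: %s</h1><p>%s</p>" % (
#           attrs['builderName'], attrs['result'], attrs['buildURL'])
#       return (body, 'html')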
class Domain(util.ComparableMixin):
implements(interfaces.IEmailLookup)
compare_attrs = ["domain"]
def __init__(self, domain):
assert "@" not in domain
self.domain = domain
def getAddress(self, name):
"""If name is already an email address, pass it through."""
if '@' in name:
return name
return name + "@" + self.domain
class MailNotifier(base.StatusReceiverMultiService):
"""This is a status notifier which sends email to a list of recipients
upon the completion of each build. It can be configured to only send out
mail for certain builds, and only send messages when the build fails, or
when it transitions from success to failure. It can also be configured to
include various build logs in each message.
By default, the message will be sent to the Interested Users list, which
includes all developers who made changes in the build. You can add
additional recipients with the extraRecipients argument.
To get a simple one-message-per-build (say, for a mailing list), use
sendToInterestedUsers=False, extraRecipients=['listaddr@example.org']
Each MailNotifier sends mail to a single set of recipients. To send
different kinds of mail to different recipients, use multiple
MailNotifiers.
"""
implements(interfaces.IEmailSender)
compare_attrs = ["extraRecipients", "lookup", "fromaddr", "mode",
"categories", "builders", "addLogs", "relayhost",
"subject", "sendToInterestedUsers", "customMesg"]
def __init__(self, fromaddr, mode="all", categories=None, builders=None,
addLogs=False, relayhost="localhost",
subject="buildbot %(result)s in %(projectName)s on %(builder)s",
lookup=None, extraRecipients=[],
sendToInterestedUsers=True, customMesg=message):
"""
@type fromaddr: string
@param fromaddr: the email address to be used in the 'From' header.
@type sendToInterestedUsers: boolean
@param sendToInterestedUsers: if True (the default), send mail to all
of the Interested Users. If False, only
send mail to the extraRecipients list.
@type extraRecipients: tuple of string
@param extraRecipients: a list of email addresses to which messages
should be sent (in addition to the
InterestedUsers list, which includes any
developers who made Changes that went into this
build). It is a good idea to create a small
mailing list and deliver to that, then let
subscribers come and go as they please.
@type subject: string
@param subject: a string to be used as the subject line of the message.
%(builder)s will be replaced with the name of the
builder which provoked the message.
@type mode: string (defaults to all)
@param mode: one of:
- 'all': send mail about all builds, passing and failing
- 'failing': only send mail about builds which fail
- 'passing': only send mail about builds which succeed
- 'problem': only send mail about a build which failed
when the previous build passed
@type builders: list of strings
@param builders: a list of builder names for which mail should be
sent. Defaults to None (send mail for all builds).
Use either builders or categories, but not both.
@type categories: list of strings
@param categories: a list of category names to serve status
information for. Defaults to None (all
categories). Use either builders or categories,
but not both.
@type addLogs: boolean.
@param addLogs: if True, include all build logs as attachments to the
messages. These can be quite large. This can also be
set to a list of log names, to send a subset of the
logs. Defaults to False.
@type relayhost: string
@param relayhost: the host to which the outbound SMTP connection
should be made. Defaults to 'localhost'
@type lookup: implementor of {IEmailLookup}
@param lookup: object which provides IEmailLookup, which is
responsible for mapping User names (which come from
the VC system) into valid email addresses. If not
provided, the notifier will only be able to send mail
to the addresses in the extraRecipients list. Most of
the time you can use a simple Domain instance. As a
shortcut, you can pass as string: this will be
treated as if you had provided Domain(str). For
example, lookup='twistedmatrix.com' will allow mail
to be sent to all developers whose SVN usernames
match their twistedmatrix.com account names.
@type customMesg: func
@param customMesg: A function that returns a tuple containing the text of
a custom message and its type. This function takes
the dict attrs which has the following values:
builderName - (str) Name of the builder that generated this event.
projectName - (str) Name of the project.
mode - (str) Mode set in MailNotifier. (failing, passing, problem).
result - (str) Builder result as a string. 'success', 'warnings',
'failure', 'skipped', or 'exception'
buildURL - (str) URL to build page.
buildbotURL - (str) URL to buildbot main page.
buildText - (str) Build text from build.getText().
slavename - (str) Slavename.
reason - (str) Build reason from build.getReason().
responsibleUsers - (List of str) List of responsible users.
branch - (str) Name of branch used. If no SourceStamp exists branch
is an empty string.
revision - (str) Name of revision used. If no SourceStamp exists revision
is an empty string.
patch - (str) Name of patch used. If no SourceStamp exists patch
is an empty string.
changes - (list of objs) List of change objects from SourceStamp. A change
object has the following useful information:
who - who made this change
revision - what VC revision is this change
branch - on what branch did this change occur
when - when did this change occur
files - what files were affected in this change
                              comments - comments regarding the change.
The functions asText and asHTML return a list of strings with
the above information formatted.
logs - (List of Tuples) List of tuples that contain the log name, log url,
and log contents as a list of strings.
"""
base.StatusReceiverMultiService.__init__(self)
assert isinstance(extraRecipients, (list, tuple))
for r in extraRecipients:
assert isinstance(r, str)
assert VALID_EMAIL.search(r) # require full email addresses, not User names
self.extraRecipients = extraRecipients
self.sendToInterestedUsers = sendToInterestedUsers
self.fromaddr = fromaddr
        assert mode in ('all', 'failing', 'passing', 'problem')
self.mode = mode
self.categories = categories
self.builders = builders
self.addLogs = addLogs
self.relayhost = relayhost
self.subject = subject
if lookup is not None:
if type(lookup) is str:
lookup = Domain(lookup)
assert interfaces.IEmailLookup.providedBy(lookup)
self.lookup = lookup
self.customMesg = customMesg
self.watched = []
self.status = None
# you should either limit on builders or categories, not both
if self.builders != None and self.categories != None:
            twlog.err("Please specify only builders to ignore or categories to include")
            raise ValueError("Please specify either builders or categories, not both")
def setServiceParent(self, parent):
"""
@type parent: L{buildbot.master.BuildMaster}
"""
base.StatusReceiverMultiService.setServiceParent(self, parent)
self.setup()
def setup(self):
self.status = self.parent.getStatus()
self.status.subscribe(self)
def disownServiceParent(self):
self.status.unsubscribe(self)
for w in self.watched:
w.unsubscribe(self)
return base.StatusReceiverMultiService.disownServiceParent(self)
def builderAdded(self, name, builder):
# only subscribe to builders we are interested in
if self.categories != None and builder.category not in self.categories:
return None
self.watched.append(builder)
return self # subscribe to this builder
def builderRemoved(self, name):
pass
def builderChangedState(self, name, state):
pass
def buildStarted(self, name, build):
pass
def buildFinished(self, name, build, results):
# here is where we actually do something.
builder = build.getBuilder()
if self.builders is not None and name not in self.builders:
return # ignore this build
if self.categories is not None and \
builder.category not in self.categories:
return # ignore this build
if self.mode == "failing" and results != FAILURE:
return
if self.mode == "passing" and results != SUCCESS:
return
if self.mode == "problem":
if results != FAILURE:
return
prev = build.getPreviousBuild()
if prev and prev.getResults() == FAILURE:
return
# for testing purposes, buildMessage returns a Deferred that fires
# when the mail has been sent. To help unit tests, we return that
# Deferred here even though the normal IStatusReceiver.buildFinished
# signature doesn't do anything with it. If that changes (if
# .buildFinished's return value becomes significant), we need to
# rearrange this.
return self.buildMessage(name, build, results)
def buildMessage(self, name, build, results):
#
# logs is a list of tuples that contain the log
# name, log url, and the log contents as a list of strings.
#
logs = list()
for log in build.getLogs():
stepName = log.getStep().getName()
logName = log.getName()
logs.append(('%s.%s' % (stepName, logName),
'%s/steps/%s/logs/%s' % (self.status.getURLForThing(build), stepName, logName),
log.getText().splitlines()))
attrs = {'builderName': name,
'projectName': self.status.getProjectName(),
'mode': self.mode,
'result': Results[results],
'buildURL': self.status.getURLForThing(build),
'buildbotURL': self.status.getBuildbotURL(),
'buildText': build.getText(),
'slavename': build.getSlavename(),
'reason': build.getReason(),
'responsibleUsers': build.getResponsibleUsers(),
'branch': "",
'revision': "",
'patch': "",
'changes': [],
'logs': logs}
ss = build.getSourceStamp()
if ss:
attrs['branch'] = ss.branch
attrs['revision'] = ss.revision
attrs['patch'] = ss.patch
attrs['changes'] = ss.changes[:]
text, type = self.customMesg(attrs)
assert type in ('plain', 'html'), "'%s' message type must be 'plain' or 'html'." % type
haveAttachments = False
if attrs['patch'] or self.addLogs:
haveAttachments = True
if not canDoAttachments:
twlog.msg("warning: I want to send mail with attachments, "
"but this python is too old to have "
"email.MIMEMultipart . Please upgrade to python-2.3 "
"or newer to enable addLogs=True")
if haveAttachments and canDoAttachments:
m = MIMEMultipart()
m.attach(MIMEText(text, type))
else:
m = Message()
m.set_payload(text)
m.set_type("text/%s" % type)
m['Date'] = formatdate(localtime=True)
m['Subject'] = self.subject % { 'result': attrs['result'],
'projectName': attrs['projectName'],
'builder': attrs['builderName'],
}
m['From'] = self.fromaddr
# m['To'] is added later
if attrs['patch']:
a = MIMEText(attrs['patch'][1])
a.add_header('Content-Disposition', "attachment",
filename="source patch")
m.attach(a)
if self.addLogs:
for log in build.getLogs():
name = "%s.%s" % (log.getStep().getName(),
log.getName())
if self._shouldAttachLog(log.getName()) or self._shouldAttachLog(name):
a = MIMEText(log.getText())
a.add_header('Content-Disposition', "attachment",
filename=name)
m.attach(a)
# now, who is this message going to?
dl = []
recipients = []
if self.sendToInterestedUsers and self.lookup:
for u in build.getInterestedUsers():
d = defer.maybeDeferred(self.lookup.getAddress, u)
d.addCallback(recipients.append)
dl.append(d)
d = defer.DeferredList(dl)
d.addCallback(self._gotRecipients, recipients, m)
return d
def _shouldAttachLog(self, logname):
        if isinstance(self.addLogs, bool):
return self.addLogs
return logname in self.addLogs
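    # Hedged usage sketch (assuming the enclosing class is buildbot's
    # MailNotifier and that fromaddr/addLogs are its constructor arguments):
    # addLogs accepts either a boolean (attach all logs or none) or a list of
    # "stepName.logName" entries to attach selectively, e.g.
    #
    #   MailNotifier(fromaddr="builds@example.com", addLogs=True)
    #   MailNotifier(fromaddr="builds@example.com",
    #                addLogs=["compile.stdio", "test.stdio"])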
def _gotRecipients(self, res, rlist, m):
recipients = set()
for r in rlist:
if r is None: # getAddress didn't like this address
continue
            # Git can give addresses like 'User' <user@foo.com>@foo.com, so
            # check for two '@' signs and chop everything from the last one
if r.count('@') > 1:
r = r[:r.rindex('@')]
if VALID_EMAIL.search(r):
recipients.add(r)
else:
twlog.msg("INVALID EMAIL: %r" + r)
        # If we're sending to interested users, move the extras to the CC
        # list so they can tell if they are also interested in the change,
        # unless there are no interested users.
if self.sendToInterestedUsers and len(recipients):
m['CC'] = ", ".join(sorted(self.extraRecipients[:]))
else:
            for r in self.extraRecipients[:]:
                recipients.add(r)
m['To'] = ", ".join(sorted(recipients))
# The extras weren't part of the TO list so add them now
if self.sendToInterestedUsers:
for r in self.extraRecipients:
recipients.add(r)
return self.sendMessage(m, list(recipients))
def sendMessage(self, m, recipients):
from twisted.mail.smtp import sendmail
s = m.as_string()
twlog.msg("sending mail (%d bytes) to" % len(s), recipients)
return sendmail(self.relayhost, self.fromaddr, recipients, s)
| gpl-2.0 | -8,253,264,787,955,298,000 | 40.368321 | 104 | 0.555058 | false | 4.723687 | false | false | false |
sql-analytics/openvstorage | ovs/dal/dataobject.py | 1 | 37746 | # Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
DataObject module
"""
import uuid
import copy
import re
import json
import inspect
from ovs.dal.exceptions import (ObjectNotFoundException, ConcurrencyException, LinkedObjectException,
MissingMandatoryFieldsException, SaveRaceConditionException, InvalidRelationException,
VolatileObjectException)
from ovs.dal.helpers import Descriptor, Toolbox, HybridRunner
from ovs.dal.relations import RelationMapper
from ovs.dal.dataobjectlist import DataObjectList
from ovs.dal.datalist import DataList
from ovs.extensions.generic.volatilemutex import VolatileMutex
from ovs.extensions.storage.exceptions import KeyNotFoundException
from ovs.extensions.storage.persistentfactory import PersistentFactory
from ovs.extensions.storage.volatilefactory import VolatileFactory
class MetaClass(type):
"""
This metaclass provides dynamic __doc__ generation feeding doc generators
"""
def __new__(mcs, name, bases, dct):
"""
Overrides instance creation of all DataObject instances
"""
if name != 'DataObject':
for internal in ['_properties', '_relations', '_dynamics']:
data = set()
for base in bases:
if hasattr(base, internal):
data.update(getattr(base, internal))
if '_{0}_{1}'.format(name, internal) in dct:
data.update(dct.pop('_{0}_{1}'.format(name, internal)))
dct[internal] = list(data)
for prop in dct['_properties']:
docstring = prop.docstring
if isinstance(prop.property_type, type):
itemtype = prop.property_type.__name__
extra_info = ''
else:
itemtype = 'Enum({0})'.format(prop.property_type[0].__class__.__name__)
extra_info = '(enum values: {0})'.format(', '.join(prop.property_type))
dct[prop.name] = property(
doc='[persistent] {0} {1}\n@type: {2}'.format(docstring, extra_info, itemtype)
)
for relation in dct['_relations']:
itemtype = relation.foreign_type.__name__ if relation.foreign_type is not None else name
dct[relation.name] = property(
doc='[relation] one-to-{0} relation with {1}.{2}\n@type: {3}'.format(
'one' if relation.onetoone else 'many',
itemtype,
relation.foreign_key,
itemtype
)
)
for dynamic in dct['_dynamics']:
if bases[0].__name__ == 'DataObject':
if '_{0}'.format(dynamic.name) not in dct:
raise LookupError('Dynamic property {0} in {1} could not be resolved'.format(dynamic.name, name))
method = dct['_{0}'.format(dynamic.name)]
else:
methods = [getattr(base, '_{0}'.format(dynamic.name)) for base in bases if hasattr(base, '_{0}'.format(dynamic.name))]
if len(methods) == 0:
raise LookupError('Dynamic property {0} in {1} could not be resolved'.format(dynamic.name, name))
                    method = methods[0]
docstring = method.__doc__.strip()
if isinstance(dynamic.return_type, type):
itemtype = dynamic.return_type.__name__
extra_info = ''
else:
itemtype = 'Enum({0})'.format(dynamic.return_type[0].__class__.__name__)
extra_info = '(enum values: {0})'.format(', '.join(dynamic.return_type))
dct[dynamic.name] = property(
fget=method,
doc='[dynamic] ({0}s) {1} {2}\n@rtype: {3}'.format(dynamic.timeout, docstring, extra_info, itemtype)
)
return super(MetaClass, mcs).__new__(mcs, name, bases, dct)
class DataObject(object):
"""
This base class contains all logic to support our multiple backends and the caching
- Storage backends:
- Persistent backend for persistent storage (key-value store)
- Volatile backend for volatile but fast storage (key-value store)
- Storage backends are abstracted and injected into this class, making it possible to use
fake backends
- Features:
- Hybrid property access:
- Persistent backend
- 3rd party component for "live" properties
- Individual cache settings for "live" properties
- 1-n relations with automatic property propagation
- Recursive save
"""
__metaclass__ = MetaClass
#######################
# Attributes
#######################
    # Properties that need to be overwritten by the implementation
    _properties = []  # Blueprint data of the object type
    _dynamics = []    # Dynamic (computed) properties with individual cache timeouts
    _relations = []   # Blueprint for relations
#######################
    # Constructor
#######################
def __new__(cls, *args, **kwargs):
"""
Initializes the class
"""
hybrid_structure = HybridRunner.get_hybrids()
identifier = Descriptor(cls).descriptor['identifier']
if identifier in hybrid_structure and identifier != hybrid_structure[identifier]['identifier']:
new_class = Descriptor().load(hybrid_structure[identifier]).get_object()
return super(cls, new_class).__new__(new_class, *args, **kwargs)
return super(DataObject, cls).__new__(cls)
    def __init__(self, guid=None, data=None, datastore_wins=False, volatile=False):
        """
        Loads an object with a given guid. If no guid is given, a new object
        is generated with a new guid.
        * guid: The guid indicating which object should be loaded
        * data: Optional dictionary with initial field values
        * datastore_wins: Optional boolean indicating save conflict resolve management.
        ** True: when saving, externally modified fields will not be saved
        ** False: when saving, all changed data will be saved, regardless of external updates
        ** None: in case changed fields were also changed externally, an error will be raised
        * volatile: Optional boolean; a volatile object cannot be saved or deleted
        """
# Initialize super class
super(DataObject, self).__init__()
# Initialize internal fields
self._frozen = False
self._datastore_wins = datastore_wins
self._guid = None # Guid identifier of the object
self._original = {} # Original data copy
self._metadata = {} # Some metadata, mainly used for unit testing
self._data = {} # Internal data storage
self._objects = {} # Internal objects storage
# Initialize public fields
self.dirty = False
self.volatile = volatile
# Worker fields/objects
self._name = self.__class__.__name__.lower()
self._namespace = 'ovs_data' # Namespace of the object
self._mutex_listcache = VolatileMutex('listcache_{0}'.format(self._name))
self._mutex_reverseindex = VolatileMutex('reverseindex')
# Rebuild _relation types
hybrid_structure = HybridRunner.get_hybrids()
for relation in self._relations:
if relation.foreign_type is not None:
identifier = Descriptor(relation.foreign_type).descriptor['identifier']
if identifier in hybrid_structure and identifier != hybrid_structure[identifier]['identifier']:
relation.foreign_type = Descriptor().load(hybrid_structure[identifier]).get_object()
# Init guid
self._new = False
if guid is None:
self._guid = str(uuid.uuid4())
self._new = True
else:
guid = str(guid).lower()
if re.match('^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$', guid) is not None:
self._guid = str(guid)
else:
raise ValueError('The given guid is invalid: {0}'.format(guid))
# Build base keys
self._key = '{0}_{1}_{2}'.format(self._namespace, self._name, self._guid)
# Version mutex
self._mutex_version = VolatileMutex('ovs_dataversion_{0}_{1}'.format(self._name, self._guid))
# Load data from cache or persistent backend where appropriate
self._volatile = VolatileFactory.get_client()
self._persistent = PersistentFactory.get_client()
self._metadata['cache'] = None
if self._new:
self._data = {}
else:
self._data = self._volatile.get(self._key)
if self._data is None:
Toolbox.log_cache_hit('object_load', False)
self._metadata['cache'] = False
try:
self._data = self._persistent.get(self._key)
except KeyNotFoundException:
raise ObjectNotFoundException('{0} with guid \'{1}\' could not be found'.format(
self.__class__.__name__, self._guid
))
else:
Toolbox.log_cache_hit('object_load', True)
self._metadata['cache'] = True
# Set default values on new fields
for prop in self._properties:
if prop.name not in self._data:
self._data[prop.name] = prop.default
self._add_property(prop)
# Load relations
for relation in self._relations:
if relation.name not in self._data:
if relation.foreign_type is None:
cls = self.__class__
else:
cls = relation.foreign_type
self._data[relation.name] = Descriptor(cls).descriptor
self._add_relation_property(relation)
# Add wrapped properties
for dynamic in self._dynamics:
self._add_dynamic_property(dynamic)
# Load foreign keys
relations = RelationMapper.load_foreign_relations(self.__class__)
if relations is not None:
for key, info in relations.iteritems():
self._objects[key] = {'info': info,
'data': None}
self._add_list_property(key, info['list'])
# Store original data
self._original = copy.deepcopy(self._data)
if not self._new:
# Re-cache the object
self._volatile.set(self._key, self._data)
# Freeze property creation
self._frozen = True
# Optionally, initialize some fields
if data is not None:
for field, value in data.iteritems():
setattr(self, field, value)
#######################
# Helper methods for dynamic getting and setting
#######################
def _add_property(self, prop):
"""
Adds a simple property to the object
"""
# pylint: disable=protected-access
fget = lambda s: s._get_property(prop)
fset = lambda s, v: s._set_property(prop, v)
# pylint: enable=protected-access
setattr(self.__class__, prop.name, property(fget, fset))
def _add_relation_property(self, relation):
"""
Adds a complex property to the object (hybrids)
"""
# pylint: disable=protected-access
fget = lambda s: s._get_relation_property(relation)
fset = lambda s, v: s._set_relation_property(relation, v)
gget = lambda s: s._get_guid_property(relation)
# pylint: enable=protected-access
setattr(self.__class__, relation.name, property(fget, fset))
setattr(self.__class__, '{0}_guid'.format(relation.name), property(gget))
    def _add_list_property(self, attribute, is_list):
        """
        Adds a list (readonly) property to the object
        """
        # pylint: disable=protected-access
        fget = lambda s: s._get_list_property(attribute)
        gget = lambda s: s._get_list_guid_property(attribute)
        # pylint: enable=protected-access
        setattr(self.__class__, attribute, property(fget))
        setattr(self.__class__, ('{0}_guids' if is_list else '{0}_guid').format(attribute), property(gget))
def _add_dynamic_property(self, dynamic):
"""
Adds a dynamic property to the object
"""
# pylint: disable=protected-access
fget = lambda s: s._get_dynamic_property(dynamic)
# pylint: enable=protected-access
setattr(self.__class__, dynamic.name, property(fget))
    # Helper methods supporting property fetching
def _get_property(self, prop):
"""
Getter for a simple property
"""
return self._data[prop.name]
def _get_relation_property(self, relation):
"""
Getter for a complex property (hybrid)
It will only load the object once and caches it for the lifetime of this object
"""
attribute = relation.name
if attribute not in self._objects:
descriptor = Descriptor().load(self._data[attribute])
self._objects[attribute] = descriptor.get_object(instantiate=True)
return self._objects[attribute]
def _get_guid_property(self, relation):
"""
Getter for a foreign key property
"""
attribute = relation.name
return self._data[attribute]['guid']
def _get_list_property(self, attribute):
"""
Getter for the list property
It will execute the related query every time to return a list of hybrid objects that
refer to this object. The resulting data will be stored or merged into the cached list
preserving as much already loaded objects as possible
"""
info = self._objects[attribute]['info']
remote_class = Descriptor().load(info['class']).get_object()
remote_key = info['key']
datalist = DataList.get_relation_set(remote_class, remote_key, self.__class__, attribute, self.guid)
if self._objects[attribute]['data'] is None:
self._objects[attribute]['data'] = DataObjectList(datalist.data, remote_class)
else:
self._objects[attribute]['data'].merge(datalist.data)
if info['list'] is True:
return self._objects[attribute]['data']
else:
data = self._objects[attribute]['data']
if len(data) > 1:
raise InvalidRelationException('More than one element found in {0}'.format(attribute))
return data[0] if len(data) == 1 else None
def _get_list_guid_property(self, attribute):
"""
Getter for guid list property
"""
dataobjectlist = getattr(self, attribute)
if dataobjectlist is None:
return None
if hasattr(dataobjectlist, '_guids'):
return dataobjectlist._guids
return dataobjectlist.guid
def _get_dynamic_property(self, dynamic):
"""
Getter for dynamic property, wrapping the internal data loading property
in a caching layer
"""
data_loader = getattr(self, '_{0}'.format(dynamic.name))
return self._backend_property(data_loader, dynamic)
    # Helper methods supporting property setting
def _set_property(self, prop, value):
"""
Setter for a simple property that will validate the type
"""
self.dirty = True
if value is None:
self._data[prop.name] = value
else:
correct, allowed_types, given_type = Toolbox.check_type(value, prop.property_type)
if correct:
self._data[prop.name] = value
else:
raise TypeError('Property {0} allows types {1}. {2} given'.format(
prop.name, str(allowed_types), given_type
))
def _set_relation_property(self, relation, value):
"""
Setter for a complex property (hybrid) that will validate the type
"""
self.dirty = True
attribute = relation.name
if value is None:
self._objects[attribute] = None
self._data[attribute]['guid'] = None
else:
descriptor = Descriptor(value.__class__).descriptor
if descriptor['identifier'] != self._data[attribute]['identifier']:
raise TypeError('An invalid type was given: {0} instead of {1}'.format(
descriptor['type'], self._data[attribute]['type']
))
self._objects[attribute] = value
self._data[attribute]['guid'] = value.guid
def __setattr__(self, key, value):
"""
__setattr__ hook that will block creating on the fly new properties, except
the predefined ones
"""
if not hasattr(self, '_frozen') or not self._frozen:
allowed = True
else:
# If our object structure is frozen (which is after __init__), we only allow known
# property updates: items that are in __dict__ and our own blueprinting dicts
allowed = key in self.__dict__ \
or key in (prop.name for prop in self._properties) \
or key in (relation.name for relation in self._relations) \
or key in (dynamic.name for dynamic in self._dynamics)
if allowed:
super(DataObject, self).__setattr__(key, value)
else:
raise RuntimeError('Property {0} does not exist on this object.'.format(key))
#######################
# Saving data to persistent store and invalidating volatile store
#######################
def save(self, recursive=False, skip=None):
"""
Save the object to the persistent backend and clear cache, making use
of the specified conflict resolve settings.
        It will also invalidate certain caches if required, for example
        lists pointing towards this object.
"""
if self.volatile is True:
raise VolatileObjectException()
tries = 0
successful = False
while successful is False:
invalid_fields = []
for prop in self._properties:
if prop.mandatory is True and self._data[prop.name] is None:
invalid_fields.append(prop.name)
for relation in self._relations:
if relation.mandatory is True and self._data[relation.name]['guid'] is None:
invalid_fields.append(relation.name)
if len(invalid_fields) > 0:
raise MissingMandatoryFieldsException('Missing fields on {0}: {1}'.format(self._name, ', '.join(invalid_fields)))
if recursive:
                # Save the objects this object points at (e.g. disk.vmachine - if this is a disk)
for relation in self._relations:
if relation.name != skip: # disks will be skipped
item = getattr(self, relation.name)
if item is not None:
item.save(recursive=True, skip=relation.foreign_key)
                # Save the objects pointing to this object (e.g. machine.disks - if this is a machine)
relations = RelationMapper.load_foreign_relations(self.__class__)
if relations is not None:
for key, info in relations.iteritems():
if key != skip: # machine will be skipped
if info['list'] is True:
for item in getattr(self, key).iterloaded():
item.save(recursive=True, skip=info['key'])
else:
item = getattr(self, key)
if item is not None:
item.save(recursive=True, skip=info['key'])
try:
data = self._persistent.get(self._key)
except KeyNotFoundException:
if self._new:
data = {'_version': 0}
else:
raise ObjectNotFoundException('{0} with guid \'{1}\' was deleted'.format(
self.__class__.__name__, self._guid
))
changed_fields = []
data_conflicts = []
for attribute in self._data.keys():
if attribute == '_version':
continue
if self._data[attribute] != self._original[attribute]:
# We changed this value
changed_fields.append(attribute)
if attribute in data and self._original[attribute] != data[attribute]:
# Some other process also wrote to the database
if self._datastore_wins is None:
# In case we didn't set a policy, we raise the conflicts
data_conflicts.append(attribute)
elif self._datastore_wins is False:
# If the datastore should not win, we just overwrite the data
data[attribute] = self._data[attribute]
# If the datastore should win, we discard/ignore our change
else:
# Normal scenario, saving data
data[attribute] = self._data[attribute]
elif attribute not in data:
data[attribute] = self._data[attribute]
if data_conflicts:
raise ConcurrencyException('Got field conflicts while saving {0}. Conflicts: {1}'.format(
self._name, ', '.join(data_conflicts)
))
# Refresh internal data structure
self._data = copy.deepcopy(data)
# First, update reverse index
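            # (The reverse index lives in the volatile store under keys like
            # 'ovs_reverseindex_<classname>_<guid>', mapping each foreign-key
            # name to the list of guids of the objects that point to it.)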
try:
self._mutex_reverseindex.acquire(60)
for relation in self._relations:
key = relation.name
original_guid = self._original[key]['guid']
new_guid = self._data[key]['guid']
if original_guid != new_guid:
if relation.foreign_type is None:
classname = self.__class__.__name__.lower()
else:
classname = relation.foreign_type.__name__.lower()
if original_guid is not None:
reverse_key = 'ovs_reverseindex_{0}_{1}'.format(classname, original_guid)
reverse_index = self._volatile.get(reverse_key)
if reverse_index is not None:
if relation.foreign_key in reverse_index:
entries = reverse_index[relation.foreign_key]
if self.guid in entries:
entries.remove(self.guid)
reverse_index[relation.foreign_key] = entries
self._volatile.set(reverse_key, reverse_index)
if new_guid is not None:
reverse_key = 'ovs_reverseindex_{0}_{1}'.format(classname, new_guid)
reverse_index = self._volatile.get(reverse_key)
if reverse_index is not None:
if relation.foreign_key in reverse_index:
entries = reverse_index[relation.foreign_key]
if self.guid not in entries:
entries.append(self.guid)
reverse_index[relation.foreign_key] = entries
self._volatile.set(reverse_key, reverse_index)
else:
reverse_index[relation.foreign_key] = [self.guid]
self._volatile.set(reverse_key, reverse_index)
else:
reverse_index = {relation.foreign_key: [self.guid]}
self._volatile.set(reverse_key, reverse_index)
reverse_key = 'ovs_reverseindex_{0}_{1}'.format(self._name, self.guid)
reverse_index = self._volatile.get(reverse_key)
if reverse_index is None:
reverse_index = {}
relations = RelationMapper.load_foreign_relations(self.__class__)
if relations is not None:
for key, _ in relations.iteritems():
reverse_index[key] = []
self._volatile.set(reverse_key, reverse_index)
finally:
self._mutex_reverseindex.release()
# Second, invalidate property lists
try:
self._mutex_listcache.acquire(60)
cache_key = '{0}_{1}'.format(DataList.cachelink, self._name)
cache_list = Toolbox.try_get(cache_key, {})
change = False
for list_key in cache_list.keys():
fields = cache_list[list_key]
if ('__all' in fields and self._new) or list(set(fields) & set(changed_fields)):
change = True
self._volatile.delete(list_key)
del cache_list[list_key]
if change is True:
self._volatile.set(cache_key, cache_list)
self._persistent.set(cache_key, cache_list)
finally:
self._mutex_listcache.release()
# Save the data
try:
self._mutex_version.acquire(5)
this_version = self._data['_version']
try:
store_version = self._persistent.get(self._key)['_version']
except KeyNotFoundException:
store_version = 0
if this_version == store_version:
self._data['_version'] = this_version + 1
self._persistent.set(self._key, self._data)
self._volatile.delete(self._key)
successful = True
else:
tries += 1
finally:
self._mutex_version.release()
if tries > 5:
raise SaveRaceConditionException()
self._original = copy.deepcopy(self._data)
self.dirty = False
self._new = False
#######################
# Other CRUDs
#######################
def delete(self, abandon=False):
"""
Delete the given object. It also invalidates certain lists
"""
if self.volatile is True:
raise VolatileObjectException()
# Check foreign relations
relations = RelationMapper.load_foreign_relations(self.__class__)
if relations is not None:
for key, info in relations.iteritems():
items = getattr(self, key)
if info['list'] is True:
if len(items) > 0:
if abandon is True:
for item in items.itersafe():
setattr(item, info['key'], None)
try:
item.save()
except ObjectNotFoundException:
pass
else:
raise LinkedObjectException('There are {0} items left in self.{1}'.format(len(items), key))
elif items is not None:
# No list (so a 1-to-1 relation), so there should be an object, or None
                item = items  # clearer naming
if abandon is True:
setattr(item, info['key'], None)
try:
item.save()
except ObjectNotFoundException:
pass
else:
raise LinkedObjectException('There is still an item linked in self.{0}'.format(key))
# Delete the object out of the persistent store
try:
self._persistent.delete(self._key)
except KeyNotFoundException:
pass
# First, update reverse index
try:
self._mutex_reverseindex.acquire(60)
for relation in self._relations:
key = relation.name
original_guid = self._original[key]['guid']
if original_guid is not None:
if relation.foreign_type is None:
classname = self.__class__.__name__.lower()
else:
classname = relation.foreign_type.__name__.lower()
reverse_key = 'ovs_reverseindex_{0}_{1}'.format(classname, original_guid)
reverse_index = self._volatile.get(reverse_key)
if reverse_index is not None:
if relation.foreign_key in reverse_index:
entries = reverse_index[relation.foreign_key]
if self.guid in entries:
entries.remove(self.guid)
reverse_index[relation.foreign_key] = entries
self._volatile.set(reverse_key, reverse_index)
self._volatile.delete('ovs_reverseindex_{0}_{1}'.format(self._name, self.guid))
finally:
self._mutex_reverseindex.release()
# Second, invalidate property lists
try:
self._mutex_listcache.acquire(60)
cache_key = '{0}_{1}'.format(DataList.cachelink, self._name)
cache_list = Toolbox.try_get(cache_key, {})
change = False
for list_key in cache_list.keys():
fields = cache_list[list_key]
if '__all' in fields:
change = True
self._volatile.delete(list_key)
del cache_list[list_key]
if change is True:
self._volatile.set(cache_key, cache_list)
self._persistent.set(cache_key, cache_list)
finally:
self._mutex_listcache.release()
# Delete the object and its properties out of the volatile store
self.invalidate_dynamics()
self._volatile.delete(self._key)
# Discard all pending changes
def discard(self):
"""
Discard all pending changes, reloading the data from the persistent backend
"""
        self.__init__(guid=self._guid,
                      datastore_wins=self._datastore_wins)
def invalidate_dynamics(self, properties=None):
"""
Invalidates all dynamic property caches. Use with caution, as this action can introduce
a short performance hit.
"""
for dynamic in self._dynamics:
if properties is None or dynamic.name in properties:
self._volatile.delete('{0}_{1}'.format(self._key, dynamic.name))
def export(self):
"""
Exports this object's data for import in another object
"""
data = {}
for prop in self._properties:
data[prop.name] = self._data[prop.name]
return data
def serialize(self, depth=0):
"""
Serializes the internal data, getting rid of certain metadata like descriptors
"""
data = {'guid': self.guid}
for relation in self._relations:
key = relation.name
if depth == 0:
data['{0}_guid'.format(key)] = self._data[key]['guid']
else:
instance = getattr(self, key)
if instance is not None:
data[key] = getattr(self, key).serialize(depth=(depth - 1))
else:
data[key] = None
for prop in self._properties:
data[prop.name] = self._data[prop.name]
for dynamic in self._dynamics:
data[dynamic.name] = getattr(self, dynamic.name)
return data
def copy(self, other_object, include=None, exclude=None, include_relations=False):
"""
Copies all _properties (and optionally _relations) properties over from a given hybrid to
self. One can pass in a list of properties that should be copied, or a list of properties
that should not be copied. Exclude > Include
"""
if include is not None and not isinstance(include, list):
raise TypeError('Argument include should be None or a list of strings')
if exclude is not None and not isinstance(exclude, list):
raise TypeError('Argument exclude should be None or a list of strings')
if self.__class__.__name__ != other_object.__class__.__name__:
raise TypeError('Properties can only be loaded from hybrids of the same type')
all_properties = [prop.name for prop in self._properties]
all_relations = [relation.name for relation in self._relations]
if include:
properties_to_copy = include
else:
properties_to_copy = all_properties
if include_relations:
properties_to_copy += all_relations
if exclude:
properties_to_copy = [p for p in properties_to_copy if p not in exclude]
possible_options = all_properties + (all_relations if include_relations else [])
properties_to_copy = [p for p in properties_to_copy if p in possible_options]
for key in properties_to_copy:
setattr(self, key, getattr(other_object, key))
def updated_on_datastore(self):
"""
Checks whether this object has been modified on the datastore
"""
if self.volatile is True:
return False
this_version = self._data['_version']
try:
store_version = self._persistent.get(self._key)['_version']
except KeyNotFoundException:
store_version = -1
return this_version != store_version
#######################
# Properties
#######################
@property
def guid(self):
"""
The primary key of the object
"""
return self._guid
#######################
# Helper methods
#######################
def _backend_property(self, function, dynamic):
"""
Handles the internal caching of dynamic properties
"""
caller_name = dynamic.name
cache_key = '{0}_{1}'.format(self._key, caller_name)
mutex = VolatileMutex(cache_key)
try:
cached_data = self._volatile.get(cache_key)
if cached_data is None:
if dynamic.locked:
mutex.acquire()
cached_data = self._volatile.get(cache_key)
if cached_data is None:
function_info = inspect.getargspec(function)
if 'dynamic' in function_info.args:
cached_data = function(dynamic=dynamic) # Load data from backend
else:
cached_data = function()
if cached_data is not None:
correct, allowed_types, given_type = Toolbox.check_type(cached_data, dynamic.return_type)
if not correct:
raise TypeError('Dynamic property {0} allows types {1}. {2} given'.format(
caller_name, str(allowed_types), given_type
))
if dynamic.timeout > 0:
self._volatile.set(cache_key, cached_data, dynamic.timeout)
return cached_data
finally:
mutex.release()
def __str__(self):
"""
The string representation of a DataObject is the serialized value
"""
return json.dumps(self.serialize(), indent=4)
def __hash__(self):
"""
Defines a hashing equivalent for a given object. The key (object type and guid) is considered to be identifying
"""
return hash(self._key)
def __eq__(self, other):
"""
Checks whether two objects are the same.
"""
if not isinstance(other, DataObject):
return False
return self.__hash__() == other.__hash__()
def __ne__(self, other):
"""
        Checks whether two objects are not the same.
"""
if not isinstance(other, DataObject):
return True
return not self.__eq__(other)
| apache-2.0 | -2,780,434,259,696,459,300 | 42.486175 | 138 | 0.535103 | false | 4.804735 | false | false | false |
drestuart/delvelib | src/creatures/PlayerClass.py | 1 | 1552 | '''
Created on Mar 13, 2013
@author: dstu
'''
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.types import Integer
import AIClass as AI
import CreatureClass as Cr
import colors
import Game as G
class Player(Cr.Creature):
color = colors.white
description = 'player'
species = 'player'
creatureType = 'player'
def __init__(self, **kwargs):
        super(Player, self).__init__(symbol=u'@', name=u"player",
                                     AIClass=AI.PlayerAI, maxHP=10, **kwargs)
id = Column(Integer, ForeignKey('creatures.id'), primary_key=True)
    def move(self, dx, dy):
        '''Attempt to move the player by (dx, dy) on the current level.'''
        newX = self.getX() + dx
        newY = self.getY() + dy
        level = self.getLevel()
        newTile = level.getTile(newX, newY)

        # Walk onto the tile when possible; otherwise bump it (interact),
        # and delegate off-map moves to the level's edge handler.
        if newTile is not None and level.placeCreature(self, newTile):
            return True
        elif newTile is not None:
            return newTile.bump(self)
        else:
            return level.bumpEdge(self)
def die(self):
message = self.The() + " dies!"
G.message(message)
print message
G.game.quit()
def the(self):
return self.getName()
def The(self):
return self.getName()
def giveItemToCreature(self, item, creature):
self.getInventory().removeItem(item)
creature.getInventory().addItem(item)
def getQuestItemOfType(self, itemType):
return self.getInventory().getQuestItemOfType(itemType)
| lgpl-3.0 | 7,175,754,097,030,793,000 | 24.459016 | 79 | 0.578608 | false | 3.919192 | false | false | false |
paulopatto/dotfiles | fonts/bin/scripts/Hack/fix-dsig.py | 3 | 1461 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2013,2016 The Font Bakery Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
# Adapted for the Hack typeface build workflow by Chris Simpkins
from __future__ import print_function, unicode_literals
import sys
import os
from fontTools import ttLib
def set_empty_dsig(ttFont):
    """Replace any existing DSIG table with an empty, valid signature table."""
    newDSIG = ttLib.newTable("DSIG")
    newDSIG.ulVersion = 1
    newDSIG.usFlag = 0
    newDSIG.usNumSigs = 0
    newDSIG.signatureRecords = []
    ttFont.tables["DSIG"] = newDSIG
def main(argv):
for path in argv:
if not os.path.exists(path):
sys.stderr.write("[fix-dsig.py] ERROR: " + path + " is not a valid path to a font file")
sys.exit(1)
else:
font = ttLib.TTFont(path)
set_empty_dsig(font)
font.save(path)
print(path + " - successful DSIG table fix")
if __name__ == '__main__':
main(sys.argv[1:])
| mit | -2,454,767,404,344,025,000 | 30.085106 | 94 | 0.703628 | false | 3.405594 | false | false | false |
benthomasson/behave | behave/model.py | 1 | 59326 | # -*- coding: utf-8 -*-
# pylint: disable=too-many-lines
"""
This module provides the model element class that represent a behave model:
* :class:`Feature`
* :class:`Scenario`
* :class:`ScenarioOutline`
* :class:`Step`
* ...
"""
from __future__ import absolute_import, with_statement, unicode_literals
import copy
import difflib
import logging
import traceback
import itertools
import sys
import time
import six
from six.moves import zip # pylint: disable=redefined-builtin
from behave.model_core import \
BasicStatement, TagAndStatusStatement, TagStatement, Replayable
from behave.matchers import NoMatch
from behave.textutil import text as _text
class Feature(TagAndStatusStatement, Replayable):
"""A `feature`_ parsed from a *feature file*.
The attributes are:
.. attribute:: keyword
This is the keyword as seen in the *feature file*. In English this will
be "Feature".
.. attribute:: name
The name of the feature (the text after "Feature".)
.. attribute:: description
The description of the feature as seen in the *feature file*. This is
stored as a list of text lines.
.. attribute:: background
The :class:`~behave.model.Background` for this feature, if any.
.. attribute:: scenarios
A list of :class:`~behave.model.Scenario` making up this feature.
.. attribute:: tags
A list of @tags (as :class:`~behave.model.Tag` which are basically
glorified strings) attached to the feature.
See :ref:`controlling things with tags`.
.. attribute:: status
Read-Only. A summary status of the feature's run. If read before the
feature is fully tested it will return "untested" otherwise it will
return one of:
"untested"
            The feature has not been completely tested yet.
"skipped"
            One or more steps of this feature were passed over during testing.
"passed"
The feature was tested successfully.
"failed"
One or more steps of this feature failed.
.. attribute:: hook_failed
        Indicates if a hook failure occurred while running this feature.
.. versionadded:: 1.2.6
.. attribute:: duration
The time, in seconds, that it took to test this feature. If read before
the feature is tested it will return 0.0.
.. attribute:: filename
The file name (or "<string>") of the *feature file* where the feature
was found.
.. attribute:: line
The line number of the *feature file* where the feature was found.
.. attribute:: language
Indicates which spoken language (English, French, German, ..) was used
for parsing the feature file and its keywords. The I18N language code
indicates which language is used. This corresponds to the language tag
at the beginning of the feature file.
.. versionadded:: 1.2.6
.. _`feature`: gherkin.html#features
"""
type = "feature"
def __init__(self, filename, line, keyword, name, tags=None,
description=None, scenarios=None, background=None,
language=None):
tags = tags or []
super(Feature, self).__init__(filename, line, keyword, name, tags)
self.description = description or []
self.scenarios = []
self.background = background
self.language = language
self.parser = None
self.hook_failed = False
if scenarios:
for scenario in scenarios:
self.add_scenario(scenario)
def reset(self):
"""Reset to clean state before a test run."""
super(Feature, self).reset()
self.hook_failed = False
for scenario in self.scenarios:
scenario.reset()
def __repr__(self):
return '<Feature "%s": %d scenario(s)>' % \
(self.name, len(self.scenarios))
def __iter__(self):
return iter(self.scenarios)
def add_scenario(self, scenario):
scenario.feature = self
scenario.background = self.background
self.scenarios.append(scenario)
def compute_status(self):
"""Compute the status of this feature based on its:
* scenarios
* scenario outlines
* hook failures
:return: Computed status (as string-enum).
"""
skipped = True
passed_count = 0
for scenario in self.scenarios:
scenario_status = scenario.status
if scenario_status == "failed":
return "failed"
elif scenario_status == "untested":
if passed_count > 0:
return "failed" # ABORTED: Some passed, now untested.
return "untested"
if scenario_status != "skipped":
skipped = False
if scenario_status == "passed":
passed_count += 1
if skipped:
return "skipped"
elif self.hook_failed:
return "failed"
else:
return "passed"
@property
def duration(self):
# -- NEW: Background is executed N times, now part of scenarios.
feature_duration = 0.0
for scenario in self.scenarios:
feature_duration += scenario.duration
return feature_duration
def walk_scenarios(self, with_outlines=False):
"""
Provides a flat list of all scenarios of this feature.
A ScenarioOutline element adds its scenarios to this list.
But the ScenarioOutline element itself is only added when specified.
A flat scenario list is useful when all scenarios of a features
should be processed.
:param with_outlines: If ScenarioOutline items should be added, too.
:return: List of all scenarios of this feature.
"""
all_scenarios = []
for scenario in self.scenarios:
if isinstance(scenario, ScenarioOutline):
scenario_outline = scenario
if with_outlines:
all_scenarios.append(scenario_outline)
all_scenarios.extend(scenario_outline.scenarios)
else:
all_scenarios.append(scenario)
return all_scenarios
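    # Hedged usage sketch (illustrative only) of flattening a feature:
    #
    #   for scenario in feature.walk_scenarios():
    #       print("%s: %s" % (scenario.status, scenario.name))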
def should_run(self, config=None):
"""
Determines if this Feature (and its scenarios) should run.
Implements the run decision logic for a feature.
The decision depends on:
* if the Feature is marked as skipped
* if the config.tags (tag expression) enable/disable this feature
:param config: Runner configuration to use (optional).
:return: True, if scenario should run. False, otherwise.
"""
answer = not self.should_skip
if answer and config:
answer = self.should_run_with_tags(config.tags)
return answer
def should_run_with_tags(self, tag_expression):
"""Determines if this feature should run when the tag expression is used.
A feature should run if:
* it should run according to its tags
* any of its scenarios should run according to its tags
:param tag_expression: Runner/config environment tags to use.
:return: True, if feature should run. False, otherwise (skip it).
"""
run_feature = tag_expression.check(self.tags)
if not run_feature:
for scenario in self:
if scenario.should_run_with_tags(tag_expression):
run_feature = True
break
return run_feature
def mark_skipped(self):
"""Marks this feature (and all its scenarios and steps) as skipped.
Note this function may be called before the feature is executed.
"""
self.skip(require_not_executed=True)
assert self.status == "skipped"
def skip(self, reason=None, require_not_executed=False):
"""Skip executing this feature or the remaining parts of it.
Note that this feature may be already partly executed
when this function is called.
:param reason: Optional reason why feature should be skipped (as string).
:param require_not_executed: Optional, requires that feature is not
executed yet (default: false).
"""
if reason:
logger = logging.getLogger("behave")
logger.warning(u"SKIP FEATURE %s: %s", self.name, reason)
self._cached_status = None
self.should_skip = True
self.skip_reason = reason
for scenario in self.scenarios:
scenario.skip(reason, require_not_executed)
if not self.scenarios:
# -- SPECIAL CASE: Feature without scenarios
self._cached_status = "skipped"
assert self.status in self.final_status #< skipped, failed or passed.
def run(self, runner):
# pylint: disable=too-many-branches
self._cached_status = None
self.hook_failed = False
runner.context._push() # pylint: disable=protected-access
runner.context.feature = self
# run this feature if the tags say so or any one of its scenarios
run_feature = self.should_run(runner.config)
if run_feature or runner.config.show_skipped:
for formatter in runner.formatters:
formatter.feature(self)
# current tags as a set
runner.context.tags = set(self.tags)
hooks_called = False
if not runner.config.dry_run and run_feature:
hooks_called = True
for tag in self.tags:
runner.run_hook("before_tag", runner.context, tag)
runner.run_hook("before_feature", runner.context, self)
# -- RE-EVALUATE SHOULD-RUN STATE:
# Hook may call feature.mark_skipped() to exclude it.
run_feature = self.should_run()
if self.background and (run_feature or runner.config.show_skipped):
for formatter in runner.formatters:
formatter.background(self.background)
failed_count = 0
for scenario in self.scenarios:
# -- OPTIONAL: Select scenario by name (regular expressions).
if (runner.config.name and
not scenario.should_run_with_name_select(runner.config)):
scenario.mark_skipped()
continue
failed = scenario.run(runner)
if failed:
failed_count += 1
if runner.config.stop or runner.aborted:
# -- FAIL-EARLY: Stop after first failure.
break
self._cached_status = None # -- ENFORCE: compute_status() after run.
if not self.scenarios and not run_feature:
# -- SPECIAL CASE: Feature without scenarios
self._cached_status = "skipped"
if hooks_called:
runner.run_hook("after_feature", runner.context, self)
if self.hook_failed and failed_count == 0:
failed_count = 1
for tag in self.tags:
runner.run_hook("after_tag", runner.context, tag)
runner.context._pop() # pylint: disable=protected-access
if run_feature or runner.config.show_skipped:
for formatter in runner.formatters:
formatter.eof()
failed = (failed_count > 0)
return failed
class Background(BasicStatement, Replayable):
"""A `background`_ parsed from a *feature file*.
The attributes are:
.. attribute:: keyword
This is the keyword as seen in the *feature file*. In English this will
typically be "Background".
.. attribute:: name
The name of the background (the text after "Background:".)
.. attribute:: steps
A list of :class:`~behave.model.Step` making up this background.
.. attribute:: duration
The time, in seconds, that it took to run this background. If read
before the background is run it will return 0.0.
.. attribute:: filename
The file name (or "<string>") of the *feature file* where the background
was found.
.. attribute:: line
The line number of the *feature file* where the background was found.
.. _`background`: gherkin.html#backgrounds
"""
type = "background"
def __init__(self, filename, line, keyword, name, steps=None):
super(Background, self).__init__(filename, line, keyword, name)
self.steps = steps or []
def __repr__(self):
return '<Background "%s">' % self.name
def __iter__(self):
return iter(self.steps)
@property
def duration(self):
duration = 0
for step in self.steps:
duration += step.duration
return duration
class Scenario(TagAndStatusStatement, Replayable):
"""A `scenario`_ parsed from a *feature file*.
The attributes are:
.. attribute:: keyword
This is the keyword as seen in the *feature file*. In English this will
typically be "Scenario".
.. attribute:: name
The name of the scenario (the text after "Scenario:".)
.. attribute:: description
The description of the scenario as seen in the *feature file*.
This is stored as a list of text lines.
.. attribute:: feature
The :class:`~behave.model.Feature` this scenario belongs to.
.. attribute:: steps
A list of :class:`~behave.model.Step` making up this scenario.
.. attribute:: tags
A list of @tags (as :class:`~behave.model.Tag` which are basically
glorified strings) attached to the scenario.
See :ref:`controlling things with tags`.
.. attribute:: status
Read-Only. A summary status of the scenario's run. If read before the
scenario is fully tested it will return "untested" otherwise it will
return one of:
"untested"
            The scenario has not been completely tested yet.
"skipped"
            One or more steps of this scenario were passed over during testing.
"passed"
The scenario was tested successfully.
"failed"
One or more steps of this scenario failed.
.. attribute:: hook_failed
        Indicates if a hook failure occurred while running this scenario.
.. versionadded:: 1.2.6
.. attribute:: duration
The time, in seconds, that it took to test this scenario. If read before
the scenario is tested it will return 0.0.
.. attribute:: filename
The file name (or "<string>") of the *feature file* where the scenario
was found.
.. attribute:: line
The line number of the *feature file* where the scenario was found.
.. _`scenario`: gherkin.html#scenarios
"""
# pylint: disable=too-many-instance-attributes
type = "scenario"
continue_after_failed_step = False
def __init__(self, filename, line, keyword, name, tags=None, steps=None,
description=None):
tags = tags or []
super(Scenario, self).__init__(filename, line, keyword, name, tags)
self.description = description or []
self.steps = steps or []
self.background = None
self.feature = None # REFER-TO: owner=Feature
self.hook_failed = False
self._background_steps = None
self._row = None
self.was_dry_run = False
self.stderr = None
self.stdout = None
def reset(self):
"""Reset the internal data to reintroduce new-born state just after the
ctor was called.
"""
super(Scenario, self).reset()
self._row = None
self.was_dry_run = False
self.stderr = None
self.stdout = None
for step in self.all_steps:
step.reset()
@property
def background_steps(self):
"""Provide background steps if feature has a background.
Lazy init that copies the background steps.
Note that a copy of the background steps is needed to ensure
that the background step status is specific to the scenario.
:return: List of background steps or empty list
"""
if self._background_steps is None:
# -- LAZY-INIT (need copy of background.steps):
# Each scenario needs own background.steps status.
# Otherwise, background step status of the last scenario is used.
steps = []
if self.background:
steps = [copy.copy(step) for step in self.background.steps]
self._background_steps = steps
return self._background_steps
@property
def all_steps(self):
"""Returns iterator to all steps, including background steps if any."""
if self.background is not None:
return itertools.chain(self.background_steps, self.steps)
else:
return iter(self.steps)
def __repr__(self):
return '<Scenario "%s">' % self.name
def __iter__(self):
return self.all_steps
def compute_status(self):
"""Compute the status of the scenario from its steps
(and hook failures).
:return: Computed status (as string).
"""
for step in self.all_steps:
if step.status == "undefined":
if self.was_dry_run:
# -- SPECIAL CASE: In dry-run with undefined-step discovery
# Undefined steps should not cause failed scenario.
return "untested"
else:
# -- NORMALLY: Undefined steps cause failed scenario.
return "failed"
elif step.status != "passed":
assert step.status in ("failed", "skipped", "untested")
return step.status
#elif step.status == "failed":
# return "failed"
#elif step.status == "skipped":
# return "skipped"
#elif step.status == "untested":
# return "untested"
if self.hook_failed:
return "failed"
return "passed"
@property
def duration(self):
# -- ORIG: for step in self.steps: Background steps were excluded.
scenario_duration = 0
for step in self.all_steps:
scenario_duration += step.duration
return scenario_duration
@property
def effective_tags(self):
"""
Effective tags for this scenario:
* own tags
* tags inherited from its feature
"""
tags = self.tags
if self.feature:
tags = self.feature.tags + self.tags
return tags
def should_run(self, config=None):
"""
Determines if this Scenario (or ScenarioOutline) should run.
Implements the run decision logic for a scenario.
The decision depends on:
* if the Scenario is marked as skipped
* if the config.tags (tag expression) enable/disable this scenario
* if the scenario is selected by name
:param config: Runner configuration to use (optional).
:return: True, if scenario should run. False, otherwise.
"""
answer = not self.should_skip
if answer and config:
answer = (self.should_run_with_tags(config.tags) and
self.should_run_with_name_select(config))
return answer
def should_run_with_tags(self, tag_expression):
"""
Determines if this scenario should run when the tag expression is used.
:param tag_expression: Runner/config environment tags to use.
:return: True, if scenario should run. False, otherwise (skip it).
"""
return tag_expression.check(self.effective_tags)
def should_run_with_name_select(self, config):
"""Determines if this scenario should run when it is selected by name.
:param config: Runner/config environment name regexp (if any).
:return: True, if scenario should run. False, otherwise (skip it).
"""
# -- SELECT-ANY: If select by name is not specified (not config.name).
return not config.name or config.name_re.search(self.name)
def mark_skipped(self):
"""Marks this scenario (and all its steps) as skipped.
Note that this method can be called before the scenario is executed.
"""
self.skip(require_not_executed=True)
assert self.status == "skipped", "OOPS: scenario.status=%s" % self.status
def skip(self, reason=None, require_not_executed=False):
"""Skip from executing this scenario or the remaining parts of it.
Note that the scenario may be already partly executed
when this method is called.
:param reason: Optional reason why it should be skipped (as string).
"""
if reason:
scenario_type = self.__class__.__name__
logger = logging.getLogger("behave")
logger.warning(u"SKIP %s %s: %s", scenario_type, self.name, reason)
self._cached_status = None
self.should_skip = True
self.skip_reason = reason
for step in self.all_steps:
not_executed = step.status in ("untested", "skipped")
if not_executed:
step.status = "skipped"
else:
assert not require_not_executed, \
"REQUIRE NOT-EXECUTED, but step is %s" % step.status
if not self.all_steps:
# -- SPECIAL CASE: Scenario without steps
self._cached_status = "skipped"
assert self.status in self.final_status #< skipped, failed or passed
def run(self, runner):
# pylint: disable=too-many-branches, too-many-statements
self._cached_status = None
failed = False
run_scenario = self.should_run(runner.config)
run_steps = run_scenario and not runner.config.dry_run
dry_run_scenario = run_scenario and runner.config.dry_run
self.was_dry_run = dry_run_scenario
if run_scenario or runner.config.show_skipped:
for formatter in runner.formatters:
formatter.scenario(self)
runner.context._push() # pylint: disable=protected-access
runner.context.scenario = self
runner.context.tags = set(self.effective_tags)
hooks_called = False
if not runner.config.dry_run and run_scenario:
hooks_called = True
for tag in self.tags:
runner.run_hook("before_tag", runner.context, tag)
runner.run_hook("before_scenario", runner.context, self)
# -- RE-EVALUATE SHOULD-RUN STATE:
# Hook may call scenario.mark_skipped() to exclude it.
run_scenario = run_steps = self.should_run()
runner.setup_capture()
if run_scenario or runner.config.show_skipped:
for step in self:
for formatter in runner.formatters:
formatter.step(step)
for step in self.all_steps:
if run_steps:
if not step.run(runner):
# -- CASE: Failed or undefined step
# Optionally continue_after_failed_step if enabled.
# But disable run_steps after undefined-step.
run_steps = (self.continue_after_failed_step and
step.status == "failed")
failed = True
# pylint: disable=protected-access
runner.context._set_root_attribute("failed", True)
self._cached_status = "failed"
elif self.should_skip:
# -- CASE: Step skipped remaining scenario.
# assert self.status == "skipped", "Status: %s" % self.status
run_steps = False
elif failed or dry_run_scenario:
# -- SKIP STEPS: After failure/undefined-step occurred.
# BUT: Detect all remaining undefined steps.
step.status = "skipped"
if dry_run_scenario:
step.status = "untested"
found_step = runner.step_registry.find_match(step)
if not found_step:
step.status = "undefined"
runner.undefined_steps.append(step)
else:
# -- SKIP STEPS: For disabled scenario.
# CASES:
# * Undefined steps are not detected (by intention).
# * Step skipped remaining scenario.
step.status = "skipped"
self._cached_status = None # -- ENFORCE: compute_status() after run.
if not run_scenario:
# -- SPECIAL CASE: Scenario without steps.
self._cached_status = "skipped"
# Attach the stdout and stderr if generate Junit report
if runner.config.junit:
self.stdout = runner.context.stdout_capture.getvalue()
self.stderr = runner.context.stderr_capture.getvalue()
runner.teardown_capture()
if hooks_called:
runner.run_hook("after_scenario", runner.context, self)
if self.hook_failed:
failed = True
for tag in self.tags:
runner.run_hook("after_tag", runner.context, tag)
runner.context._pop() # pylint: disable=protected-access
return failed
class ScenarioOutlineBuilder(object):
"""Helper class to use a ScenarioOutline as a template and
build its scenarios (as template instances).
"""
def __init__(self, annotation_schema):
self.annotation_schema = annotation_schema
@staticmethod
def render_template(text, row=None, params=None):
"""Render a text template with placeholders, ala "Hello <name>".
:param row: As placeholder provider (dict-like).
:param params: As additional placeholder provider (as dict).
:return: Rendered text, known placeholders are substituted w/ values.
"""
if not ("<" in text and ">" in text):
return text
safe_values = False
for placeholders in (row, params):
if not placeholders:
continue
for name, value in placeholders.items():
if safe_values and ("<" in value and ">" in value):
continue # -- OOPS, value looks like placeholder.
text = text.replace("<%s>" % name, value)
return text
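    # A small illustration (hypothetical values) of the staticmethod above:
    #
    #   render_template("Hello <name>", row={"name": "Alice"})   # "Hello Alice"
    #   render_template("Use <param>", params={"param": "42"})   # "Use 42"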
def make_scenario_name(self, outline_name, example, row, params=None):
"""Build a scenario name for an example row of this scenario outline.
Placeholders for row data are replaced by values.
SCHEMA: "{outline_name} -*- {examples.name}@{row.id}"
:param outline_name: ScenarioOutline's name (as template).
:param example: Examples object.
:param row: Row of this example.
:param params: Additional placeholders for example/row.
:return: Computed name for the scenario representing example/row.
"""
if params is None:
params = {}
params["examples.name"] = example.name or ""
params.setdefault("examples.index", example.index)
params.setdefault("row.index", row.index)
params.setdefault("row.id", row.id)
# -- STEP: Replace placeholders in scenario/example name (if any).
examples_name = self.render_template(example.name, row, params)
params["examples.name"] = examples_name
scenario_name = self.render_template(outline_name, row, params)
class Data(object):
def __init__(self, name, index):
self.name = name
self.index = index
self.id = name # pylint: disable=invalid-name
example_data = Data(examples_name, example.index)
row_data = Data(row.id, row.index)
return self.annotation_schema.format(name=scenario_name,
examples=example_data, row=row_data)
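    # Hedged example using the default annotation schema
    # u"{name} -- @{row.id} {examples.name}": an outline named
    # "Add <a> and <b>", examples table "Small numbers", row id 1.1 and
    # row data a=1, b=2 yields the scenario name:
    #
    #   "Add 1 and 2 -- @1.1 Small numbers"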
@classmethod
def make_row_tags(cls, outline_tags, row, params=None):
if not outline_tags:
return []
tags = []
for tag in outline_tags:
if "<" in tag and ">" in tag:
tag = cls.render_template(tag, row, params)
if "<" in tag or ">" in tag:
# -- OOPS: Unknown placeholder, drop tag.
continue
new_tag = Tag.make_name(tag, unescape=True)
tags.append(new_tag)
return tags
@classmethod
def make_step_for_row(cls, outline_step, row, params=None):
# -- BASED-ON: new_step = outline_step.set_values(row)
new_step = copy.deepcopy(outline_step)
new_step.name = cls.render_template(new_step.name, row, params)
if new_step.text:
new_step.text = cls.render_template(new_step.text, row)
if new_step.table:
for name, value in row.items():
                for table_row in new_step.table:
                    for i, cell in enumerate(table_row.cells):
                        table_row.cells[i] = cell.replace("<%s>" % name, value)
return new_step
def build_scenarios(self, scenario_outline):
"""Build scenarios for a ScenarioOutline from its examples."""
# -- BUILD SCENARIOS (once): For this ScenarioOutline from examples.
params = {
"examples.name": None,
"examples.index": None,
"row.index": None,
"row.id": None,
}
scenarios = []
for example_index, example in enumerate(scenario_outline.examples):
example.index = example_index+1
params["examples.name"] = example.name
params["examples.index"] = _text(example.index)
for row_index, row in enumerate(example.table):
row.index = row_index+1
row.id = "%d.%d" % (example.index, row.index)
params["row.id"] = row.id
params["row.index"] = _text(row.index)
scenario_name = self.make_scenario_name(scenario_outline.name,
example, row, params)
row_tags = self.make_row_tags(scenario_outline.tags, row, params)
row_tags.extend(example.tags)
new_steps = []
for outline_step in scenario_outline.steps:
new_step = self.make_step_for_row(outline_step, row, params)
new_steps.append(new_step)
# -- STEP: Make Scenario name for this row.
# scenario_line = example.line + 2 + row_index
scenario_line = row.line
scenario = Scenario(scenario_outline.filename, scenario_line,
scenario_outline.keyword,
scenario_name, row_tags, new_steps)
scenario.feature = scenario_outline.feature
scenario.background = scenario_outline.background
scenario._row = row # pylint: disable=protected-access
scenarios.append(scenario)
return scenarios
class ScenarioOutline(Scenario):
"""A `scenario outline`_ parsed from a *feature file*.
A scenario outline extends the existing :class:`~behave.model.Scenario`
class with the addition of the :class:`~behave.model.Examples` tables of
data from the *feature file*.
The attributes are:
.. attribute:: keyword
This is the keyword as seen in the *feature file*. In English this will
typically be "Scenario Outline".
.. attribute:: name
The name of the scenario (the text after "Scenario Outline:".)
.. attribute:: description
The description of the `scenario outline`_ as seen in the *feature file*.
This is stored as a list of text lines.
.. attribute:: feature
The :class:`~behave.model.Feature` this scenario outline belongs to.
.. attribute:: steps
A list of :class:`~behave.model.Step` making up this scenario outline.
.. attribute:: examples
A list of :class:`~behave.model.Examples` used by this scenario outline.
.. attribute:: tags
A list of @tags (as :class:`~behave.model.Tag` which are basically
glorified strings) attached to the scenario.
See :ref:`controlling things with tags`.
.. attribute:: status
       Read-Only. A summary status of the scenario outline's run. If read
       before the scenario is fully tested it will return "untested", otherwise
       it will return one of:
       "untested"
          The scenario has not been completely tested yet.
       "skipped"
          One or more scenarios of this outline were passed over during testing.
       "passed"
          The scenario was tested successfully.
       "failed"
          One or more scenarios of this outline failed.
.. attribute:: duration
The time, in seconds, that it took to test the scenarios of this
outline. If read before the scenarios are tested it will return 0.0.
.. attribute:: filename
The file name (or "<string>") of the *feature file* where the scenario
was found.
.. attribute:: line
The line number of the *feature file* where the scenario was found.
.. _`scenario outline`: gherkin.html#scenario-outlines
"""
type = "scenario_outline"
annotation_schema = u"{name} -- @{row.id} {examples.name}"
def __init__(self, filename, line, keyword, name, tags=None,
steps=None, examples=None, description=None):
super(ScenarioOutline, self).__init__(filename, line, keyword, name,
tags, steps, description)
self.examples = examples or []
self._scenarios = []
def reset(self):
"""Reset runtime temporary data like before a test run."""
super(ScenarioOutline, self).reset()
for scenario in self._scenarios: # -- AVOID: BUILD-SCENARIOS
scenario.reset()
@property
def scenarios(self):
"""Return the scenarios with the steps altered to take the values from
the examples.
"""
if self._scenarios:
return self._scenarios
# -- BUILD SCENARIOS (once): For this ScenarioOutline from examples.
builder = ScenarioOutlineBuilder(self.annotation_schema)
self._scenarios = builder.build_scenarios(self)
return self._scenarios
def __repr__(self):
return '<ScenarioOutline "%s">' % self.name
def __iter__(self):
return iter(self.scenarios) # -- REQUIRE: BUILD-SCENARIOS
def compute_status(self):
skipped_count = 0
for scenario in self._scenarios: # -- AVOID: BUILD-SCENARIOS
scenario_status = scenario.status
if scenario_status in ("failed", "untested"):
return scenario_status
elif scenario_status == "skipped":
skipped_count += 1
if skipped_count > 0 and skipped_count == len(self._scenarios):
# -- ALL SKIPPED:
return "skipped"
# -- OTHERWISE: ALL PASSED
return "passed"
@property
def duration(self):
outline_duration = 0
for scenario in self._scenarios: # -- AVOID: BUILD-SCENARIOS
outline_duration += scenario.duration
return outline_duration
def should_run_with_tags(self, tag_expression):
"""Determines if this scenario outline (or one of its scenarios)
should run when the tag expression is used.
:param tag_expression: Runner/config environment tags to use.
:return: True, if scenario should run. False, otherwise (skip it).
"""
if tag_expression.check(self.effective_tags):
return True
for scenario in self.scenarios: # -- REQUIRE: BUILD-SCENARIOS
if scenario.should_run_with_tags(tag_expression):
return True
# -- NOTHING SELECTED:
return False
def should_run_with_name_select(self, config):
"""Determines if this scenario should run when it is selected by name.
:param config: Runner/config environment name regexp (if any).
:return: True, if scenario should run. False, otherwise (skip it).
"""
if not config.name:
return True # -- SELECT-ALL: Select by name is not specified.
for scenario in self.scenarios: # -- REQUIRE: BUILD-SCENARIOS
if scenario.should_run_with_name_select(config):
return True
# -- NOTHING SELECTED:
return False
def mark_skipped(self):
"""Marks this scenario outline (and all its scenarios/steps) as skipped.
Note that this method may be called before the scenario outline
is executed.
"""
self.skip(require_not_executed=True)
assert self.status == "skipped"
def skip(self, reason=None, require_not_executed=False):
"""Skip from executing this scenario outline or its remaining parts.
Note that the scenario outline may be already partly executed
when this method is called.
:param reason: Optional reason why it should be skipped (as string).
"""
if reason:
logger = logging.getLogger("behave")
logger.warning(u"SKIP ScenarioOutline %s: %s", self.name, reason)
self._cached_status = None
self.should_skip = True
for scenario in self.scenarios:
scenario.skip(reason, require_not_executed)
if not self.scenarios:
# -- SPECIAL CASE: ScenarioOutline without scenarios/examples
self._cached_status = "skipped"
assert self.status in self.final_status #< skipped, failed or passed
def run(self, runner):
# pylint: disable=protected-access
# REASON: context._set_root_attribute(), scenario._row
self._cached_status = None
failed_count = 0
for scenario in self.scenarios: # -- REQUIRE: BUILD-SCENARIOS
runner.context._set_root_attribute("active_outline", scenario._row)
failed = scenario.run(runner)
if failed:
failed_count += 1
if runner.config.stop or runner.aborted:
# -- FAIL-EARLY: Stop after first failure.
break
runner.context._set_root_attribute("active_outline", None)
return failed_count > 0
class Examples(TagStatement, Replayable):
"""A table parsed from a `scenario outline`_ in a *feature file*.
The attributes are:
.. attribute:: keyword
This is the keyword as seen in the *feature file*. In English this will
       typically be "Examples".
.. attribute:: name
       The name of the example (the text after "Examples:".)
.. attribute:: table
An instance of :class:`~behave.model.Table` that came with the example
in the *feature file*.
.. attribute:: filename
The file name (or "<string>") of the *feature file* where the example
was found.
.. attribute:: line
The line number of the *feature file* where the example was found.
.. _`examples`: gherkin.html#examples
"""
type = "examples"
def __init__(self, filename, line, keyword, name, tags=None, table=None):
super(Examples, self).__init__(filename, line, keyword, name, tags)
self.table = table
self.index = None
class Step(BasicStatement, Replayable):
"""A single `step`_ parsed from a *feature file*.
The attributes are:
.. attribute:: keyword
This is the keyword as seen in the *feature file*. In English this will
typically be "Given", "When", "Then" or a number of other words.
.. attribute:: name
The name of the step (the text after "Given" etc.)
.. attribute:: step_type
The type of step as determined by the keyword. If the keyword is "and"
then the previous keyword in the *feature file* will determine this
step's step_type.
.. attribute:: text
An instance of :class:`~behave.model.Text` that came with the step
in the *feature file*.
.. attribute:: table
An instance of :class:`~behave.model.Table` that came with the step
in the *feature file*.
.. attribute:: status
Read-Only. A summary status of the step's run. If read before the
step is tested it will return "untested" otherwise it will
return one of:
"skipped"
This step was passed over during testing.
"passed"
The step was tested successfully.
"failed"
The step failed.
.. attribute:: hook_failed
       Indicates if a hook failure occurred while running this step.
.. versionadded:: 1.2.6
.. attribute:: duration
The time, in seconds, that it took to test this step. If read before the
step is tested it will return 0.0.
.. attribute:: error_message
If the step failed then this will hold any error information, as a
single string. It will otherwise be None.
.. attribute:: filename
The file name (or "<string>") of the *feature file* where the step was
found.
.. attribute:: line
The line number of the *feature file* where the step was found.
.. _`step`: gherkin.html#steps
"""
type = "step"
def __init__(self, filename, line, keyword, step_type, name, text=None,
table=None):
super(Step, self).__init__(filename, line, keyword, name)
self.step_type = step_type
self.text = text
self.table = table
self.status = "untested"
self.hook_failed = False
self.duration = 0
self.exception = None
self.exc_traceback = None
self.error_message = None
def reset(self):
"""Reset temporary runtime data to reach clean state again."""
self.status = "untested"
self.hook_failed = False
self.duration = 0
self.exception = None
self.exc_traceback = None
self.error_message = None
def store_exception_context(self, exception):
self.exception = exception
self.exc_traceback = sys.exc_info()[2]
def __repr__(self):
return '<%s "%s">' % (self.step_type, self.name)
def __eq__(self, other):
return (self.step_type, self.name) == (other.step_type, other.name)
def __hash__(self):
return hash(self.step_type) + hash(self.name)
def set_values(self, table_row):
"""Clone a new step from this one, used for ScenarioOutline.
Replace ScenarioOutline placeholders w/ values.
:param table_row: Placeholder data for example row.
:return: Cloned, adapted step object.
.. note:: Deprecating
Use 'ScenarioOutlineBuilder.make_step_for_row()' instead.
"""
import warnings
        warnings.warn("Use 'ScenarioOutlineBuilder.make_step_for_row()' instead",
PendingDeprecationWarning, stacklevel=2)
outline_step = self
return ScenarioOutlineBuilder.make_step_for_row(outline_step, table_row)
def run(self, runner, quiet=False, capture=True):
# pylint: disable=too-many-branches, too-many-statements
# -- RESET: Run-time information.
self.exception = self.exc_traceback = self.error_message = None
self.status = "untested"
self.hook_failed = False
match = runner.step_registry.find_match(self)
if match is None:
runner.undefined_steps.append(self)
if not quiet:
for formatter in runner.formatters:
formatter.match(NoMatch())
self.status = "undefined"
if not quiet:
for formatter in runner.formatters:
formatter.result(self)
return False
keep_going = True
error = u""
if not quiet:
for formatter in runner.formatters:
formatter.match(match)
runner.run_hook("before_step", runner.context, self)
if capture:
runner.start_capture()
try:
start = time.time()
# -- ENSURE:
# * runner.context.text/.table attributes are reset (#66).
# * Even EMPTY multiline text is available in context.
runner.context.text = self.text
runner.context.table = self.table
match.run(runner.context)
if self.status == "untested":
# -- NOTE: Executed step may have skipped scenario and itself.
self.status = "passed"
except KeyboardInterrupt as e:
runner.aborted = True
error = u"ABORTED: By user (KeyboardInterrupt)."
self.status = "failed"
self.store_exception_context(e)
except AssertionError as e:
self.status = "failed"
self.store_exception_context(e)
if e.args:
message = _text(e)
error = u"Assertion Failed: "+ message
else:
# no assertion text; format the exception
error = _text(traceback.format_exc())
except Exception as e: # pylint: disable=broad-except
self.status = "failed"
error = _text(traceback.format_exc())
self.store_exception_context(e)
self.duration = time.time() - start
if capture:
runner.stop_capture()
runner.run_hook("after_step", runner.context, self)
if self.hook_failed:
self.status = "failed"
# flesh out the failure with details
if self.status == "failed":
assert isinstance(error, six.text_type)
if capture:
# -- CAPTURE-ONLY: Non-nested step failures.
if runner.config.stdout_capture:
output = runner.stdout_capture.getvalue()
if output:
output = _text(output)
error += u"\nCaptured stdout:\n" + output
if runner.config.stderr_capture:
output = runner.stderr_capture.getvalue()
if output:
output = _text(output)
error += u"\nCaptured stderr:\n" + output
if runner.config.log_capture:
output = runner.log_capture.getvalue()
if output:
output = _text(output)
error += u"\nCaptured logging:\n" + output
self.error_message = error
keep_going = False
if not quiet:
for formatter in runner.formatters:
formatter.result(self)
return keep_going
class Table(Replayable):
"""A `table`_ extracted from a *feature file*.
Table instance data is accessible using a number of methods:
**iteration**
Iterating over the Table will yield the :class:`~behave.model.Row`
instances from the .rows attribute.
**indexed access**
Individual rows may be accessed directly by index on the Table instance;
table[0] gives the first non-heading row and table[-1] gives the last
row.
The attributes are:
.. attribute:: headings
The headings of the table as a list of strings.
.. attribute:: rows
       A list of instances of :class:`~behave.model.Row` that make up the body
of the table in the *feature file*.
Tables are also comparable, for what that's worth. Headings and row data
are compared.
.. _`table`: gherkin.html#table
"""
type = "table"
def __init__(self, headings, line=None, rows=None):
Replayable.__init__(self)
self.headings = headings
self.line = line
self.rows = []
if rows:
for row in rows:
self.add_row(row, line)
def add_row(self, row, line=None):
self.rows.append(Row(self.headings, row, line))
def add_column(self, column_name, values=None, default_value=u""):
"""Adds a new column to this table.
        Uses :param:`default_value` for new cells (if :param:`values` are
        not provided). :param:`values` is extended with :param:`default_value`
        if the values list is smaller than the number of table rows.
:param column_name: Name of new column (as string).
:param values: Optional list of cell values in new column.
:param default_value: Default value for cell (if values not provided).
:returns: Index of new column (as number).
"""
# assert isinstance(column_name, unicode)
assert not self.has_column(column_name)
if values is None:
values = [default_value] * len(self.rows)
elif not isinstance(values, list):
values = list(values)
if len(values) < len(self.rows):
more_size = len(self.rows) - len(values)
more_values = [default_value] * more_size
values.extend(more_values)
new_column_index = len(self.headings)
self.headings.append(column_name)
for row, value in zip(self.rows, values):
assert len(row.cells) == new_column_index
row.cells.append(value)
return new_column_index
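    # A minimal padding sketch (assuming a hypothetical table with 3 rows):
    #
    #     table.add_column(u"status", values=[u"new"])
    #     # => cells in "status" column: [u"new", u"", u""]
    #     #    (values padded with default_value=u"")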
    def remove_column(self, column_name):
        if not isinstance(column_name, int):
            try:
                column_index = self.get_column_index(column_name)
            except ValueError:
                raise KeyError("column=%s is unknown" % column_name)
        else:
            # -- CASE: column_name is already a column index (as int).
            column_index = column_name
assert isinstance(column_index, int)
assert column_index < len(self.headings)
del self.headings[column_index]
for row in self.rows:
assert column_index < len(row.cells)
del row.cells[column_index]
def remove_columns(self, column_names):
for column_name in column_names:
self.remove_column(column_name)
def has_column(self, column_name):
return column_name in self.headings
def get_column_index(self, column_name):
return self.headings.index(column_name)
def require_column(self, column_name):
"""Require that a column exists in the table.
Raise an AssertionError if the column does not exist.
:param column_name: Name of new column (as string).
:return: Index of column (as number) if it exists.
"""
if not self.has_column(column_name):
columns = ", ".join(self.headings)
msg = "REQUIRE COLUMN: %s (columns: %s)" % (column_name, columns)
raise AssertionError(msg)
return self.get_column_index(column_name)
def require_columns(self, column_names):
for column_name in column_names:
self.require_column(column_name)
def ensure_column_exists(self, column_name):
"""Ensures that a column with the given name exists.
If the column does not exist, the column is added.
:param column_name: Name of column (as string).
:return: Index of column (as number).
"""
if self.has_column(column_name):
return self.get_column_index(column_name)
else:
return self.add_column(column_name)
def __repr__(self):
return "<Table: %dx%d>" % (len(self.headings), len(self.rows))
def __eq__(self, other):
if isinstance(other, Table):
if self.headings != other.headings:
return False
for my_row, their_row in zip(self.rows, other.rows):
if my_row != their_row:
return False
else:
# -- ASSUME: table <=> raw data comparison
other_rows = other
for my_row, their_row in zip(self.rows, other_rows):
if my_row != their_row:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __iter__(self):
return iter(self.rows)
def __getitem__(self, index):
return self.rows[index]
def assert_equals(self, data):
"""Assert that this table's cells are the same as the supplied "data".
The data passed in must be a list of lists giving:
[
[row 1],
[row 2],
[row 3],
]
If the cells do not match then a useful AssertionError will be raised.
"""
        assert self == data
class Row(object):
"""One row of a `table`_ parsed from a *feature file*.
Row data is accessible using a number of methods:
**iteration**
Iterating over the Row will yield the individual cells as strings.
**named access**
Individual cells may be accessed by heading name; row["name"] would give
the cell value for the column with heading "name".
**indexed access**
Individual cells may be accessed directly by index on the Row instance;
row[0] gives the first cell and row[-1] gives the last cell.
The attributes are:
.. attribute:: cells
The list of strings that form the cells of this row.
.. attribute:: headings
The headings of the table as a list of strings.
Rows are also comparable, for what that's worth. Only the cells are
compared.
.. _`table`: gherkin.html#table
"""
def __init__(self, headings, cells, line=None, comments=None):
self.headings = headings
self.comments = comments
for c in cells:
assert isinstance(c, six.text_type)
self.cells = cells
self.line = line
def __getitem__(self, name):
try:
index = self.headings.index(name)
except ValueError:
if isinstance(name, int):
index = name
else:
raise KeyError('"%s" is not a row heading' % name)
return self.cells[index]
def __repr__(self):
return "<Row %r>" % (self.cells,)
def __eq__(self, other):
return self.cells == other.cells
def __ne__(self, other):
return not self.__eq__(other)
def __len__(self):
return len(self.cells)
def __iter__(self):
return iter(self.cells)
def items(self):
return zip(self.headings, self.cells)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def as_dict(self):
"""Converts the row and its cell data into a dictionary.
:return: Row data as dictionary (without comments, line info).
"""
from behave.compat.collections import OrderedDict
return OrderedDict(self.items())
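    # A minimal access sketch (row contents are hypothetical):
    #
    #     row = Row([u"name", u"age"], [u"Alice", u"42"])
    #     row[u"name"]      # => u"Alice"   (named access)
    #     row[1]            # => u"42"      (indexed access)
    #     row.as_dict()     # => OrderedDict([(u"name", u"Alice"),
    #                       #                 (u"age", u"42")])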
class Tag(six.text_type):
"""Tags appear may be associated with Features or Scenarios.
They're a subclass of regular strings (unicode pre-Python 3) with an
additional ``line`` number attribute (where the tag was seen in the source
feature file.
See :ref:`controlling things with tags`.
"""
    allowed_chars = u"._-=:"  # In addition to alphanumeric chars.
quoting_chars = ("'", '"', "<", ">")
def __new__(cls, name, line):
o = six.text_type.__new__(cls, name)
o.line = line
return o
@classmethod
def make_name(cls, text, unescape=False, allowed_chars=None):
"""Translate text into a "valid tag" without whitespace, etc.
Translation rules are:
* alnum chars => same, kept
* space chars => "_"
* other chars => deleted
Preserve following characters (in addition to alnums, like: A-z, 0-9):
* dots => "." (support: dotted-names, active-tag name schema)
* minus => "-" (support: dashed-names)
* underscore => "_"
* equal => "=" (support: active-tag name schema)
* colon => ":" (support: active-tag name schema or similar)
:param text: Unicode text as input for name.
:param unescape: Optional flag to unescape some chars (default: false)
:param allowed_chars: Optional string with additional preserved chars.
:return: Unicode name that can be used as tag.
"""
assert isinstance(text, six.text_type)
if allowed_chars is None:
allowed_chars = cls.allowed_chars
if unescape:
# -- UNESCAPE: Some escaped sequences
text = text.replace("\\t", "\t").replace("\\n", "\n")
chars = []
for char in text:
if char.isalnum() or (allowed_chars and char in allowed_chars):
chars.append(char)
elif char.isspace():
chars.append(u"_")
elif char in cls.quoting_chars:
pass # -- NORMALIZE: Remove any quoting chars.
# -- MAYBE:
# else:
# # -- OTHERWISE: Accept gracefully any other character.
# chars.append(char)
return u"".join(chars)
class Text(six.text_type):
"""Store multiline text from a Step definition.
The attributes are:
.. attribute:: value
The actual text parsed from the *feature file*.
.. attribute:: content_type
Currently only "text/plain".
"""
def __new__(cls, value, content_type=u"text/plain", line=0):
assert isinstance(value, six.text_type)
assert isinstance(content_type, six.text_type)
o = six.text_type.__new__(cls, value)
o.content_type = content_type
o.line = line
return o
def line_range(self):
line_count = len(self.splitlines())
return (self.line, self.line + line_count + 1)
def replace(self, old, new, count=-1):
return Text(super(Text, self).replace(old, new, count), self.content_type,
self.line)
def assert_equals(self, expected):
"""Assert that my text is identical to the "expected" text.
A nice context diff will be displayed if they do not match.
"""
if self == expected:
return True
diff = []
for line in difflib.unified_diff(self.splitlines(),
expected.splitlines()):
diff.append(line)
# strip unnecessary diff prefix
diff = ["Text does not match:"] + diff[3:]
raise AssertionError("\n".join(diff))
# -----------------------------------------------------------------------------
# UTILITY FUNCTIONS:
# -----------------------------------------------------------------------------
def reset_model(model_elements):
"""Reset the test run information stored in model elements.
:param model_elements: List of model elements (Feature, Scenario, ...)
"""
for model_element in model_elements:
model_element.reset()
| bsd-2-clause | 355,775,774,216,674,050 | 33.815728 | 82 | 0.587112 | false | 4.437911 | true | false | false |
bccBalloon/HiBalloon | extTemp_gopro.py | 1 | 1385 | #Written for the 135-102DAG-J01 Thermistor
import Adafruit_BBIO.ADC as ADC
import time
import math as mt
import goprohero as gp
ADC.setup()
camera = gp.GoProHero('10.5.5.9', 'goprohero316')
cameraOn = False
recording = False
#See June 4 comment on http://ealmberg.blogspot.com/2015/06/4-june-15.html
Bvalue = 3348 #Beta
Ro = 1000 #Resistance at 25 C
To = 298.15 #Room temperature Kelvin
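# The loop below applies the Beta-model approximation for an NTC thermistor:
#     1/T = 1/To + (1/B) * ln(R/Ro)
# Sanity check (hypothetical reading): with R == Ro the log term vanishes,
# so T == To == 298.15 K (25 C), as expected.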
while camera :
adcValue = ADC.read("P9_35")
R = 1000/((1/adcValue) - 1) #Get measured resistance
T = 1.0/To + (1.0/Bvalue)*mt.log(R/Ro) #Formula from above blogspot address
T_K = 1.0/T
if T_K > 301 and not cameraOn :
cameraOn = camera.command('power', 'on')
time.sleep(5)
print str(T_K) + ' K'
t_c = 1.0/T - 273.15 #Convert to celsius
print str(t_c) + ' C'
t_f = t_c*(9/5.0) + 32.0 #Convert to Fahrenheit
print str(t_f)
if T_K > 301 and cameraOn and not recording :
recording = camera.command('record','on')
time.sleep(5)
    elif T_K <= 301 and cameraOn and recording :
        recording = not camera.command('record', 'off')
time.sleep(5)
cameraOn = not camera.command('power', 'sleep')
time.sleep(5)
time.sleep(1)
| gpl-2.0 | 8,119,238,455,491,777,000 | 29.777778 | 79 | 0.631769 | false | 2.694553 | false | false | false |
ooici/coi-services | ion/util/pydap/handlers/coverage/coverage_handler.py | 1 | 18170 | import re
import os
import numpy as np
import numexpr as ne
from urllib import unquote
from pyon.util.log import log
from email.utils import formatdate
from stat import ST_MTIME
from coverage_model.coverage import AbstractCoverage
from coverage_model.parameter_types import QuantityType,ConstantRangeType,ArrayType, ConstantType, RecordType
from coverage_model.parameter_types import CategoryType, BooleanType, ParameterFunctionType, SparseConstantType
from coverage_model.parameter_functions import ParameterFunctionException
from pyon.container.cc import Container
from ion.services.dm.inventory.dataset_management_service import DatasetManagementService
from pydap.model import DatasetType,BaseType, GridType, SequenceType
from pydap.handlers.lib import BaseHandler
from pyon.public import CFG, PRED
import time
import simplejson as json
import collections
import functools
numpy_boolean = '?'
numpy_integer_types = 'bhilqp'
numpy_uinteger_types = 'BHILQP'
numpy_floats = 'efdg'
numpy_complex = 'FDG'
numpy_object = 'O'
numpy_str = 'SUV'
def exception_wrapper(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except:
log.exception("Failed handling PyDAP request")
raise
return wrapper
def request_profile(enabled=False):
def profile(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
init = time.time()
retval = func(*args, **kwargs)
finished = time.time()
log.info('Request took %ss', finished-init)
return retval
return wrapper
return profile
class Handler(BaseHandler):
CACHE_LIMIT = CFG.get_safe('server.pydap.cache_limit', 5)
CACHE_EXPIRATION = CFG.get_safe('server.pydap.cache_expiration', 5)
REQUEST_LIMIT = CFG.get_safe('server.pydap.request_limit', 200) # MB
_coverages = collections.OrderedDict() # Cache has to be a class var because each handler is initialized per request
extensions = re.compile(r'^.*[0-9A-Za-z\-]{32}',re.IGNORECASE)
def __init__(self, filepath):
self.filepath = filepath
def calculate_bytes(self, bitmask, parameter_num):
timesteps = np.sum(bitmask)
# Assume 8 bytes per variable per timestep
count = 8 * parameter_num * timesteps
return count
def is_too_large(self, bitmask, parameter_num):
requested = self.calculate_bytes(bitmask, parameter_num)
return requested > (self.REQUEST_LIMIT * 1024**2)
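    # Worked example of the estimate above (numbers are hypothetical):
    # a bitmask selecting 100,000 timesteps across 20 parameters yields
    # 8 * 20 * 100,000 = 16,000,000 bytes (~15.3 MB), well under the
    # default 200 MB REQUEST_LIMIT.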
def get_numpy_type(self, data):
data = self.none_to_str(data)
result = data.dtype.char
if self.is_basestring(data):
result = 'S'
elif self.is_float(data):
result = 'd'
elif self.is_int(data):
result = 'i'
elif result == 'O':
self.json_dump(data)
result = 'O'
elif result == '?':
result = '?'
elif result not in ('d','f','h','i','b','H','I','B','S'):
raise TypeNotSupportedError('Type: %s (%s)' %(result, repr(data)))
return result
def json_dump(self, data):
try:
return json.dumps([i for i in data])
except TypeError as e:
raise TypeNotSupportedError(e)
@classmethod
def get_coverage(cls, data_product_id):
'''
Memoization (LRU) of _get_coverage
'''
if not data_product_id:
return
try:
result, ts = cls._coverages.pop(data_product_id)
if (time.time() - ts) > cls.CACHE_EXPIRATION:
result.close()
raise KeyError(data_product_id)
except KeyError:
if data_product_id is None:
return None
resource_registry = Container.instance.resource_registry
dataset_ids, _ = resource_registry.find_objects(data_product_id, PRED.hasDataset, id_only=True)
if not dataset_ids: return None
dataset_id = dataset_ids[0]
            result = DatasetManagementService._get_coverage(dataset_id, mode='r')
            if result is None:
                return None
            result.value_caching = False
            ts = time.time()
        if len(cls._coverages) >= cls.CACHE_LIMIT:
            # -- Evict the oldest cached coverage; use distinct names so the
            #    timestamp of the entry being cached is not clobbered.
            key, value = cls._coverages.popitem(0)
            old_coverage, old_ts = value
            old_coverage.close(timeout=5)
        # -- Cache under the same key used for lookup (data_product_id).
        cls._coverages[data_product_id] = result, ts
return result
def get_attrs(self, cov, name):
pc = cov.get_parameter_context(name)
attrs = {}
if hasattr(pc,'uom'):
attrs['units'] = pc.uom
if hasattr(pc,'display_name'):
attrs['long_name'] = pc.display_name
return attrs
def get_data(self,cov, name, bitmask):
#pc = cov.get_parameter_context(name)
try:
data = self.get_values(cov, name)
            # -- Apply the selection bitmask to the parameter values.
            data = data[bitmask]
#data = cov._range_value[name][:][bitmask]
except ParameterFunctionException:
data = np.empty(cov.num_timesteps(), dtype='object')
data = np.asanyarray(data)
if not data.shape:
data.shape = (1,)
return data
def get_time_data(self, cov, slice_):
return self.get_data(cov, cov.temporal_parameter_name, slice_)
def make_series(self, response, name, data, attrs, ttype):
base_type = BaseType(name=name, data=data, type=ttype, attributes=attrs)
#grid[dims[0]] = BaseType(name=dims[0], data=time_data, type=time_data.dtype.char, attributes=time_attrs, dimensions=dims, shape=time_data.shape)
return base_type
def filter_data(self, data):
if len(data.shape) > 1:
return self.ndim_stringify(data), 'S'
if data.dtype.char in numpy_integer_types + numpy_uinteger_types:
return data, data.dtype.char
if data.dtype.char in numpy_floats:
return data, data.dtype.char
if data.dtype.char in numpy_boolean:
return np.asanyarray(data, dtype='int32') ,'i'
if data.dtype.char in numpy_complex:
return self.stringify(data), 'S'
if data.dtype.char in numpy_object:
return self.stringify_inplace(data), 'S'
if data.dtype.char in numpy_str:
return data, 'S'
return np.asanyarray(['Unsupported Type' for i in data]), 'S'
def ndim_stringify(self, data):
retval = np.empty(data.shape[0], dtype='O')
try:
if len(data.shape)>1:
for i in xrange(data.shape[0]):
retval[i] = ','.join(map(lambda x : str(x), data[i].tolist()))
return retval
except:
retval = np.asanyarray(['None' for d in data])
return retval
def stringify(self, data):
retval = np.empty(data.shape, dtype='O')
try:
for i,obj in enumerate(data):
retval[i] = str(obj)
except:
retval = np.asanyarray(['None' for d in data])
return retval
def stringify_inplace(self, data):
try:
for i,obj in enumerate(data):
data[i] = str(obj)
except:
data = np.asanyarray(['None' for d in data])
return data
def get_bitmask(self, cov, fields, slices, selectors):
'''
        Returns a boolean bitmask selecting the timesteps that satisfy
        every selector constraint.
'''
bitmask = np.ones(cov.num_timesteps(), dtype=np.bool)
for selector in selectors:
field, operator, value = self.parse_selectors(selector)
if operator is None:
continue
values = self.get_values(cov, field)
expression = ' '.join(['values', operator, value])
bitmask = bitmask & ne.evaluate(expression)
return bitmask
def get_values(self, cov, field):
data_dict = cov.get_parameter_values(param_names=[field], fill_empty_params=True, as_record_array=False).get_data()
data = data_dict[field]
return data
def get_dataset(self, cov, fields, slices, selectors, dataset, response):
seq = SequenceType('data')
bitmask = self.get_bitmask(cov, fields, slices, selectors)
if self.is_too_large(bitmask, len(fields)):
log.error('Client request too large. \nFields: %s\nSelectors: %s', fields, selectors)
return
for name in fields:
# Strip the data. from the field
if name.startswith('data.'):
name = name[5:]
pc = cov.get_parameter_context(name)
if re.match(r'.*_[a-z0-9]{32}', name):
continue # Let's not do this
try:
data = self.get_data(cov, name, bitmask)
attrs = self.get_attrs(cov, name)
if isinstance(pc.param_type, QuantityType):
data, dtype = self.filter_data(data)
seq[name] = self.make_series(response, name, data, attrs, dtype)
elif isinstance(pc.param_type, ConstantType):
data, dtype = self.filter_data(data)
seq[name] = self.make_series(response, name, data, attrs, dtype)
elif isinstance(pc.param_type, ConstantRangeType):
#start = time.time()
#convert to string
try:
#scalar case
if data.shape == (2,):
data = np.atleast_1d('_'.join([str(data[0]), str(data[1])]))
else:
for i,d in enumerate(data):
f = [str(d[0]),str(d[1])]
data[i] = '_'.join(f)
except Exception, e:
data = np.asanyarray(['None' for d in data])
seq[name] = self.make_series(response, name, data, attrs, 'S')
elif isinstance(pc.param_type,BooleanType):
data, dtype = self.filter_data(data)
seq[name] = self.make_series(response, name, data, attrs, dtype)
elif isinstance(pc.param_type,CategoryType):
data, dtype = self.filter_data(data)
#start = time.time()
seq[name] = self.make_series(response, name, data, attrs, dtype)
elif isinstance(pc.param_type,ArrayType):
data, dtype = self.filter_data(data)
seq[name] = self.make_series(response, name, data, attrs, dtype)
elif isinstance(pc.param_type,RecordType):
data, dtype = self.filter_data(data)
seq[name] = self.make_series(response, name, data, attrs, dtype)
elif isinstance(pc.param_type, ParameterFunctionType):
data, dtype = self.filter_data(data)
seq[name] = self.make_series(response, name, data, attrs, dtype)
elif isinstance(pc.param_type, SparseConstantType):
data, dtype = self.filter_data(data)
seq[name] = self.make_series(response, name, data, attrs, dtype)
#dataset[name] = self.make_series(response, name, data, attrs, dtype)
# elif param.is_coordinate and cov.temporal_parameter_name == name:
# dataset[name] = BaseType(name=name, data=data, type=data.dtype.char, attributes=attrs, shape=data.shape)
# else:
# log.error("Unhandled parameter for parameter (%s) type: %s", name, pc.param_type.__class__.__name__)
except Exception, e:
log.exception('Problem reading cov %s %s', cov.name, e.__class__.__name__)
continue
dataset['data'] = seq
return dataset
def value_encoding_to_dap_type(self, value_encoding):
if value_encoding is None:
return 'S'
dt = np.dtype(value_encoding).char
if dt =='O':
return 'S'
return dt
def dap_type(self, context):
if isinstance(context.param_type, (ArrayType, ConstantRangeType, CategoryType, RecordType)):
return 'S'
return self.value_encoding_to_dap_type(context.param_type.value_encoding)
def handle_dds(self, coverage, dataset, fields):
cov = coverage
seq = SequenceType('data')
for name in fields:
# Strip the data. from the field
if name.startswith('data.'):
name = name[5:]
if re.match(r'.*_[a-z0-9]{32}', name):
continue # Let's not do this
try:
context = coverage.get_parameter_context(name)
attrs = self.get_attrs(cov, name)
#grid[name] = BaseType(name=name, type=self.dap_type(context), attributes=attrs, dimensions=(time_name,), shape=(coverage.num_timesteps,))
seq[name] = BaseType(name=name, type=self.dap_type(context), attributes=attrs, shape=(coverage.num_timesteps(),))
#grid[cov.temporal_parameter_name] = time_base
except Exception:
log.exception('Problem reading cov %s', str(cov))
continue
dataset['data'] = seq
return dataset
def parse_query_string(self,query_string):
tokens = query_string.split('&')
fields = []
selectors = []
slices = []
dap_selection_operators = ['<', '<=', '>', '>=', '=', '!=', '=~']
slice_operators = ['[', ':', ']']
for token in tokens:
token = unquote(token)
if not token: # ignore the case where the url ends in nothing or a &
continue
token_identified = False
for selector in dap_selection_operators:
if selector in token:
selectors.append(token)
token_identified = True
break
for operator in slice_operators:
if operator in token:
slices.append(token)
token_identified = True
break
if not token_identified:
fields = token.split(',')
return fields, slices, selectors
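    # A minimal classification sketch (the query string is hypothetical):
    #
    #     parse_query_string("time,temp&temp>=15&time[0:1:10]")
    #     # fields    => ['time', 'temp']
    #     # selectors => ['temp>=15']
    #     # slices    => ['time[0:1:10]']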
def parse_slices(self,slice_operator):
pivot = slice_operator.find('[')
field = slice_operator[:pivot]
slicer = slice_operator[pivot:]
# Strip away the outer []s
slicer = slicer[1:-1]
        # Separate the slice into tokens separated by :
        # NOTE: DAP hyperslabs are [start:stride:stop]; Python slices are
        # slice(start, stop, step), so the arguments must be reordered.
        start, stride, stop = slicer.split(':')
        start = int(start)
        stride = int(stride)
        stop = int(stop) + 1
        slice_ = slice(start, stop, stride)
return field, slice_
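    # A minimal sketch (the hyperslab is hypothetical): a DAP-style
    # "temp[0:1:10]" selects indexes 0..10 with stride 1:
    #
    #     parse_slices("temp[0:1:10]")   # => ("temp", slice(0, 11, 1))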
def parse_selectors(self, selector):
matches = re.match(r'([a-zA-Z0-9-_\.]+)(<=|<|>=|>|=~|!=|=)(.*)', selector)
field, operator, value = matches.groups()
value = value.replace('"','')
value = value.replace("'",'')
if 'data.' in field: # strip away the data prefix
field = field[5:]
        if operator == '=':
            operator = '=='
        elif operator == '=~':
            return None, None, None
return field, operator, value
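    # A minimal sketch (selectors are hypothetical):
    #
    #     parse_selectors("data.temp>=15")   # => ("temp", ">=", "15")
    #     parse_selectors("site='A'")        # => ("site", "==", "A")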
@request_profile(CFG.get_safe('server.pydap.profile_enabled', True))
@exception_wrapper
def parse_constraints(self, environ):
base, data_product_id = os.path.split(self.filepath)
coverage = self.get_coverage(data_product_id)
last_modified = formatdate(time.mktime(time.localtime(os.stat(self.filepath)[ST_MTIME])))
environ['pydap.headers'].append(('Last-modified', last_modified))
atts = {}
atts['title'] = coverage.name
dataset = DatasetType(coverage.name) #, attributes=atts)
response = environ['pydap.response']
if response == 'dods':
query_string = environ['QUERY_STRING']
fields, slices, selectors = self.parse_query_string(query_string)
elif response in ('dds', 'das'):
fields = [] # All fields
slices = []
selectors = []
all_vars = coverage.list_parameters()
if not fields:
fields = all_vars
if response == "dods":
dataset = self.get_dataset(coverage, fields, slices, selectors, dataset, response)
elif response in ('dds', 'das'):
self.handle_dds(coverage, dataset, fields)
return dataset
def none_to_str(self, data):
for i,d in enumerate(data):
if d is None:
data[i] = 'None'
return data
def is_basestring(self, data):
for d in data:
if not isinstance(d, basestring):
return False
return True
def is_float(self, data):
for d in data:
if not isinstance(d, float):
return False
return True
def is_int(self, data):
for d in data:
if not isinstance(d, int):
return False
return True
def is_collection(self, data):
for d in data:
if not isinstance(d, (list,tuple,np.ndarray)):
return False
return True
def update_slice_object(self, slice_, fill_index):
slice_ = slice_[0] if slice_ else slice(None)
#need to truncate slice here in case time has fill values
if slice_.start is None and slice_.stop is None and fill_index >=0:
slice_ = slice(0, fill_index, 1)
if slice_.start and slice_.stop is None:
if fill_index > slice_.start:
return None
        if slice_.stop is not None and fill_index > slice_.stop:
            # -- slice objects are immutable: build a new slice instead of
            #    assigning to slice_.stop.
            slice_ = slice(slice_.start, fill_index, slice_.step)
if slice_.start is not None and slice_.start == slice_.stop:
slice_ = slice(slice_.start, slice_.stop+1, slice_.step)
return slice_
class TypeNotSupportedError(Exception):
pass
| bsd-2-clause | -661,066,451,977,343,600 | 36.933194 | 154 | 0.557567 | false | 4.063968 | false | false | false |
bitesofcode/projexui | projexui/widgets/xquerybuilderwidget/xquerybuilderwidget.py | 2 | 9247 | #!/usr/bin/python
""" Defines an interface to allow users to build their queries on the fly. """
# define authorship information
__authors__ = ['Eric Hulser']
__author__ = ','.join(__authors__)
__credits__ = []
__copyright__ = 'Copyright (c) 2011, Projex Software'
__license__ = 'LGPL'
# maintenance information
__maintainer__ = 'Projex Software'
__email__ = 'team@projexsoftware.com'
#------------------------------------------------------------------------------
from projex.text import nativestring
from projexui.qt import Signal
from projexui.qt.QtCore import Qt
from projexui.qt.QtGui import QWidget,\
QVBoxLayout
import projexui
from projexui.widgets.xquerybuilderwidget.xqueryrule \
import XQueryRule
from projexui.widgets.xquerybuilderwidget.xquerylinewidget \
import XQueryLineWidget
class XQueryBuilderWidget(QWidget):
""" """
saveRequested = Signal()
resetRequested = Signal()
cancelRequested = Signal()
def __init__( self, parent = None ):
super(XQueryBuilderWidget, self).__init__( parent )
# load the user interface
projexui.loadUi(__file__, self)
self.setMinimumWidth(470)
# define custom properties
self._rules = {}
self._defaultQuery = []
self._completionTerms = []
self._minimumCount = 1
# set default properties
self._container = QWidget(self)
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(2)
layout.addStretch(1)
self._container.setLayout(layout)
self.uiQueryAREA.setWidget(self._container)
# create connections
self.uiResetBTN.clicked.connect( self.emitResetRequested )
self.uiSaveBTN.clicked.connect( self.emitSaveRequested )
self.uiCancelBTN.clicked.connect( self.emitCancelRequested )
self.resetRequested.connect( self.reset )
def addLineWidget( self, query = None ):
"""
Adds a new line widget to the system with the given values.
        :param query | (<str> term, <str> operator, <str> value) || None
"""
widget = XQueryLineWidget(self)
widget.setTerms(sorted(self._rules.keys()))
widget.setQuery(query)
index = self._container.layout().count() - 1
self._container.layout().insertWidget(index, widget)
widget.addRequested.connect( self.addLineWidget )
widget.removeRequested.connect( self.removeLineWidget )
# update the remove enabled options for these widgets
self.updateRemoveEnabled()
def addRule( self, rule ):
"""
Adds a rule to the system.
:param rule | <XQueryRule>
"""
self._rules[rule.term()] = rule
self.updateRules()
def clear( self ):
"""
Clears out all the widgets from the system.
"""
for lineWidget in self.lineWidgets():
lineWidget.setParent(None)
lineWidget.deleteLater()
def completionTerms( self ):
"""
Returns the list of terms that will be used as a global override
for completion terms when the query rule generates a QLineEdit instance.
:return [<str>, ..]
"""
return self._completionTerms
def count( self ):
"""
Returns the count of the line widgets in the system.
:return <int>
"""
return len(self.lineWidgets())
def currentQuery( self ):
"""
Returns the current query string for this widget.
:return [(<str> term, <str> operator, <str> value), ..]
"""
widgets = self.lineWidgets()
output = []
for widget in widgets:
output.append(widget.query())
return output
def defaultQuery( self ):
"""
Returns the default query for the system.
:return [(<str> term, <str> operator, <str> value), ..]
"""
return self._defaultQuery
def keyPressEvent( self, event ):
"""
Emits the save requested signal for this builder for when the enter
or return press is clicked.
:param event | <QKeyEvent>
"""
if ( event.key() in (Qt.Key_Enter, Qt.Key_Return) ):
self.emitSaveRequested()
super(XQueryBuilderWidget, self).keyPressEvent(event)
def emitCancelRequested( self ):
"""
Emits the cancel requested signal.
"""
if ( not self.signalsBlocked() ):
self.cancelRequested.emit()
def emitResetRequested( self ):
"""
        Emits the reset requested signal.
"""
if ( not self.signalsBlocked() ):
self.resetRequested.emit()
def emitSaveRequested( self ):
"""
Emits the save requested signal.
"""
if ( not self.signalsBlocked() ):
self.saveRequested.emit()
def findRule( self, term ):
"""
        Looks up a rule by the given term.
:param term | <str>
:return <XQueryRule> || None
"""
return self._rules.get(nativestring(term))
def removeLineWidget( self, widget ):
"""
Removes the line widget from the query.
:param widget | <XQueryLineWidget>
"""
widget.setParent(None)
widget.deleteLater()
self.updateRemoveEnabled()
def minimumCount( self ):
"""
Defines the minimum number of query widgets that are allowed.
:return <int>
"""
return self._minimumCount
def lineWidgets( self ):
"""
Returns a list of line widgets for this system.
:return [<XQueryLineWidget>, ..]
"""
return self.findChildren(XQueryLineWidget)
def reset( self ):
"""
Resets the system to the default query.
"""
self.setCurrentQuery(self.defaultQuery())
def setCompletionTerms( self, terms ):
"""
Sets the list of terms that will be used as a global override
for completion terms when the query rule generates a QLineEdit instance.
:param terms | [<str>, ..]
"""
self._completionTerms = terms
def setCurrentQuery( self, query ):
"""
        Sets the query for this system to the given query.
:param query | [(<str> term, <str> operator, <str> value), ..]
"""
self.clear()
for entry in query:
self.addLineWidget(entry)
# make sure we have the minimum number of widgets
for i in range(self.minimumCount() - len(query)):
self.addLineWidget()
def setDefaultQuery( self, query ):
"""
Sets the default query that will be used when the user clicks on the \
reset button or the reset method is called.
:param query | [(<str> term, <str> operator, <str> value), ..]
"""
self._defaultQuery = query[:]
def setMinimumCount( self, count ):
"""
Sets the minimum number of line widgets that are allowed at any \
given time.
:param count | <int>
"""
self._minimumCount = count
def setRules( self, rules ):
"""
Sets all the rules for this builder.
:param rules | [<XQueryRule>, ..]
"""
if ( type(rules) in (list, tuple) ):
self._rules = dict([(x.term(), x) for x in rules])
self.updateRules()
return True
elif ( type(rules) == dict ):
self._rules = rules.copy()
self.updateRules()
return True
else:
return False
def setTerms( self, terms ):
"""
Sets a simple rule list by accepting a list of strings for terms. \
This is a convenience method for the setRules method.
:param rules | [<str> term, ..]
"""
return self.setRules([XQueryRule(term = term) for term in terms])
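    # A minimal wiring sketch (names below are hypothetical, not part of
    # this module):
    #
    #     widget = XQueryBuilderWidget()
    #     widget.setTerms(['name', 'status'])
    #     widget.setDefaultQuery([('status', 'is', 'open')])
    #     widget.reset()                        # shows the default query
    #     widget.saveRequested.connect(on_save)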
def updateRemoveEnabled( self ):
"""
        Updates the remove-enabled state based on the current number of line widgets.
"""
lineWidgets = self.lineWidgets()
count = len(lineWidgets)
state = self.minimumCount() < count
for widget in lineWidgets:
widget.setRemoveEnabled(state)
def updateRules( self ):
"""
Updates the query line items to match the latest rule options.
"""
terms = sorted(self._rules.keys())
for child in self.lineWidgets():
            child.setTerms(terms)
| lgpl-3.0 | 5,430,064,327,202,641,000 | 29.153595 | 80 | 0.533254 | false | 4.575458 | false | false | false
kubeflow/kfp-tekton | sdk/python/tests/compiler/testdata/basic_no_decorator.py | 1 | 3734 | # Copyright 2020 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp.dsl as dsl
# import kfp.gcp as gcp
message_param = dsl.PipelineParam(name='message', value='When flies fly behind flies')
output_path_param = dsl.PipelineParam(name='outputpath', value='default_output')
class GetFrequentWordOp(dsl.ContainerOp):
"""A get frequent word class representing a component in ML Pipelines.
The class provides a nice interface to users by hiding details such as container,
command, arguments.
"""
def __init__(self, name, message="When flies fly behind flies,"
" then flies are following flies."):
"""__init__
Args:
name: An identifier of the step which needs to be unique within a pipeline.
message: a dsl.PipelineParam object representing an input message.
"""
super(GetFrequentWordOp, self).__init__(
name=name,
image='python:3.6-jessie',
command=['sh', '-c'],
arguments=['python -c "from collections import Counter; '
'words = Counter(\'%s\'.split()); print(max(words, key=words.get))" '
'| tee /tmp/message.txt' % message],
file_outputs={'word': '/tmp/message.txt'})
class SaveMessageOp(dsl.ContainerOp):
"""A class representing a component in ML Pipelines.
It saves a message to a given output_path.
"""
def __init__(self, name, message, output_path):
"""Args:
name: An identifier of the step which needs to be unique within a pipeline.
message: a dsl.PipelineParam object representing the message to be saved.
output_path: a dsl.PipelineParam object representing the GCS path for output file.
"""
super(SaveMessageOp, self).__init__(
name=name,
image='google/cloud-sdk',
command=['sh', '-c'],
arguments=['echo "%s" | tee /tmp/results.txt | gsutil cp /tmp/results.txt %s'
% (message, output_path)])
class ExitHandlerOp(dsl.ContainerOp):
"""A class representing a component in ML Pipelines."""
def __init__(self, name):
super(ExitHandlerOp, self).__init__(
name=name,
image='python:3.6-jessie',
command=['sh', '-c'],
arguments=['echo exit!'])
def save_most_frequent_word():
exit_op = ExitHandlerOp('exiting')
with dsl.ExitHandler(exit_op):
counter = GetFrequentWordOp(
name='get-Frequent',
message=message_param)
counter.container.set_memory_request('200M')
saver = SaveMessageOp(
name='save',
message=counter.output,
output_path=output_path_param)
saver.container.set_cpu_limit('0.5')
# saver.container.set_gpu_limit('2')
saver.add_node_selector_constraint('kubernetes.io/os', 'linux')
# saver.apply(gcp.use_tpu(tpu_cores=2, tpu_resource='v2', tf_version='1.12'))
if __name__ == '__main__':
from kfp_tekton.compiler import TektonCompiler
tkc = TektonCompiler()
compiled_workflow = tkc._create_workflow(
save_most_frequent_word,
'Save Most Frequent Word',
'Get Most Frequent Word and Save to GCS',
[message_param, output_path_param],
None)
tkc._write_workflow(compiled_workflow, __file__.replace('.py', '.yaml'))
| apache-2.0 | 7,977,434,732,202,649,000 | 36.34 | 91 | 0.657204 | false | 3.69703 | false | false | false |
lowiki-org/localwiki-backend-server | localwiki/utils/views.py | 1 | 12193 | import time
from django.utils.decorators import classonlymethod
from django.conf import settings
from django.http import HttpResponse, Http404, HttpResponseForbidden, HttpResponseServerError
from django.utils import simplejson as json
from django.utils.cache import patch_response_headers, has_vary_header
from django.utils.decorators import method_decorator
from django.views.decorators.vary import vary_on_headers as dj_vary_on_headers
from django.views.decorators.cache import never_cache
from django.views.generic import View, RedirectView, TemplateView
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django.core.cache import cache
from django.views.decorators.csrf import requires_csrf_token
from django.template import (Context, loader, TemplateDoesNotExist)
from django.utils.cache import get_max_age
from django.http import HttpRequest
from django.template.loader import render_to_string
from django.template.context import RequestContext
from versionutils.versioning.views import RevertView, DeleteView
from . import take_n_from
# 29 days, effectively infinite in cache years
# XXX NOTE: For some reason, the memcached client we're using
# gives a client error when sending timestamp-style expiration
# dates -- e.g. > 30 days timestamps. So, for now we must make
# sure and always use <= 30 day timeouts, which should be fine.
DEFAULT_MEMCACHED_TIMEOUT = 60 * 60 * 24 * 29
class ForbiddenException(Exception):
    pass
class NeverCacheMixin(object):
@method_decorator(never_cache)
def dispatch(self, *args, **kwargs):
return super(NeverCacheMixin, self).dispatch(*args, **kwargs)
class CacheMixin(object):
cache_timeout = DEFAULT_MEMCACHED_TIMEOUT
cache_keep_forever = False
@staticmethod
def get_cache_key(request=None, **kwargs):
raise NotImplementedError
def _should_cache(self, request, response):
if response.streaming or response.status_code != 200:
return False
# Don't cache responses that set a user-specific (and maybe security
# sensitive) cookie in response to a cookie-less request.
if not request.COOKIES and response.cookies and has_vary_header(response, 'Cookie'):
return False
if get_max_age(response) == 0:
return False
return True
def _get_from_cache(self, method, request, *args, **kwargs):
key = self.get_cache_key(request=request, **kwargs)
response = cache.get(key)
if response is None:
response = getattr(super(CacheMixin, self), method)(request, *args, **kwargs)
if hasattr(response, 'render') and callable(response.render):
response.add_post_render_callback(
lambda r: cache.set(key, r, self.cache_timeout)
)
else:
cache.set(key, response, self.cache_timeout)
if self._should_cache(request, response):
# Mark to keep around in Varnish and other cache layers
if self.cache_keep_forever:
response['X-KEEPME'] = True
patch_response_headers(response, self.cache_timeout)
return response
@staticmethod
def invalidate(request, **kwargs):
key = CacheMixin.get_cache_key(request=request, **kwargs)
cache.delete(key)
def get(self, request, *args, **kwargs):
return self._get_from_cache('get', request, *args, **kwargs)
def head(self, request, *args, **kwargs):
return self._get_from_cache('head', request, *args, **kwargs)
@classmethod
def get_region_slug_param(*args, **kwargs):
from regions.models import RegionSettings
if kwargs.get('region'):
return kwargs.get('region')
if not kwargs.get('request'):
raise KeyError("Need either `request` or a `region` parameter.")
request = kwargs.get('request')
return RegionSettings.objects.get(domain=request.META['HTTP_HOST']).region.slug
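    # A minimal subclassing sketch (the view and key scheme are hypothetical;
    # only get_cache_key() is required by this mixin):
    #
    #     class PageDetailView(CacheMixin, TemplateView):
    #         cache_timeout = 60 * 15
    #
    #         @staticmethod
    #         def get_cache_key(request=None, **kwargs):
    #             region = CacheMixin.get_region_slug_param(
    #                 request=request, **kwargs)
    #             return 'page:%s:%s' % (region, kwargs.get('slug', ''))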
class Custom404Mixin(object):
@classonlymethod
def as_view(cls, **initargs):
default_view = super(Custom404Mixin, cls).as_view(**initargs)
def view_or_handler404(request, *args, **kwargs):
self = cls(**initargs)
try:
return default_view(request, *args, **kwargs)
except Http404 as e:
if hasattr(self, 'handler404'):
return self.handler404(request, *args, **kwargs)
raise e
return view_or_handler404
class CreateObjectMixin(object):
    def create_object(self):
        return self.form_class._meta.model()
def get_object(self, queryset=None):
try:
return super(CreateObjectMixin, self).get_object(queryset)
except Http404:
return self.create_object()
class JSONResponseMixin(object):
def render_to_response(self, context):
"Returns a JSON response containing 'context' as payload"
return self.get_json_response(self.convert_context_to_json(context))
def get_json_response(self, content, **httpresponse_kwargs):
"Construct an `HttpResponse` object."
return HttpResponse(content, content_type='application/json',
**httpresponse_kwargs)
def convert_context_to_json(self, context):
"""
Convert the context dictionary into a JSON object.
Note: Make sure that the entire context dictionary is serializable
"""
return json.dumps(context)
class JSONView(View, JSONResponseMixin):
"""
A JSONView returns, on GET, a json dictionary containing the values of
get_context_data().
"""
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
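    # A minimal sketch (view name and payload are hypothetical):
    #
    #     class StatusJSON(JSONView):
    #         def get_context_data(self, **kwargs):
    #             return {'ok': True, 'version': 2}
    #
    # A GET request then receives: {"ok": true, "version": 2}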
class PermissionRequiredMixin(object):
"""
View mixin for verifying permissions before updating an existing object
Attrs:
permission: A string representing the permission that's required
on the object. E.g. 'page.change_page'. Override
permission_for_object() to allow more complex permission
relationships.
forbidden_message: A string to display when the permisson is not
allowed.
"""
permission = None
forbidden_message = _('Sorry, you are not allowed to perform this action.')
forbidden_message_anon = _('Anonymous users may not perform this action. '
'Please <a href="/Users/login/">log in</a>.')
def get_protected_object(self):
"""
Returns the object that should be used to check permissions.
Override this to use a different object as the "guard".
"""
return self.object
def get_protected_objects(self):
"""
Returns the objects that should be used to check permissions.
"""
return [self.get_protected_object()]
def permission_for_object(self, obj):
"""
Gets the permission that's required for `obj`.
Override this to allow more complex permission relationships.
"""
return self.permission
def get_object_idempotent(self):
return self.object
def patch_get_object(self):
# Since get_object will get called again, we want it to be idempotent
self.get_object = self.get_object_idempotent
def dispatch(self, request, *args, **kwargs):
self.request = request
self.args = args
self.kwargs = kwargs
if hasattr(self, 'get_object'):
self.object = self.get_object()
self.patch_get_object()
protected_objects = self.get_protected_objects()
for obj in protected_objects:
if not request.user.has_perm(self.permission_for_object(obj), obj):
if request.user.is_authenticated():
msg = self.forbidden_message
else:
msg = self.forbidden_message_anon
html = render_to_string('403.html', {'message': msg},
RequestContext(request))
return HttpResponseForbidden(html)
return super(PermissionRequiredMixin, self).dispatch(request, *args,
**kwargs)
class NamedRedirectView(RedirectView):
name = None
def get_redirect_url(self, **kwargs):
return reverse(self.name, kwargs=kwargs)
class AuthenticationRequired(object):
"""
Mixin to make a view only usable to authenticated users.
"""
forbidden_message = _('Sorry, you are not allowed to perform this action.')
forbidden_template_name = '403.html'
def get_forbidden_message(self):
return self.forbidden_message
def dispatch(self, request, *args, **kwargs):
self.request = request
self.args = args
self.kwargs = kwargs
if self.request.user.is_authenticated():
return super(AuthenticationRequired, self).dispatch(request, *args, **kwargs)
msg = self.get_forbidden_message()
html = render_to_string(self.forbidden_template_name, {'message': msg}, RequestContext(request))
return HttpResponseForbidden(html)
class GetCSRFCookieView(TemplateView):
template_name = 'utils/get_csrf_cookie.html'
class MultipleTypesPaginatedView(TemplateView):
items_per_page = 50
context_object_name = 'objects'
def get_object_lists(self):
raise NotImplementedError
def get_pagination_key(self, qs):
"""
Args:
qs: The queryset or iterable we want to get the querystring lookup key for.
Returns:
The querystring lookup. By default, this is `qs.model.__name__.lower()`
"""
return qs.model.__name__.lower()
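    # A minimal sketch (model name hypothetical): for a queryset of a model
    # named ``MapItem`` the key is ``'mapitem'``, so a request can paginate
    # that list via ``?mapitem=50``.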
def get_pagination_merge_key(self):
"""
Returns:
A callable that, when called, returns the value to use for the merge +
sort. Default: no further sorting (stay in place).
"""
return None
def get_pagination_objects(self):
items_with_indexes = []
id_to_page_key = {}
for (_id, qs) in enumerate(self.get_object_lists()):
pagination_key = self.get_pagination_key(qs)
page = int(self.request.GET.get(pagination_key, 0))
items_with_indexes.append((qs, page))
id_to_page_key[_id] = pagination_key
items, indexes, has_more_left = take_n_from(
items_with_indexes,
self.items_per_page,
merge_key=self.get_pagination_merge_key()
)
self.has_more_left = has_more_left
self.current_indexes = {}
for (num, index) in enumerate(indexes):
self.current_indexes[id_to_page_key[num]] = index
return items
def get_context_data(self, *args, **kwargs):
c = super(MultipleTypesPaginatedView, self).get_context_data(*args, **kwargs)
c[self.context_object_name] = self.get_pagination_objects()
c['pagination_has_more_left'] = self.has_more_left
c['pagination_next'] = ''
if self.has_more_left:
qitems = []
for pagelabel, index in self.current_indexes.items():
qitems.append('%s=%s' % (pagelabel, index))
c['pagination_next'] = '?' + '&'.join(qitems)
return c
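# Illustrative subclass of MultipleTypesPaginatedView (models and template
# are assumptions). Each queryset gets its own querystring page key derived
# from get_pagination_key(), e.g. ?article=50&comment=30:
#
#     class ActivityFeedView(MultipleTypesPaginatedView):
#         template_name = 'feed.html'
#
#         def get_object_lists(self):
#             return [Article.objects.all(), Comment.objects.all()]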
class RevertView(RevertView):
def allow_admin_actions(self):
return self.request.user.is_staff
class DeleteView(DeleteView):
def allow_admin_actions(self):
return self.request.user.is_staff
@requires_csrf_token
def server_error(request, template_name='500.html'):
"""
500 error handler.
Templates: :template:`500.html`
Context: Contains {{ STATIC_URL }} and {{ LANGUAGE_CODE }}
"""
try:
template = loader.get_template(template_name)
except TemplateDoesNotExist:
return HttpResponseServerError('<h1>Server Error (500)</h1>')
return HttpResponseServerError(template.render(Context({
'STATIC_URL': settings.STATIC_URL,
'LANGUAGE_CODE': settings.LANGUAGE_CODE,
})))
| gpl-2.0 | -5,861,725,121,537,733,000 | 33.541076 | 104 | 0.637989 | false | 4.224879 | false | false | false |
nandoflorestan/poorbox | setup.py | 1 | 2232 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# http://peak.telecommunity.com/DevCenter/setuptools#developer-s-guide
# from distutils.core import setup
from setuptools import setup, find_packages
def read_text(filename, dir=None):
import codecs
import os
if dir is None:
dir = os.path.abspath(os.path.dirname(__file__))
filename = os.path.join(dir, filename)
with codecs.open(filename, 'r', encoding='utf-8') as f:
return f.read()
setup(
url='https://github.com/nandoflorestan/python-dropbox-backup',
name="poorbox",
version='0.1',
author='Nando Florestan',
author_email="nandoflorestan@gmail.com",
license='BSD',
description="Downloads a dropbox directory via the dropbox REST API. "
"Downloads only the changed files. Useful for limited environments.",
long_description=read_text('README.rst'),
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='tests',
install_requires=['six', 'dropbox>=1.5.1'],
keywords=['dropbox', "python", 'REST', 'API', 'download', 'console'],
classifiers=[ # http://pypi.python.org/pypi?:action=list_classifiers
"Development Status :: 4 - Beta",
# "Development Status :: 5 - Production/Stable",
'Environment :: Console',
"Environment :: No Input/Output (Daemon)",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: System Administrators",
'License :: OSI Approved :: BSD License',
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Topic :: Communications :: File Sharing",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: System :: Archiving :: Mirroring",
"Topic :: System :: Software Distribution",
"Topic :: System :: Systems Administration",
"Topic :: Utilities",
],
entry_points='''
[console_scripts]
poorbox = poorbox:main
''',
)
| bsd-3-clause | -8,014,234,604,394,212,000 | 36.2 | 77 | 0.627688 | false | 3.97861 | false | false | false |
power12317/weblate | weblate/trans/management/commands/import_project.py | 1 | 4304 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2013 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.core.management.base import BaseCommand, CommandError
from weblate.trans.models import SubProject, Project
from glob import glob
import tempfile
import git
import logging
import os
import re
import fnmatch
logger = logging.getLogger('weblate')
class Command(BaseCommand):
help = 'imports projects with more subprojects'
args = '<project> <gitrepo> <branch> <filemask>'
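    # Illustrative invocation (repository URL and mask are assumptions):
    #
    #     ./manage.py import_project myproject \
    #         git://example.com/repo.git master 'po/**.po'
    #
    # The '**' in the mask becomes each subproject's name; the first match
    # keeps the full git checkout and the rest share it via a weblate:// URL.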
def get_name(self, maskre, path):
matches = maskre.match(path)
return matches.group(1)
def get_match_regexp(self, filemask):
'''
Prepare regexp for file matching
'''
match = fnmatch.translate(filemask)
match = match.replace('.*.*', '(.*.*)')
return re.compile(match)
def handle(self, *args, **options):
'''
Automatic import of project.
'''
if len(args) != 4:
raise CommandError('Not enough parameters!')
# Read params
prjname, repo, branch, filemask = args
maskre = self.get_match_regexp(filemask)
# Try to get project
try:
project = Project.objects.get(slug=prjname)
except Project.DoesNotExist:
raise CommandError(
'Project %s does not exist, you need to create it first!' % prjname
)
# Do we have correct mask?
        if '**' not in filemask:
            raise CommandError(
                'You need to specify a double wildcard for the subproject part of the match!'
            )
# Create temporary working dir
workdir = tempfile.mkdtemp(dir=project.get_path())
os.chmod(workdir, 0755)
# Initialize git repository
logger.info('Initializing git repository...')
gitrepo = git.Repo.init(workdir)
gitrepo.git.remote('add', 'origin', repo)
logger.info('Fetching remote git repository...')
gitrepo.git.remote('update', 'origin')
gitrepo.git.branch('--track', branch, 'origin/%s' % branch)
logger.info('Updating working copy in git repository...')
gitrepo.git.checkout(branch)
# Find matching files
matches = glob(os.path.join(workdir, filemask))
matches = [f.replace(workdir, '').strip('/') for f in matches]
logger.info('Found %d matching files', len(matches))
# Parse subproject names out of them
names = set()
for match in matches:
names.add(self.get_name(maskre, match))
logger.info('Found %d subprojects', len(names))
# Create first subproject (this one will get full git repo)
name = names.pop()
logger.info('Creating subproject %s as main subproject', name)
# Rename gitrepository to new name
os.rename(
workdir,
os.path.join(project.get_path(), name)
)
SubProject.objects.create(
name=name,
slug=name,
project=project,
repo=repo,
branch=branch,
filemask=filemask.replace('**', name)
)
sharedrepo = 'weblate://%s/%s' % (project.slug, name)
# Create remaining subprojects sharing git repository
for name in names:
logger.info('Creating subproject %s', name)
SubProject.objects.create(
name=name,
slug=name,
project=project,
repo=sharedrepo,
branch=branch,
filemask=filemask.replace('**', name)
)
| gpl-3.0 | 3,533,665,913,927,857,000 | 31.097015 | 87 | 0.608231 | false | 4.216667 | false | false | false |
timole/assortment-planning-self-organizing-map | src/csv2sompack-eur-js.py | 1 | 5522 | #!/usr/bin/python
import re, sys, json
import xml.etree.ElementTree as et
import numpy
def parseColumnNames(csv):
columnNames = re.split(',', csv)
result = []
for columnName in columnNames:
result.append(columnName.replace(' ', '').replace('"', '').strip())
return result
def parseRowData(csv):
values = re.split(',', csv)
result = []
for value in values:
if(value.strip()[0] == '"' and value.strip()[-1] == '"'):
value = value.strip()[1:-1]
result.append(value.strip())
return result
# "SALES_PERIOD_TILL_DAY","SALES_YEAR_WEEK","OUTLET_ID","PERIODICAL_PRODUCT_ID","PRODUCT_ID","DELIVERED","RETURNED","SOLD"
periodId = sys.argv[1]
filename = sys.argv[2]
outputFilename = sys.argv[3]
outputFilename2 = sys.argv[4]
outputFilenameOutletJs = sys.argv[5]
outputFilename3 = sys.argv[6]
f = open(filename, "r")
out = open(outputFilename, "w")
out2 = open(outputFilename2, "w")
outOutletJs = open(outputFilenameOutletJs, "w")
out3 = open(outputFilename3, "w")
print "Period id: " + periodId
print "Input file: " + filename
print "Output file: " + outputFilename
print "JS output file: " + outputFilenameOutletJs
first = True
dataparsed = False
j = {}
data = []
root = {}
data.append(root)
fields = []
rows = []
parsed = 0
outletIdSet = set()
productIdSet = set()
i = 0
maxLines = -1 #100
#DATE_INDEX=0
#YEAR_WEEK_INDEX=1
OUTLET_ID_INDEX=0
#PERIODICAL_PRODUCT_ID_INDEX = 1
#PRODUCT_ID_INDEX = 1
PERIODICAL_PRODUCT_ID_INDEX = 1
#DELIVERED_INDEX = 5
#RETURNED_INDEX = 6
SOLD_INDEX = 2
for line in f:
if first:
columnNames = parseColumnNames(line)
for columnName in columnNames:
print "col: " + columnName
first = False
else:
if i == maxLines:
break
i = i + 1
rowData = parseRowData(line)
outletId = rowData[OUTLET_ID_INDEX]
productId = rowData[PERIODICAL_PRODUCT_ID_INDEX]
sold = float(rowData[SOLD_INDEX])
if outletId not in outletIdSet:
outletIdSet.add(outletId)
if productId not in productIdSet:
productIdSet.add(productId)
if i % 1000 == 0:
sys.stdout.write(".")
sys.stdout.flush()
numOutlets = len(outletIdSet)
numProducts = len(productIdSet)
print
print "outlets: " + str(numOutlets)
print "products: " + str(numProducts)
outletIds = list(outletIdSet)
productIds = list(productIdSet)
m = numpy.zeros( (numProducts, numOutlets), dtype=numpy.float)
for i in range(0, numProducts):
for j in range(0, numOutlets):
m[i, j] = -1
f = open(filename, "r")
first = True
i = 0
for line in f:
if first:
first = False
else:
if i == maxLines:
break
i = i + 1
rowData = parseRowData(line)
outletId = rowData[OUTLET_ID_INDEX]
productId = rowData[PERIODICAL_PRODUCT_ID_INDEX]
sold = float(rowData[SOLD_INDEX])
row = productIds.index(productId)
col = outletIds.index(outletId)
prev = float(m[row, col])
if prev == -1:
prev = 0
        # accumulate sales when the same (product, outlet) pair repeats
        m[row, col] = prev + float(sold)
if i % 1000 == 0:
sys.stdout.write(".")
sys.stdout.flush()
print
print numOutlets
out.write(str(numOutlets) + '\n')
out.write("# ")
for outletId in outletIds:
out.write(str(outletId) + " ")
out.write('\n')
for i in range(0, numProducts):
for j in range(0, numOutlets):
val = m[i,j]
if val == -1:
val = ""
else:
val = str(val)
out.write(val + " ")
out.write(productIds[i])
out.write('\n')
sys.stdout.write(".")
sys.stdout.flush()
print
print numProducts
out2.write(str(numProducts) + '\n')
out2.write("# ")
isFirstProductId = True
out3.write(";")
for productId in productIds:
if(not isFirstProductId):
out3.write(";")
isFirstProductId = False
out2.write(str(productId) + " ")
out3.write(str(productId))
out2.write('\n')
out3.write('\n')
for i in range(0, numOutlets):
out3.write(outletIds[i])
out3.write(";")
isFirstProductId = True
for j in range(0, numProducts):
val = m[j,i]
if val == -1:
val = ""
else:
val = str(val)
out2.write(val + " ")
if(not isFirstProductId):
out3.write(";")
isFirstProductId = False
out3.write(val)
out2.write(outletIds[i])
out2.write('\n')
out3.write('\n')
sys.stdout.write(".")
sys.stdout.flush()
print
print "Outlet JS"
varName = "outletSalesByPeriod";
outOutletJs.write("var "+varName+" = "+varName+" || {};\n\n");
outOutletJs.write(varName+"[\"" + periodId + "\"] = {};\n");
outOutletJs.write(varName+"[\"" + periodId + "\"].outletIds = ");
json.dump(outletIds, outOutletJs)
outOutletJs.write(";\n");
outOutletJs.write(varName+"[\"" + periodId + "\"].productIds = ");
json.dump(productIds, outOutletJs)
outOutletJs.write(";\n");
outletSales = []
for i in range(0, numOutlets):
outletId = outletIds[i]
productSales = []
outletSales.append(productSales)
for j in range(0, numProducts):
val = m[j,i]
if val == -1:
val = 0
productId = productIds[j]
productSales.append(val)
outOutletJs.write(varName+"[\"" + periodId + "\"].sold = ");
json.dump(outletSales, outOutletJs)
outOutletJs.write(";\n");
out.close()
out2.close()
outOutletJs.close()
out3.close()
print
print "Finished."
| mit | -2,577,288,095,223,949,000 | 22.801724 | 122 | 0.601594 | false | 3.035734 | false | false | false |
wagtail/wagtail | wagtail/admin/rich_text/converters/editor_html.py | 7 | 6380 | from django.utils.functional import cached_property
from django.utils.html import escape
from wagtail.core.models import Page
from wagtail.core.rich_text import features as feature_registry
from wagtail.core.rich_text.rewriters import EmbedRewriter, LinkRewriter, MultiRuleRewriter
from wagtail.core.whitelist import Whitelister, allow_without_attributes
class WhitelistRule:
def __init__(self, element, handler):
self.element = element
self.handler = handler
class EmbedTypeRule:
def __init__(self, embed_type, handler):
self.embed_type = embed_type
self.handler = handler
class LinkTypeRule:
def __init__(self, link_type, handler):
self.link_type = link_type
self.handler = handler
# Whitelist rules which are always active regardless of the rich text features that are enabled
BASE_WHITELIST_RULES = {
'[document]': allow_without_attributes,
'p': allow_without_attributes,
'div': allow_without_attributes,
'br': allow_without_attributes,
}
class DbWhitelister(Whitelister):
"""
A custom whitelisting engine to convert the HTML as returned by the rich text editor
into the pseudo-HTML format stored in the database (in which images, documents and other
linked objects are identified by ID rather than URL):
* accepts a list of WhitelistRules to extend the initial set in BASE_WHITELIST_RULES;
* replaces any element with a 'data-embedtype' attribute with an <embed> element, with
attributes supplied by the handler for that type as defined in embed_handlers;
* rewrites the attributes of any <a> element with a 'data-linktype' attribute, as
determined by the handler for that type defined in link_handlers, while keeping the
element content intact.
"""
def __init__(self, converter_rules):
self.converter_rules = converter_rules
self.element_rules = BASE_WHITELIST_RULES.copy()
for rule in self.converter_rules:
if isinstance(rule, WhitelistRule):
self.element_rules[rule.element] = rule.handler
@cached_property
def embed_handlers(self):
return {
rule.embed_type: rule.handler for rule in self.converter_rules
if isinstance(rule, EmbedTypeRule)
}
@cached_property
def link_handlers(self):
return {
rule.link_type: rule.handler for rule in self.converter_rules
if isinstance(rule, LinkTypeRule)
}
def clean_tag_node(self, doc, tag):
if 'data-embedtype' in tag.attrs:
embed_type = tag['data-embedtype']
# fetch the appropriate embed handler for this embedtype
try:
embed_handler = self.embed_handlers[embed_type]
except KeyError:
# discard embeds with unrecognised embedtypes
tag.decompose()
return
embed_attrs = embed_handler.get_db_attributes(tag)
embed_attrs['embedtype'] = embed_type
embed_tag = doc.new_tag('embed', **embed_attrs)
embed_tag.can_be_empty_element = True
tag.replace_with(embed_tag)
elif tag.name == 'a' and 'data-linktype' in tag.attrs:
# first, whitelist the contents of this tag
for child in tag.contents:
self.clean_node(doc, child)
link_type = tag['data-linktype']
try:
link_handler = self.link_handlers[link_type]
except KeyError:
# discard links with unrecognised linktypes
tag.unwrap()
return
link_attrs = link_handler.get_db_attributes(tag)
link_attrs['linktype'] = link_type
tag.attrs.clear()
tag.attrs.update(**link_attrs)
else:
if tag.name == 'div':
tag.name = 'p'
super(DbWhitelister, self).clean_tag_node(doc, tag)
class EditorHTMLConverter:
def __init__(self, features=None):
if features is None:
features = feature_registry.get_default_features()
self.converter_rules = []
for feature in features:
rule = feature_registry.get_converter_rule('editorhtml', feature)
if rule is not None:
# rule should be a list of WhitelistRule() instances - append this to
# the master converter_rules list
self.converter_rules.extend(rule)
@cached_property
def whitelister(self):
return DbWhitelister(self.converter_rules)
def to_database_format(self, html):
return self.whitelister.clean(html)
@cached_property
def html_rewriter(self):
embed_rules = {}
link_rules = {}
for rule in self.converter_rules:
if isinstance(rule, EmbedTypeRule):
embed_rules[rule.embed_type] = rule.handler.expand_db_attributes
elif isinstance(rule, LinkTypeRule):
link_rules[rule.link_type] = rule.handler.expand_db_attributes
return MultiRuleRewriter([
LinkRewriter(link_rules), EmbedRewriter(embed_rules)
])
def from_database_format(self, html):
return self.html_rewriter(html)
class PageLinkHandler:
"""
PageLinkHandler will be invoked whenever we encounter an <a> element in HTML content
with an attribute of data-linktype="page". The resulting element in the database
representation will be:
<a linktype="page" id="42">hello world</a>
"""
@staticmethod
def get_db_attributes(tag):
"""
Given an <a> tag that we've identified as a page link embed (because it has a
data-linktype="page" attribute), return a dict of the attributes we should
have on the resulting <a linktype="page"> element.
"""
return {'id': tag['data-id']}
@staticmethod
def expand_db_attributes(attrs):
try:
page = Page.objects.get(id=attrs['id'])
attrs = 'data-linktype="page" data-id="%d" ' % page.id
parent_page = page.get_parent()
if parent_page:
attrs += 'data-parent-id="%d" ' % parent_page.id
return '<a %shref="%s">' % (attrs, escape(page.localized.specific.url))
except Page.DoesNotExist:
return "<a>"
| bsd-3-clause | 3,345,996,623,950,275,600 | 34.642458 | 95 | 0.624608 | false | 4.156352 | false | false | false |
ISMiller101/vpsolver | pyvpsolver/vpsolver.py | 1 | 10870 | """
This code is part of the Arc-flow Vector Packing Solver (VPSolver).
Copyright (C) 2013-2015, Filipe Brandao
Faculdade de Ciencias, Universidade do Porto
Porto, Portugal. All rights reserved. E-mail: <fdabrandao@dcc.fc.up.pt>.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import signal
import atexit
import shutil
import tempfile
import subprocess
from . import *
class VBP:
def __init__(self, W, w, b, verbose=None):
self.vbp_file = VPSolver.new_tmp_file(".vbp")
f = open(self.vbp_file,"w")
if type(W)==int:
W=[W]
else:
W = list(W)
print >>f, len(W)
print >>f, " ".join(map(str,W))
print >>f, len(w)
for i in xrange(len(w)):
if type(w[i])==int:
row = [w[i],b[i]]
else:
row = list(w[i])+[b[i]]
assert len(row) == len(W)+1
print >>f, " ".join(map(str,row))
f.close()
if verbose:
f = open(self.vbp_file,"r")
print f.read()
f.close()
self.m = len(b)
self.ndims = len(W)
self.W, self.w, self.b = W, w, b
@classmethod
def fromFile(cls, vbp_file, verbose=None):
f = open(vbp_file, "r")
lst = map(int,f.read().split())
ndims = lst.pop(0)
W = lst[:ndims]
lst = lst[ndims:]
m = lst.pop(0)
w, b = [], []
for i in xrange(m):
w.append(lst[:ndims])
lst = lst[ndims:]
b.append(lst.pop(0))
return cls(W, w, b, verbose)
def __del__(self):
try:
os.remove(self.vbp_file)
except:
pass
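# Illustrative end-to-end use of these wrappers (instance data is made up):
#
#     instance = VBP(W=[10], w=[[4], [3], [2]], b=[1, 2, 3])
#     graph = AFG(instance)   # arc-flow graph written via vbp2afg
#     model = MPS(graph)      # MPS model written via afg2mps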
class AFG:
def __init__(self, instance, compress=-2, binary=False, vtype="I", verbose=None):
assert isinstance(instance, VBP)
VPSolver.set_verbose(verbose)
self.instance = instance
self.afg_file = VPSolver.new_tmp_file(".afg")
self.output = VPSolver.vbp2afg(instance.vbp_file, self.afg_file, compress, binary, vtype)
self.V, self.A, self.S, self.T = None, None, None, None
def graph(self):
return AFGraph.fromFile(self.afg_file)
def __del__(self):
try:
os.remove(self.afg_file)
except:
pass
class MPS:
def __init__(self, graph, verbose=None):
assert isinstance(graph, AFG)
VPSolver.set_verbose(verbose)
self.afg_graph = graph
self.mps_file = VPSolver.new_tmp_file(".mps")
self.output = VPSolver.afg2mps(graph.afg_file, self.mps_file, verbose=verbose)
def __del__(self):
try:
os.remove(self.mps_file)
except:
pass
class LP:
def __init__(self, graph, verbose=None):
assert isinstance(graph, AFG)
VPSolver.set_verbose(verbose)
self.afg_graph = graph
self.lp_file = VPSolver.new_tmp_file(".lp")
self.output = VPSolver.afg2lp(graph.afg_file, self.lp_file, verbose=verbose)
def __del__(self):
try:
os.remove(self.lp_file)
except:
pass
class VPSolver:
VPSOLVER = "vpsolver"
VBP2AFG = "vbp2afg"
AFG2MPS = "afg2mps"
AFG2LP = "afg2lp"
VBPSOL = "vbpsol"
TMP_DIR = tempfile.mkdtemp()
TMP_CNT = 0
REDIRECT = "2>&1"
PLIST = []
@staticmethod
def set_verbose(verbose):
if verbose != None:
if verbose:
VPSolver.REDIRECT = "2>&1"
else:
VPSolver.REDIRECT = "> /dev/null 2>&1"
@staticmethod
    def new_tmp_file(ext="tmp"):
        if not ext.startswith("."):
            ext = "." + ext
fname = "%s/%d%s" % (VPSolver.TMP_DIR, VPSolver.TMP_CNT, ext)
VPSolver.TMP_CNT += 1
return fname
@staticmethod
@atexit.register
def clear():
for p in VPSolver.PLIST:
try:
os.killpg(p.pid, signal.SIGTERM)
except:
pass
try:
shutil.rmtree(VPSolver.TMP_DIR)
except:
pass
@staticmethod
def run(cmd):
p = subprocess.Popen(cmd, shell=True, preexec_fn=os.setsid)
VPSolver.PLIST.append(p)
p.wait()
@staticmethod
def parse_vbpsol(vpsol_output):
try:
s = vpsol_output.strip()
lst = s[s.rfind("Objective:"):].split("\n")
lst[0] = lst[0].replace("Objective: ", "")
obj = int(lst[0])
lst = lst[2:]
lst = map(lambda x: x.split("x"), lst)
sol = []
for mult, pat in lst:
mult = int(mult)
pat = pat.replace("i=","")
pat = pat.replace("[","").replace("]","")
pat = map(lambda x: int(x)-1,pat.split(","))
sol.append((mult,pat))
except:
return None
return obj, sol
@staticmethod
def vbpsol(afg_file, sol_file, opts="", verbose=None):
VPSolver.set_verbose(verbose)
if isinstance(afg_file, AFG):
afg_file = afg_file.afg_file
out_file = VPSolver.new_tmp_file()
VPSolver.run("%s %s %s %s | tee %s %s" % (VPSolver.VBPSOL, afg_file, sol_file, opts, out_file, VPSolver.REDIRECT))
f = open(out_file)
output = f.read()
f.close()
os.remove(out_file)
return output
@staticmethod
def vpsolver(vbp_file, compress=-2, binary=False, vtype="I", verbose=None):
VPSolver.set_verbose(verbose)
if isinstance(vbp_file, VBP):
vbp_file = vbp_file.vbp_file
out_file = VPSolver.new_tmp_file()
opts = "%d %d %s" % (compress, binary, vtype)
VPSolver.run("%s %s %s | tee %s %s" % (VPSolver.VPSOLVER, vbp_file, opts, out_file, VPSolver.REDIRECT))
f = open(out_file)
output = f.read()
f.close()
os.remove(out_file)
return output, VPSolver.parse_vbpsol(output)
@staticmethod
def vbp2afg(vbp_file, afg_file, compress=-2, binary=False, vtype="I", verbose=None):
VPSolver.set_verbose(verbose)
if isinstance(vbp_file, VBP):
vbp_file = vbp_file.vbp_file
out_file = VPSolver.new_tmp_file()
opts = "%d %d %s" % (compress, binary, vtype)
VPSolver.run("%s %s %s %s | tee %s %s" % (VPSolver.VBP2AFG, vbp_file, afg_file, opts, out_file, VPSolver.REDIRECT))
f = open(out_file)
output = f.read()
f.close()
os.remove(out_file)
return output
@staticmethod
def afg2mps(afg_file, mps_file, opts="", verbose=None):
VPSolver.set_verbose(verbose)
if isinstance(afg_file, AFG):
afg_file = afg_file.afg_file
out_file = VPSolver.new_tmp_file()
VPSolver.run("%s %s %s %s | tee %s %s" % (VPSolver.AFG2MPS, afg_file, mps_file, opts, out_file, VPSolver.REDIRECT))
f = open(out_file)
output = f.read()
f.close()
os.remove(out_file)
return output
@staticmethod
def afg2lp(afg_file, lp_file, opts="", verbose=None):
VPSolver.set_verbose(verbose)
if isinstance(afg_file, AFG):
afg_file = afg_file.afg_file
out_file = VPSolver.new_tmp_file()
VPSolver.run("%s %s %s %s | tee %s %s" % (VPSolver.AFG2LP, afg_file, lp_file, opts, out_file, VPSolver.REDIRECT))
f = open(out_file)
output = f.read()
f.close()
os.remove(out_file)
return output
@staticmethod
def script(script_name, arg1=None, arg2=None, verbose=None):
VPSolver.set_verbose(verbose)
cmd = script_name
for arg in [arg1, arg2]:
if isinstance(arg, MPS):
cmd += " --mps " + arg.mps_file
elif isinstance(arg, LP):
cmd += " --lp " + arg.lp_file
elif isinstance(arg, AFG):
cmd += " --afg " + arg.afg_file
elif isinstance(arg, VBP):
cmd += " --vbp " + arg.vbp_file
elif isinstance(arg, str):
if arg.endswith(".mps"):
cmd += " --mps " + arg
elif arg.endswith(".lp"):
cmd += " --lp " + arg
elif arg.endswith(".afg"):
cmd += " --afg " + arg
elif arg.endswith(".vbp"):
cmd += " --vbp " + arg
else:
raise Exception("Invalid file extension!")
out_file = VPSolver.new_tmp_file()
VPSolver.run("%s | tee %s %s" % (cmd, out_file, VPSolver.REDIRECT))
f = open(out_file)
output = f.read()
f.close()
os.remove(out_file)
return output, VPSolver.parse_vbpsol(output)
@staticmethod
def script_wsol(script_name, model, verbose=None):
VPSolver.set_verbose(verbose)
cmd = script_name
if isinstance(model, MPS):
cmd += " --mps " + model.mps_file
elif isinstance(model, LP):
cmd += " --lp " + model.lp_file
elif isinstance(model,str):
if model.endswith(".mps"):
cmd += " --mps " + model
elif model.endswith(".lp"):
cmd += " --lp " + model
else:
raise Exception("Invalid file extension!")
out_file = VPSolver.new_tmp_file()
sol_file = VPSolver.new_tmp_file(".sol")
VPSolver.run("%s --wsol %s | tee %s %s" % (cmd, sol_file, out_file, VPSolver.REDIRECT))
f = open(out_file)
output = f.read()
f.close()
os.remove(out_file)
try:
f = open(sol_file)
sol = f.read().split()
vals = {}
assert len(sol)%2 == 0
for i in xrange(0,len(sol),2):
var, value = sol[i], int(round(float(sol[i+1])))
if value != 0:
vals[var] = value
f.close()
os.remove(sol_file)
except:
vals = None
return output, vals
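# Illustrative solve-and-parse round trip (the solver wrapper name is an
# assumption; pass any script accepted by VPSolver.script/script_wsol):
#
#     output, varvalues = VPSolver.script_wsol('vpsolver_glpk.sh',
#                                              MPS(AFG(instance)))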
def signal_handler(signal, frame):
print "signal received: %d" % signal
VPSolver.clear()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGHUP, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
| gpl-3.0 | 849,976,155,841,555,500 | 31.642643 | 123 | 0.533947 | false | 3.33129 | false | false | false |
hthompson6/contrail-controller | src/vnsw/agent/uve/cpuinfo.py | 7 | 1958 | #
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
import os
import psutil
from vrouter.cpuinfo.ttypes import *
class CpuInfoData(object):
def __init__(self):
self._process = psutil.Process(os.getpid())
self._num_cpu = 0
#end __init__
def _get_num_cpu(self):
return psutil.NUM_CPUS
#end _get_num_cpu
def _get_sys_mem_info(self):
phymem_info = psutil.phymem_usage()
sys_mem_info = SysMemInfo()
sys_mem_info.total = phymem_info[0]/1024
sys_mem_info.used = phymem_info[1]/1024
sys_mem_info.free = phymem_info[2]/1024
return sys_mem_info
#end _get_sys_mem_info
def _get_mem_info(self):
mem_info = MemInfo()
mem_info.virt = self._process.get_memory_info().vms/1024
mem_info.peakvirt = mem_info.virt
mem_info.res = self._process.get_memory_info().rss/1024
return mem_info
#end _get_mem_info
def _get_cpu_load_avg(self):
load_avg = os.getloadavg()
cpu_load_avg = CpuLoadAvg()
cpu_load_avg.one_min_avg = load_avg[0]
cpu_load_avg.five_min_avg = load_avg[1]
cpu_load_avg.fifteen_min_avg = load_avg[2]
return cpu_load_avg
#end _get_cpu_load_avg
def _get_cpu_share(self):
cpu_percent = self._process.get_cpu_percent(interval=0.1)
return cpu_percent/self._get_num_cpu()
#end _get_cpu_share
def get_cpu_info(self, system=True):
cpu_info = CpuLoadInfo()
num_cpu = self._get_num_cpu()
if self._num_cpu != num_cpu:
self._num_cpu = num_cpu
cpu_info.num_cpu = num_cpu
if system:
cpu_info.sys_mem_info = self._get_sys_mem_info()
cpu_info.cpuload = self._get_cpu_load_avg()
cpu_info.meminfo = self._get_mem_info()
cpu_info.cpu_share = self._get_cpu_share()
return cpu_info
#end get_cpu_info
#end class CpuInfoData
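# Illustrative usage (polling cadence is an assumption): agents typically
# call this once per collection interval; note _get_cpu_share() blocks for
# ~0.1s while psutil samples CPU usage.
#
#     cpu_load_info = CpuInfoData().get_cpu_info(system=True)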
| apache-2.0 | 7,329,107,654,032,415,000 | 28.666667 | 65 | 0.590909 | false | 3.054602 | false | false | false |
tvd-dataset/GameOfThrones | GameOfThrones/__init__.py | 1 | 9809 | #!/usr/bin/env python
# encoding: utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2013-2015 CNRS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# AUTHORS
# Hervé BREDIN -- http://herve.niderb.fr/
# Camille GUINAUDEAU
#
from __future__ import unicode_literals
from __future__ import print_function
import re
from pkg_resources import resource_filename
from bs4 import BeautifulSoup
from tvd import Plugin
from tvd import T, TStart, TEnd, Transcription
from tvd import Segment, Annotation
from pyannote.parser.transcription.ctm import CTMParser, IterLinesMixin
class GameOfThrones(Plugin, IterLinesMixin):
def speaker(self, url=None, episode=None, **kwargs):
# absolute path to resource file
path = resource_filename(self.__class__.__name__, url)
annotation = Annotation()
with open(path, 'r') as fp:
for line in fp:
tokens = line.strip().split()
start_time = float(tokens[0])
duration = float(tokens[1])
segment = Segment(start_time, start_time + duration)
speaker = tokens[2]
annotation[segment, speaker] = speaker
return annotation
def outline_www(self, url=None, episode=None, **kwargs):
"""
Parameters
----------
url : str, optional
URL where resource is available
episode : Episode, optional
Episode for which resource should be downloaded
            Useful in case the same URL contains resources for multiple episodes.
Returns
-------
G : Transcription
"""
r = self.download_as_utf8(url)
soup = BeautifulSoup(r)
h2 = soup.find_all('h2')
sp = ""
i = 0
outline = {}
for element in h2[0].next_elements:
if element.name == 'p':
if outline.get(i) == "----":
sp = element.text
else:
sp = outline.get(i) + " " + element.text
outline.update({i: sp})
if element.name == 'h2':
i = i + 1
sp = "----"
outline.update({i: sp})
G = Transcription(episode=episode)
t2 = TStart
i = 1
while outline.get(i):
# add /empty/ edge between previous and next annotations
t1 = t2
t2 = T()
G.add_edge(t1, t2)
# add next annotation
t1 = t2
t2 = T()
G.add_edge(t1, t2, scene=outline.get(i))
i = i + 1
# add /empty/ edge between previous annotation and episode end
t1 = t2
t2 = TEnd
G.add_edge(t1, t2)
return G
def _scenes(self, url=None, episode=None):
"""Load file at `url` as Annotation
File must follow the following format:
# start_time end_time label
0.000 10.234 beginning
10.234 56.000 scene_1
"""
# initialize empty annotation
# uri is set to episode when provided
annotation = Annotation(uri=episode)
# absolute path to resource file
path = resource_filename(self.__class__.__name__, url)
# open file and parse it
with open(path, 'r') as f:
for line in f:
start, end, label = line.strip().split()
start = float(start)
end = float(end)
annotation[Segment(start, end)] = label
return annotation
def scenes_outline(self, url=None, episode=None, **kwargs):
return self._scenes(url=url, episode=episode)
def scenes(self, url=None, episode=None, **kwargs):
return self._scenes(url=url, episode=episode)
# load name mapping
def _get_mapping(self):
path = resource_filename(self.__class__.__name__, 'data/mapping.txt')
with open(path, 'r') as _:
mapping = dict(line.split() for line in _.readlines())
return mapping
def transcript_www(self, url=None, episode=None, **kwargs):
# load name mapping
mapping = self._get_mapping()
r = self.download_as_utf8(url)
soup = BeautifulSoup(r)
G = Transcription(episode=episode)
t2 = TStart
div = soup.find_all('div')
transcript = ""
for i in range(0, len(div)):
if re.match("{'class': \['postbody'\]}", unicode(div[i].attrs)):
transcript = div[i]
for i in range(0, len(transcript.contents)):
string = unicode(transcript.contents[i])
if not re.match("\[(.*)\]", string):
if re.match("(.*) : (.*)", string) and \
not re.match("(.*) by : (.*)", string):
ligne = re.split(' : ', transcript.contents[i])
# add /empty/ edge between previous and next annotations
t1 = t2
t2 = T()
G.add_edge(t1, t2)
# add next annotation
t1 = t2
t2 = T()
spk = ligne[0].lower().replace(' ', '_')
if re.match("(.*)_\(|\[(.*)\)|\]", spk):
match = re.match("(.*)_\(|\[(.*)\)|\]", spk)
spk = match.group(1)
spk = mapping.get(spk, spk)
if re.match("(.*)/(.*)", spk):
spks = spk.split('/')
if spks[0] in mapping:
spk = mapping.get(spks[0])
G.add_edge(t1, t2, speaker=spk, speech=ligne[1])
if spks[1] in mapping:
spk = mapping.get(spks[1])
G.add_edge(t1, t2, speaker=spk, speech=ligne[1])
else:
G.add_edge(t1, t2, speaker=spk, speech=ligne[1])
elif (
re.match("(.*): (.*)", string)
and not re.match("Credit: (.*)", string)
and not re.match("(.*) by: (.*)", string)
):
ligne = re.split(': ', transcript.contents[i])
# add /empty/ edge between previous and next annotations
t1 = t2
t2 = T()
G.add_edge(t1, t2)
# add next annotation
t1 = t2
t2 = T()
spk = ligne[0].lower().replace(' ', '_')
if re.match("(.*)_\(|\[(.*)\)|\]", spk):
match = re.match("(.*)_\(|\[(.*)\)|\]", spk)
spk = match.group(1)
spk = mapping.get(spk, spk)
if re.match("(.*)/(.*)", spk):
spks = spk.split('/')
if spks[0] in mapping:
spk = mapping.get(spks[0])
G.add_edge(t1, t2, speaker=spk, speech=ligne[1])
if spks[1] in mapping:
spk = mapping.get(spks[1])
G.add_edge(t1, t2, speaker=spk, speech=ligne[1])
else:
G.add_edge(t1, t2, speaker=spk, speech=ligne[1])
# add /empty/ edge between previous annotation and episode end
t1 = t2
t2 = TEnd
G.add_edge(t1, t2)
return G
def transcript(self, url=None, episode=None, **kwargs):
path = resource_filename(self.__class__.__name__, url)
transcription = Transcription(episode=episode)
# previous dialogue end time
e_dialogue = None
for line in self.iterlines(path):
# ARYA_STARK I'm not a boy!
# speaker = ARYA_STARK
# speech = I'm not a boy!
tokens = line.split()
speaker = tokens[0].strip()
speech = ' '.join(tokens[1:]).strip()
# new dialogue
_s_dialogue, _e_dialogue = T(), T()
# connect dialogue with previous dialogue
if e_dialogue is not None:
transcription.add_edge(e_dialogue, _s_dialogue)
transcription.add_edge(_s_dialogue, _e_dialogue,
speaker=speaker, speech=speech)
# keep track of previous dialogue end time
e_dialogue = _e_dialogue
return transcription
def transcript_aligned(self, url=None, episode=None, **kwargs):
path = resource_filename(self.__class__.__name__, url)
return CTMParser().read(path)()
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| mit | 5,265,190,896,439,193,000 | 32.023569 | 79 | 0.512439 | false | 4.059603 | false | false | false |
kdeloach/model-my-watershed | src/mmw/apps/modeling/geoprocessing.py | 1 | 10022 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import requests
import json
from ast import literal_eval as make_tuple
from celery import shared_task
from celery.exceptions import MaxRetriesExceededError, Retry
from requests.exceptions import ConnectionError
from django_statsd.clients import statsd
from django.core.cache import cache
from django.conf import settings
@shared_task(bind=True, default_retry_delay=1, max_retries=42)
def start(self, opname, input_data, wkaoi=None):
"""
    Start a geoprocessing operation.
Given an operation name and a dictionary of input data, looks up the
operation from the list of supported operations in settings.GEOP['json'],
combines it with input data, and submits it to Spark JobServer.
This task must always be succeeded by `finish` below.
All errors are passed along and not raised here, so that error handling can
be attached to the final task in the chain, without needing to be attached
to every task.
If a well-known area of interest id is specified in wkaoi, checks to see
if there is a cached result for that wkaoi and operation. If so, returns
that immediately in the 'cached' key. If not, starts the geoprocessing
operation while also passing along the cache key to the next step, so that
the results of geoprocessing may be cached.
:param opname: Name of operation. Must exist in settings.GEOP['json']
:param input_data: Dictionary of values to extend base operation JSON with
:param wkaoi: String id of well-known area of interest. "{table}__{id}"
:return: Dictionary containing either job_id if successful, error if not
"""
if opname not in settings.GEOP['json']:
return {
'error': 'Unsupported operation {}'.format(opname)
}
if not input_data:
return {
'error': 'Input data cannot be empty'
}
outgoing = {}
if wkaoi and settings.GEOP['cache']:
key = 'geop_{}__{}'.format(wkaoi, opname)
outgoing['key'] = key
cached = cache.get(key)
if cached:
outgoing['cached'] = cached
return outgoing
data = settings.GEOP['json'][opname].copy()
data['input'].update(input_data)
try:
outgoing['job_id'] = sjs_submit(data, self.retry)
return outgoing
except Retry as r:
raise r
except Exception as x:
return {
'error': x.message
}
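# Illustrative Celery chaining (the operation name, input shape, and final
# parser task are assumptions; `start` hands its result dict to `finish`):
#
#     chain(start.s('some_operation', {'polygon': [aoi_geojson]}, wkaoi),
#           finish.s(),
#           parse_results.s())()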
@shared_task(bind=True, default_retry_delay=1, max_retries=42)
def finish(self, incoming):
"""
Retrieve results of geoprocessing.
To be used immediately after the `start` task, this takes the incoming
data and inspects it to see if there are any reported errors. If found,
the errors are passed through to the next task. Otherwise, the incoming
parameters are used to retrieve the job from Spark JobServer, and those
results are returned.
    This task must always be preceded by `start` above. The succeeding task
must take the raw JSON values and process them into information. The JSON
output will look like:
{
'List(1,2)': 3,
'List(4,5)': 6
}
where the values and number of items depend on the input.
All errors are passed along and not raised here, so that error handling can
be attached to the final task in the chain, without needing to be attached
to every task.
If the incoming set of values contains a 'cached' key, then its contents
are returned immediately. If there is a 'key' key, then the results of
geoprocessing will be saved to the cache with that key before returning.
:param incoming: Dictionary containing job_id or error
:return: Dictionary of Spark JobServer results, or error
"""
if 'error' in incoming:
return incoming
if 'cached' in incoming:
return incoming['cached']
try:
result = sjs_retrieve(incoming['job_id'], self.retry)
if 'key' in incoming:
cache.set(incoming['key'], result, None)
return result
except Retry as r:
# Celery throws a Retry exception when self.retry is called to stop
# the execution of any further code, and to indicate to the worker
# that the same task is going to be retried.
# We capture and re-raise Retry to continue this behavior, and ensure
# that it doesn't get passed to the next task like every other error.
raise r
except Exception as x:
return {
'error': x.message
}
@statsd.timer(__name__ + '.sjs_submit')
def sjs_submit(data, retry=None):
"""
Submits a job to Spark Job Server. Returns its Job ID, which
can be used with sjs_retrieve to get the final result.
"""
host = settings.GEOP['host']
port = settings.GEOP['port']
args = settings.GEOP['args']
base_url = 'http://{}:{}'.format(host, port)
jobs_url = '{}/jobs?{}'.format(base_url, args)
try:
response = requests.post(jobs_url, data=json.dumps(data))
except ConnectionError as exc:
if retry is not None:
retry(exc=exc)
if response.ok:
job = response.json()
else:
error = response.json()
if error['status'] == 'NO SLOTS AVAILABLE' and retry is not None:
retry(exc=Exception('No slots available in Spark JobServer.\n'
'Details = {}'.format(response.text)))
elif error['result'] == 'context geoprocessing not found':
reboot_sjs_url = '{}/contexts?reset=reboot'.format(base_url)
context_response = requests.put(reboot_sjs_url)
if context_response.ok:
if retry is not None:
retry(exc=Exception('Geoprocessing context missing in '
'Spark JobServer\nDetails = {}'.format(
context_response.text)))
else:
raise Exception('Geoprocessing context missing in '
'Spark JobServer, but no retry was set.\n'
'Details = {}'.format(
context_response.text))
else:
raise Exception('Unable to create missing geoprocessing '
'context in Spark JobServer.\n'
'Details = {}'.format(context_response.text))
else:
raise Exception('Unable to submit job to Spark JobServer.\n'
'Details = {}'.format(response.text))
if job['status'] == 'STARTED':
return job['result']['jobId']
else:
raise Exception('Submitted job did not start in Spark JobServer.\n'
'Details = {}'.format(response.text))
@statsd.timer(__name__ + '.sjs_retrieve')
def sjs_retrieve(job_id, retry=None):
"""
Given a job ID, will try to retrieve its value. If the job is
still running, will call the optional retry function before
proceeding.
"""
host = settings.GEOP['host']
port = settings.GEOP['port']
url = 'http://{}:{}/jobs/{}'.format(host, port, job_id)
try:
response = requests.get(url)
except ConnectionError as exc:
if retry is not None:
retry(exc=exc)
if response.ok:
job = response.json()
else:
raise Exception('Unable to retrieve job {} from Spark JobServer.\n'
'Details = {}'.format(job_id, response.text))
if job['status'] == 'FINISHED':
return job['result']
elif job['status'] == 'RUNNING':
if retry is not None:
try:
retry()
except MaxRetriesExceededError:
delete = requests.delete(url) # Job took too long, terminate
if delete.ok:
raise Exception('Job {} timed out, '
'deleted.'.format(job_id))
else:
raise Exception('Job {} timed out, unable to delete.\n'
'Details: {}'.format(job_id, delete.text))
else:
if job['status'] == 'ERROR':
status = 'ERROR ({}: {})'.format(job['result']['errorClass'],
job['result']['message'])
else:
status = job['status']
delete = requests.delete(url) # Job in unusual state, terminate
if delete.ok:
raise Exception('Job {} was {}, deleted'.format(job_id, status))
else:
raise Exception('Job {} was {}, could not delete.\n'
'Details = {}'.format(job_id, status, delete.text))
def parse(sjs_result):
"""
Converts raw JSON results from Spark JobServer to dictionary of tuples
If the input is this:
{
'List(1,2)': 3,
'List(4,5)': 6
}
The output will be:
{
(1, 2): 3,
(4, 5): 6
}
:param sjs_result: Dictionary mapping strings like 'List(a,b,c)' to ints
:return: Dictionary mapping tuples of ints to ints
"""
return {make_tuple(key[4:]): val for key, val in sjs_result.items()}
def to_one_ring_multipolygon(area_of_interest):
"""
Given a multipolygon comprising just a single ring structured in a
five-dimensional array, remove one level of nesting and make the AOI's
coordinates a four-dimensional array. Otherwise, no op.
"""
if type(area_of_interest['coordinates'][0][0][0][0]) is list:
multipolygon_shapes = area_of_interest['coordinates'][0]
if len(multipolygon_shapes) > 1:
raise Exception('Unable to parse multi-ring RWD multipolygon')
else:
area_of_interest['coordinates'] = multipolygon_shapes
return area_of_interest
| apache-2.0 | 8,929,279,760,388,009,000 | 33.919861 | 79 | 0.597186 | false | 4.252015 | false | false | false |
ProjectQ-Framework/ProjectQ | projectq/setups/linear.py | 1 | 3413 | # -*- coding: utf-8 -*-
# Copyright 2018 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines a setup to compile to qubits placed in a linear chain or a circle.
It provides the `engine_list` for the `MainEngine`. This engine list contains an AutoReplacer with most of the gate
decompositions of ProjectQ, which are used to decompose a circuit into only two qubit gates and arbitrary single qubit
gates. ProjectQ's LinearMapper is then used to introduce the necessary Swap operations to route interacting qubits
next to each other. This setup allows choosing the final gate set (with some limitations).
"""
from projectq.cengines import LinearMapper
from projectq.ops import CNOT, Swap
from ._utils import get_engine_list_linear_grid_base
def get_engine_list(num_qubits, cyclic=False, one_qubit_gates="any", two_qubit_gates=(CNOT, Swap)):
"""
Returns an engine list to compile to a linear chain of qubits.
Note:
If you choose a new gate set for which the compiler does not yet have standard rules, it raises an
`NoGateDecompositionError` or a `RuntimeError: maximum recursion depth exceeded...`. Also note that even the
gate sets which work might not yet be optimized. So make sure to double check and potentially extend the
        decomposition rules. This implementation currently requires that the one qubit gates contain Rz and at
least one of {Ry(best), Rx, H} and the two qubit gate must contain CNOT (recommended) or CZ.
Note:
Classical instructions gates such as e.g. Flush and Measure are automatically allowed.
Example:
get_engine_list(num_qubits=10, cyclic=False,
one_qubit_gates=(Rz, Ry, Rx, H),
two_qubit_gates=(CNOT,))
Args:
num_qubits(int): Number of qubits in the chain
cyclic(bool): If a circle or not. Default is False
one_qubit_gates: "any" allows any one qubit gate, otherwise provide a tuple of the allowed gates. If the gates
are instances of a class (e.g. X), it allows all gates which are equal to it. If the gate is
a class (Rz), it allows all instances of this class. Default is "any"
two_qubit_gates: "any" allows any two qubit gate, otherwise provide a tuple of the allowed gates. If the gates
are instances of a class (e.g. CNOT), it allows all gates which are equal to it. If the gate
is a class, it allows all instances of this class. Default is (CNOT, Swap).
Raises:
TypeError: If input is for the gates is not "any" or a tuple.
Returns:
A list of suitable compiler engines.
"""
return get_engine_list_linear_grid_base(
LinearMapper(num_qubits=num_qubits, cyclic=cyclic), one_qubit_gates, two_qubit_gates
)
| apache-2.0 | 8,752,461,956,665,076,000 | 50.712121 | 118 | 0.693232 | false | 3.905034 | false | false | false |
Eomys/MoSQITo | mosqito/functions/tonality_tnr_pr/find_highest_tone.py | 1 | 2208 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 16 20:23:01 2020
@author: Salomé
"""
# Standard library imports
import numpy as np
# Mosqito functions import
from mosqito.functions.tonality_tnr_pr.critical_band import critical_band
def find_highest_tone(freqs, spec_db, index, nb_tones, ind):
"""
Method to find the two highest tones in a given spectrum from a given index
according to their critical band
Parameters
----------
freqs : numpy.array
frequency axis
spec_db : numpy.array
signal spectrum in dB
index : numpy.array
list of candidate tones index
    nb_tones : integer
        number of candidate tones not yet examined
    ind : integer
        index of the tone currently under consideration
Returns
-------
ind_p : integer
index of the highest tone in the critical band
    ind_s : integer
        index of the second highest tone in the critical band
index : numpy.array
list of candidate tones index updated
    nb_tones : integer
        number of candidate tones not yet examined, updated
"""
f = freqs[ind]
# critical band centered on f
f1, f2 = critical_band(f)
low_limit_idx = np.argmin(np.abs(freqs - f1))
high_limit_idx = np.argmin(np.abs(freqs - f2))
# Other tones in the critical band centered on f tones
multiple_idx = index[index > low_limit_idx]
multiple_idx = multiple_idx[multiple_idx < high_limit_idx]
if len(multiple_idx) > 1:
sort_spec = np.argsort(-1 * spec_db[multiple_idx])
# highest tones in the critical band
ind_p = multiple_idx[sort_spec[0]]
ind_s = multiple_idx[sort_spec[1]]
# suppression of the lower values
for s in sort_spec[2:]:
sup = np.where(index == multiple_idx[s])[0]
index = np.delete(index, sup)
nb_tones -= 1
if ind_p != ind:
# screening to find the highest value in the critical band centered on fp
ind_p, ind_s, index, nb_tones = find_highest_tone(
freqs, spec_db, index, nb_tones, ind_p
)
else:
ind_p = ind
ind_s = None
return ind_p, ind_s, index, nb_tones
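# Illustrative call (arrays are placeholders): screen the critical band
# around the first candidate tone of a spectrum:
#
#     ind_p, ind_s, index, nb_tones = find_highest_tone(
#         freqs, spec_db, index, nb_tones, index[0])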
| apache-2.0 | 6,190,862,336,084,805,000 | 27.294872 | 85 | 0.613502 | false | 3.641914 | false | false | false |
poderomedia/node-mapper | server/db.py | 1 | 1621 | import pymongo
from bson.objectid import ObjectId
def formatObjectIDs(collectionName, results):
for result in results: # For each result is passed, convert the _id to the proper mID, cID, etc.
result[collectionName[0]+'ID'] = str(result.pop('_id')) # Note the .pop removes the _id from the dict
return results
class mongoInstance(object):
def getConfig(self, key):
result = MongoInstance.client['NodeMapper'].config.find_one({'key': key})
config = result['config']
return { 'Config': config }
def postConfig(self, key, config):
doc = {
'config': config
}
print MongoInstance.client['NodeMapper'].config.find_and_modify({'key': key}, {'$set': doc}, upsert=True, new=True)
return { 'result': 'inserted' }
def getData(self, key):
result = MongoInstance.client['NodeMapper'].data.find_one({'key': key})
nodes = result['nodes']
connections = result['connections']
return { 'Nodes': nodes, 'Connections': connections }
def postData(self, key, nodes, connections):
doc = {
'nodes': nodes,
'connections': connections
}
print MongoInstance.client['NodeMapper'].data.find_and_modify({'key': key}, {'$set': doc}, upsert=True, new=True)
return { 'result': 'inserted' }
# Client corresponding to a single connection
@property
def client(self):
if not hasattr(self, '_client'):
self._client = pymongo.MongoClient(host='localhost:27017')
return self._client
# A Singleton Object
MongoInstance = mongoInstance()
| mit | -9,180,188,330,341,057,000 | 32.081633 | 123 | 0.62739 | false | 3.963325 | true | false | false |
jayhorn/DynSlicer | do_like_javac/arg.py | 1 | 2759 | import argparse
import os
import sys
import tools
import capture
DEFAULT_OUTPUT_DIRECTORY = os.path.join(os.getcwd(), 'dljc-out')
# token that identifies the end of the options for do-like-javac and the beginning
# of the compilation command
CMD_MARKER = '--'
class AbsolutePathAction(argparse.Action):
"""Convert a path from relative to absolute in the arg parser"""
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, os.path.abspath(values))
base_parser = argparse.ArgumentParser(add_help=False)
base_group = base_parser.add_argument_group('global arguments')
base_group.add_argument('-o', '--out', metavar='<directory>',
default=DEFAULT_OUTPUT_DIRECTORY, dest='output_directory',
action=AbsolutePathAction,
help='The directory to log results.')
base_group.add_argument('--log_to_stderr', action='store_true',
help='''Redirect log messages to stderr instead of log file''')
base_group.add_argument('-t', '--tool', metavar='<tool>',
action='store',default=None,
help='A comma separated list of tools to run. Valid tools: ' + ', '.join(tools.TOOLS))
# base_group.add_argument('-c', '--checker', metavar='<checker>',
# action='store',default='NullnessChecker',
# help='A checker to check (for checker/inference tools)')
def split_args_to_parse():
split_index = len(sys.argv)
if CMD_MARKER in sys.argv:
split_index = sys.argv.index(CMD_MARKER)
args, cmd = sys.argv[1:split_index], sys.argv[split_index + 1:]
command_name = os.path.basename(cmd[0]) if len(cmd) > 0 else None
capturer = capture.get_capturer(command_name)
return args, cmd, capturer
def create_argparser():
parser = argparse.ArgumentParser(
parents=[base_parser] + tools.parsers(),
add_help=False,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
group = parser.add_argument_group(
'supported compiler/build-system commands')
supported_commands = ', '.join(capture.supported_commands())
group.add_argument(
CMD_MARKER,
metavar='<cmd>',
dest='nullarg',
default=None,
help=('Command to run the compiler/build-system. '
'Supported build commands: ' + supported_commands),
)
return parser
def parse_args():
to_parse, cmd, capturer = split_args_to_parse()
global_argparser = create_argparser()
args = global_argparser.parse_args(to_parse)
if capturer:
return args, cmd, capturer
else:
global_argparser.print_help()
sys.exit(os.EX_OK)
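# Illustrative command line (the tool name and build command are assumptions):
#
#     dljc -t checker -o dljc-out -- mvn compile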
| apache-2.0 | -8,718,145,238,980,006,000 | 33.924051 | 110 | 0.633563 | false | 3.924609 | false | false | false |
mikeckennedy/write-pythonic-code-demos | code/ch_04_collections/_04_generators.py | 1 | 1132 | # ############ yield and generators #############
# Create by Michael Kennedy (@mkennedy)
# Fibonacci numbers:
# 1, 1, 2, 3, 5, 8, 13, 21, ...
def classic_fibonacci(limit):
nums = []
current, nxt = 0, 1
while current < limit:
current, nxt = nxt, nxt + current
nums.append(current)
return nums
# can we do better?
def generator_fibonacci():
current, nxt = 0, 1
while True:
current, nxt = nxt, nxt + current
yield current
# generators are composable:
def even_generator(numbers):
for n in numbers:
if n % 2 == 0:
yield n
# consume both generators as a pipeline here
def even_fib():
for n in even_generator(generator_fibonacci()):
yield n
if __name__ == '__main__':
print("Classic")
for m in classic_fibonacci(100):
print(m, end=', ')
print()
print("generator")
for m in generator_fibonacci():
print(m, end=', ')
if m > 100:
break
print()
print("composed")
for m in even_fib():
print(m, end=', ')
if m > 1000000:
break
print()
| mit | -471,263,068,550,504,060 | 17.866667 | 51 | 0.537102 | false | 3.430303 | false | false | false |
zenefits/sentry | src/sentry/plugins/base/configuration.py | 4 | 4816 | from __future__ import absolute_import
import logging
import six
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.template.loader import render_to_string
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.contrib import messages
from django.http import Http404
from requests.exceptions import HTTPError
from sentry import options
from sentry.api import client
from sentry.api.serializers import serialize
from sentry.models import ProjectOption
from sentry.utils import json
def react_plugin_config(plugin, project, request):
response = client.get('/projects/{}/{}/plugins/{}/'.format(
project.organization.slug,
project.slug,
plugin.slug,
), request=request)
return mark_safe("""
<div id="ref-plugin-config"></div>
<script>
$(function(){
ReactDOM.render(React.createFactory(Sentry.PluginConfig)({
project: %s,
organization: %s,
data: %s
}), document.getElementById('ref-plugin-config'));
});
</script>
""" % (
json.dumps_htmlsafe(serialize(project, request.user)),
json.dumps_htmlsafe(serialize(project.organization, request.user)),
json.dumps_htmlsafe(response.data)
))
def default_plugin_config(plugin, project, request):
if plugin.can_enable_for_projects() and \
not plugin.can_configure_for_project(project):
raise Http404()
plugin_key = plugin.get_conf_key()
form_class = plugin.get_conf_form(project)
template = plugin.get_conf_template(project)
if form_class is None:
return HttpResponseRedirect(reverse(
'sentry-manage-project', args=[project.organization.slug, project.slug]))
test_results = None
form = form_class(
request.POST if request.POST.get('plugin') == plugin.slug else None,
initial=plugin.get_conf_options(project),
prefix=plugin_key,
)
if form.is_valid():
if 'action_test' in request.POST and plugin.is_testable():
try:
test_results = plugin.test_configuration(project)
except Exception as exc:
if isinstance(exc, HTTPError):
test_results = '%s\n%s' % (exc, exc.response.text[:256])
elif hasattr(exc, 'read') and callable(exc.read):
test_results = '%s\n%s' % (exc, exc.read()[:256])
else:
logging.exception('Plugin(%s) raised an error during test',
plugin_key)
test_results = 'There was an internal error with the Plugin'
if not test_results:
test_results = 'No errors returned'
else:
for field, value in six.iteritems(form.cleaned_data):
key = '%s:%s' % (plugin_key, field)
if project:
ProjectOption.objects.set_value(project, key, value)
else:
options.set(key, value)
messages.add_message(
request, messages.SUCCESS,
_('Your settings were saved successfully.'))
return HttpResponseRedirect(request.path)
# TODO(mattrobenolt): Reliably determine if a plugin is configured
# if hasattr(plugin, 'is_configured'):
# is_configured = plugin.is_configured(project)
# else:
# is_configured = True
is_configured = True
return mark_safe(render_to_string(template, {
'form': form,
'request': request,
'plugin': plugin,
'plugin_description': plugin.get_description() or '',
'plugin_test_results': test_results,
'plugin_is_configured': is_configured,
}, context_instance=RequestContext(request)))
def default_issue_plugin_config(plugin, project, form_data):
plugin_key = plugin.get_conf_key()
for field, value in six.iteritems(form_data):
key = '%s:%s' % (plugin_key, field)
if project:
ProjectOption.objects.set_value(project, key, value)
else:
options.set(key, value)
def default_plugin_options(plugin, project):
form_class = plugin.get_conf_form(project)
if form_class is None:
return {}
NOTSET = object()
plugin_key = plugin.get_conf_key()
initials = plugin.get_form_initial(project)
for field in form_class.base_fields:
key = '%s:%s' % (plugin_key, field)
if project is not None:
value = ProjectOption.objects.get_value(project, key, NOTSET)
else:
value = options.get(key)
if value is not NOTSET:
initials[field] = value
return initials
| bsd-3-clause | -1,648,359,412,165,057,500 | 33.647482 | 85 | 0.617317 | false | 4.109215 | true | false | false |
angdraug/nova | nova/scheduler/client/report.py | 23 | 1801 | # Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import conductor
from nova import exception
from nova.i18n import _LI
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class SchedulerReportClient(object):
"""Client class for updating the scheduler."""
def __init__(self):
self.conductor_api = conductor.API()
def update_resource_stats(self, context, name, stats):
"""Creates or updates stats for the desired service.
:param context: local context
:param name: name of resource to update
:type name: immutable (str or tuple)
:param stats: updated stats to send to scheduler
:type stats: dict
"""
if 'id' in stats:
compute_node_id = stats['id']
updates = stats.copy()
del updates['id']
else:
raise exception.ComputeHostNotCreated(name=str(name))
self.conductor_api.compute_node_update(context,
{'id': compute_node_id},
updates)
LOG.info(_LI('Compute_service record updated for '
'%s') % str(name))
| apache-2.0 | 8,584,580,416,695,243,000 | 32.981132 | 78 | 0.625208 | false | 4.371359 | false | false | false |
xuru/bowling | app/models/frame.py | 1 | 3912 | from app import db
from app.models.base import BaseMixin
class Roll(db.Model, BaseMixin):
"""
The number of pins on a turn in bowling
.. py:attribute:: pins
The number of pins knocked down in a roll.
:type: int
"""
id = db.Column(db.Integer, primary_key=True)
pins = db.Column(db.Integer)
# for back ref
frame_id = db.Column(db.Integer, db.ForeignKey('frame.id'))
def __init__(self, frame, pins):
self.pins = pins
self.frame_id = frame.id
db.session.add(self)
class Frame(db.Model, BaseMixin):
"""
A frame in bowling.
.. py:attribute:: number
The fame number.
:type: int
.. py:attribute:: score
The total score for the frame (this is a running total calculated from previous frames)
:type: int
.. py:attribute:: rolls
A list of rolls in this frame.
:type: A list of :py:class:`app.models.frame.Roll`
"""
id = db.Column(db.Integer, primary_key=True)
number = db.Column(db.Integer)
score = db.Column(db.Integer)
# for back ref
player_id = db.Column(db.Integer, db.ForeignKey('player.id'))
rolls = db.relationship(
'Roll', backref=db.backref('frame', lazy='joined'), lazy='dynamic')
def __init__(self, player, number):
self.number = number
self.player_id = player.id
db.session.add(self)
def total_pins(self):
"""
Helper method to get the total number of pins in this frame.
:return: The total number of pins dropped in this frame.
:rtype: int
"""
return sum([roll.pins for roll in list(self.rolls.all())])
def get_rolls(self):
"""
Helper method to get the rolls in this frame.
:return: The rolls for this frame.
:rtype: A list of :py:class:`app.models.frame.Roll`
"""
return list(self.rolls.all())
def roll(self, pins):
"""
Add a roll to this frame.
:param int pins: The number of pins knocked over for this roll.
:return: The roll that was added.
:rtype: :py:class:`app.models.frame.Roll`
:raises Exception: If the allowed number of rolls has been exceeded.
"""
rolls = self.get_rolls()
rolls_allowed = 2
if self.number == 10 and len(rolls) and rolls[0].pins == 10:
rolls_allowed = 3
if len(rolls) >= rolls_allowed:
raise Exception("Exceeded maximum rolls")
roll = Roll(self, pins)
roll.save()
self.rolls.append(roll)
return roll
def is_strike(self):
"""
Helper method to determine if this frame is a strike.
:return: Truth
:rtype: bool
"""
if len(self.rolls.all()) == 1 and self.total_pins() == 10:
return True
return False
def is_spare(self):
"""
Helper method to determine if this frame is a spare.
:return: Truth
:rtype: bool
"""
if len(self.rolls.all()) == 2 and self.total_pins() == 10:
return True
return False
def is_complete(self):
"""
Checks if this frame is complete.
:return: Truth
:rtype: bool
"""
rolls = self.rolls.all()
if self.number == 10:
return self.is_complete10(rolls)
return sum([roll.pins for roll in rolls]) == 10 or len(rolls) == 2
def is_complete10(self, rolls):
"""
Takes frame 10 into account when it checks if this frame is complete.
:return: Truth
:rtype: bool
"""
        n = len(rolls)
        # a first-roll strike earns two extra rolls, so frame 10 needs three
        if n and rolls[0].pins == 10:
            return n == 3
        # otherwise frame 10 is done after the usual two rolls
        return n == 2
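    # Illustrative tenth-frame behaviour of the rules above (hypothetical
    # pin counts, not executed):
    #   first roll 10 (strike) -> complete only after 3 rolls
    #   rolls of 5 then 3      -> complete after 2 rolls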
def __repr__(self):
return '<Frame %d %d>' % (self.id, self.score)
| mit | -4,616,859,188,055,716,000 | 23.45 | 95 | 0.554448 | false | 3.776062 | false | false | false |
forallsystems/21stbadgepathways | users/middleware.py | 1 | 2303 | from django.conf import settings
from forallschools.apps import constants
from forallschools.apps.core.models import App
from django.http import HttpResponseRedirect, HttpResponse
import six
from django.contrib import messages
from django.shortcuts import redirect
from django.utils.http import urlquote
from social.exceptions import SocialAuthBaseException
class SocialAuthExceptionMiddleware(object):
"""Middleware that handles Social Auth AuthExceptions by providing the user
with a message, logging an error, and redirecting to some next location.
By default, the exception message itself is sent to the user and they are
redirected to the location specified in the SOCIAL_AUTH_LOGIN_ERROR_URL
setting.
This middleware can be extended by overriding the get_message or
get_redirect_uri methods, which each accept request and exception.
"""
def process_exception(self, request, exception):
self.strategy = getattr(request, 'social_strategy', None)
if self.strategy is None or self.raise_exception(request, exception):
return
if isinstance(exception, SocialAuthBaseException):
backend_name = self.strategy.backend.name
message = self.get_message(request, exception)
url = self.get_redirect_uri(request, exception)
message = message.replace('google-oauth2','Google')
if request.user.is_authenticated():
# Ensure that messages are added to authenticated users only,
# otherwise this fails
messages.error(request, message,
extra_tags='' + backend_name)
else:
url += ('?' in url and '&' or '?') + \
'message={0}&backend={1}'.format(urlquote(message),
backend_name)
return redirect(url)
def raise_exception(self, request, exception):
return self.strategy.setting('RAISE_EXCEPTIONS', settings.DEBUG)
def get_message(self, request, exception):
return six.text_type(exception)
def get_redirect_uri(self, request, exception):
return self.strategy.setting('LOGIN_ERROR_URL') | gpl-2.0 | 9,084,169,206,132,100,000 | 39.160714 | 79 | 0.647851 | false | 4.768116 | false | false | false |
hlmgreen/LeafPy | Linux/Capturev9.py | 3 | 18774 | __author__ = 'mark greenwood'
import wx
import time
import pygame
import pygame.camera
import os
# initialises connected devices
pygame.camera.init()
camList = pygame.camera.list_cameras()
class MyApp(wx.App):
"""Builds the main GUI application"""
def OnInit(self):
self.frame = MyFrame()
self.SetTopWindow(self.frame)
self.frame.CenterOnScreen()
self.frame.Show()
return True
class StreamWindow(wx.Frame):
"""Builds a window for displaying a camera test stream upon selection of test button"""
def __init__(self, parent, id):
wx.Frame.__init__(self, parent, id, title="Test", size=(1280, 720), style=wx.DEFAULT_FRAME_STYLE)
wx.Frame.CenterOnScreen(self)
self.panel = wx.Panel(self)
def draw(self, selection):
"""Blits images to the window and draws grid lines on top of each one. Grid lines correspond to area for
cropping in tracking so plants must fit within."""
cam = pygame.camera.Camera(selection, (1280, 720)) # gets selected camera
self.Bind(wx.EVT_CLOSE, self.close_stream) # binds close event to X button
try:
cam.start()
self.run = True
            while self.run:
img = cam.get_image()
pygame.draw.lines(img, (0, 0, 0), False, [[130, 20], [1150, 20], [1150, 700], [130, 700], [130, 20]], 2)
pygame.draw.lines(img, (0, 0, 0), False, [[334, 20], [334, 700], [538, 700], [538, 20], [742, 20],
[742, 700], [946, 700], [946, 20]], 2)
pygame.draw.lines(img, (0, 0, 0), False, [[130, 247], [1150, 247], [1150, 474], [130, 474]], 2)
img = pygame.image.tostring(img, "RGB", False) #converts to cross package format
bitmap = wx.BitmapFromBuffer(1280, 720, img) #convert to bitmap for display
self.bitmap = wx.StaticBitmap(self.panel, bitmap=bitmap)
self.Update()
self.Show()
wx.Yield()
cam.stop()
self.Destroy() # stop cam and then close window
except SystemError:
print "Please select a camera"
self.Destroy()
def close_stream(self, event):
"""Close stream event- breaks the loop on click of X button"""
self.run = False
class MyFrame(wx.Frame):
"""Builds the main GUI frame containing all the input selections and events"""
def __init__(self):
super(MyFrame, self).__init__(None, id=wx.ID_ANY, title="Image Capture", size=(1000, 600),
name="MyFrame")
#Creates the panel to sit inside the main window
self.panel = wx.Panel(self)
self.panel.SetBackgroundColour(wx.WHITE)
#Camera 1 inputs
text_box1 = wx.StaticText(self.panel, label="Plate 1", pos=(5, 30))
self.combo_box1 = wx.ComboBox(self.panel, value='Select camera', choices=['Select camera'] + camList,
style=wx.CB_DROPDOWN, pos=(80, 25))
test_button1 = wx.Button(self.panel, label="Test camera", pos=(285, 25), size=(150, 28))
self.Bind(wx.EVT_BUTTON, self.on_click, test_button1)
#Camera 2
text_box2 = wx.StaticText(self.panel, label="Plate 2", pos=(5, 65))
self.combo_box2 = wx.ComboBox(self.panel, value='Select camera', choices=['Select camera'] + camList,
style=wx.CB_DROPDOWN, pos=(80, 60))
test_button2 = wx.Button(self.panel, label="Test camera", pos=(285, 60), size=(150, 28))
self.Bind(wx.EVT_BUTTON, self.on_click2, test_button2)
#cam 3
text_box3 = wx.StaticText(self.panel, label="Plate 3", pos=(5, 100))
self.combo_box3 = wx.ComboBox(self.panel, value='Select camera', choices=['Select camera'] + camList,
style=wx.CB_DROPDOWN, pos=(80, 95))
test_button3 = wx.Button(self.panel, label="Test camera", pos=(285, 95), size=(150, 28))
self.Bind(wx.EVT_BUTTON, self.on_click3, test_button3)
#cam 4
text_box4 = wx.StaticText(self.panel, label="Plate 4", pos=(5, 135))
self.combo_box4 = wx.ComboBox(self.panel, value='Select camera', choices=['Select camera'] + camList,
style=wx.CB_DROPDOWN, pos=(80, 130))
test_button4 = wx.Button(self.panel, label="Test camera", pos=(285, 130), size=(150, 28))
self.Bind(wx.EVT_BUTTON, self.on_click4, test_button4)
#cam 5
text_box5 = wx.StaticText(self.panel, label="Plate 5", pos=(5, 170))
self.combo_box5 = wx.ComboBox(self.panel, value='Select camera', choices=['Select camera'] + camList,
style=wx.CB_DROPDOWN, pos=(80, 165))
test_button5 = wx.Button(self.panel, label="Test camera", pos=(285, 165), size=(150, 28))
self.Bind(wx.EVT_BUTTON, self.on_click5, test_button5)
#cam 6
text_box6 = wx.StaticText(self.panel, label="Plate 6", pos=(5, 205))
self.combo_box6 = wx.ComboBox(self.panel, value='Select camera', choices=['Select camera'] + camList,
style=wx.CB_DROPDOWN, pos=(80, 200))
test_button6 = wx.Button(self.panel, label="Test camera", pos=(285, 200), size=(150, 28))
self.Bind(wx.EVT_BUTTON, self.on_click6, test_button6)
#cam 7
text_box7 = wx.StaticText(self.panel, label="Plate 7", pos=(5, 240))
self.combo_box7 = wx.ComboBox(self.panel, value='Select camera', choices=['Select camera'] + camList,
style=wx.CB_DROPDOWN, pos=(80, 235))
test_button7 = wx.Button(self.panel, label="Test camera", pos=(285, 235), size=(150, 28))
self.Bind(wx.EVT_BUTTON, self.on_click7, test_button7)
#cam 8
text_box8 = wx.StaticText(self.panel, label="Plate 8", pos=(5, 275))
self.combo_box8 = wx.ComboBox(self.panel, value='Select camera', choices=['Select camera'] + camList,
style=wx.CB_DROPDOWN, pos=(80, 270))
test_button8 = wx.Button(self.panel, label="Test camera", pos=(285, 270), size=(150, 28))
self.Bind(wx.EVT_BUTTON, self.on_click8, test_button8)
#cam 9
text_box9 = wx.StaticText(self.panel, label="Plate 9", pos=(500, 30))
self.combo_box9 = wx.ComboBox(self.panel, value='Select camera', choices=['Select camera'] + camList,
style=wx.CB_DROPDOWN, pos= (575, 25))
test_button9 = wx.Button(self.panel, label="Test camera", pos=(780, 25), size=(150, 28))
self.Bind(wx.EVT_BUTTON, self.on_click9, test_button9)
#cam 10
text_box10 = wx.StaticText(self.panel, label="Plate 10", pos=(500, 65))
self.combo_box10 = wx.ComboBox(self.panel, value='Select camera', choices=['Select camera'] + camList,
style=wx.CB_DROPDOWN, pos=(575, 60))
test_button10 = wx.Button(self.panel, label="Test camera", pos=(780, 60), size=(150, 28))
self.Bind(wx.EVT_BUTTON, self.on_click10, test_button10)
#cam 11
text_box11 = wx.StaticText(self.panel, label="Plate 11", pos=(500, 100))
self.combo_box11 = wx.ComboBox(self.panel, value='Select camera', choices=['Select camera'] + camList,
style=wx.CB_DROPDOWN, pos=(575, 95))
test_button11 = wx.Button(self.panel, label="Test camera", pos=(780, 95), size=(150, 28))
self.Bind(wx.EVT_BUTTON, self.on_click11, test_button11)
#cam 12
text_box12 = wx.StaticText(self.panel, label="Plate 12", pos=(500, 135))
self.combo_box12 = wx.ComboBox(self.panel, value='Select camera', choices=['Select camera'] + camList,
style=wx.CB_DROPDOWN, pos=(575, 130))
test_button12 = wx.Button(self.panel, label="Test camera", pos=(780, 130), size=(150, 28))
self.Bind(wx.EVT_BUTTON, self.on_click12, test_button12)
#cam 13
text_box13 = wx.StaticText(self.panel, label="Plate 13", pos=(500, 170))
self.combo_box13 = wx.ComboBox(self.panel, value='Select camera', choices=['Select camera'] + camList,
style=wx.CB_DROPDOWN, pos=(575, 165))
test_button13 = wx.Button(self.panel, label="Test camera", pos=(780, 165), size=(150, 28))
self.Bind(wx.EVT_BUTTON, self.on_click13, test_button13)
#cam 14
text_box14 = wx.StaticText(self.panel, label="Plate 14", pos=(500, 205))
self.combo_box14 = wx.ComboBox(self.panel, value='Select camera', choices=['Select camera'] + camList,
style=wx.CB_DROPDOWN, pos=(575, 200))
test_button14 = wx.Button(self.panel, label="Test camera", pos=(780, 200), size=(150, 28))
self.Bind(wx.EVT_BUTTON, self.on_click14, test_button14)
#cam 15
text_box15 = wx.StaticText(self.panel, label="Plate 15", pos=(500, 240))
self.combo_box15 = wx.ComboBox(self.panel, value='Select camera', choices=['Select camera'] + camList,
style=wx.CB_DROPDOWN, pos=(575, 235))
test_button15 = wx.Button(self.panel, label="Test camera", pos=(780, 235), size=(150, 28))
self.Bind(wx.EVT_BUTTON, self.on_click15, test_button15)
#cam 16
text_box16 = wx.StaticText(self.panel, label="Plate 16", pos=(500, 275))
self.combo_box16 = wx.ComboBox(self.panel,value='Select camera', choices=['Select camera'] + camList,
style=wx.CB_DROPDOWN, pos=(575, 270))
test_button16 = wx.Button(self.panel, label="Test camera", pos=(780, 270), size=(150, 28))
self.Bind(wx.EVT_BUTTON, self.on_click16, test_button16)
#Time data, run button and save path
save_dialog = wx.StaticText(self.panel, label='Save directory', pos=(400, 325))
save_button = wx.Button(self.panel, label='....', pos=(510, 320), size=(80,28))
self.Bind(wx.EVT_BUTTON, self.on_click18, save_button)
time_text = wx.StaticText(self.panel, label="Time Interval (s)", pos=(400, 352))
self.time_input = wx.TextCtrl(self.panel, pos=(510, 350))
cycle_text = wx.StaticText(self.panel, label="Cycles", pos=(400, 382))
self.cycle_input = wx.TextCtrl(self.panel, pos=(510, 380))
run_button = wx.Button(self.panel, -1, size=(190, 30), pos=(400, 415), label='Run Program')
self.Bind(wx.EVT_BUTTON, self.on_click17, run_button)
#error/progress text box
self.error_box_text = wx.TextCtrl(self.panel, value='', size=(990, 120), pos=(5, 475),
style = wx.TE_READONLY + wx.TE_MULTILINE)
self.gauge = wx.Gauge(self.panel, size=(990, 20), pos=(5, 450))
#events - converts drop down camera selection to string which is passed to the stream window.
def on_click(self, event):
selection = self.combo_box1.GetStringSelection()
stream_w = StreamWindow(parent=None, id=-1)
stream_w.draw(selection)
def on_click2(self, event):
selection = self.combo_box2.GetStringSelection()
stream_w = StreamWindow(parent=None, id=-1)
stream_w.draw(selection)
def on_click3(self, event):
selection = self.combo_box3.GetStringSelection()
stream_w = StreamWindow(parent=None, id=-1)
stream_w.draw(selection)
def on_click4(self, event):
selection = self.combo_box4.GetStringSelection()
stream_w = StreamWindow(parent=None, id=-1)
stream_w.draw(selection)
def on_click5(self, event):
selection = self.combo_box5.GetStringSelection()
stream_w = StreamWindow(parent=None, id=-1)
stream_w.draw(selection)
def on_click6(self, event):
selection = self.combo_box6.GetStringSelection()
stream_w = StreamWindow(parent=None, id=-1)
stream_w.draw(selection)
def on_click7(self, event):
selection = self.combo_box7.GetStringSelection()
stream_w = StreamWindow(parent=None, id=-1)
stream_w.draw(selection)
def on_click8(self, event):
selection = self.combo_box8.GetStringSelection()
stream_w = StreamWindow(parent=None, id=-1)
stream_w.draw(selection)
def on_click9(self, event):
selection = self.combo_box9.GetStringSelection()
stream_w = StreamWindow(parent=None, id=-1)
stream_w.draw(selection)
def on_click10(self, event):
selection = self.combo_box10.GetStringSelection()
stream_w = StreamWindow(parent=None, id=-1)
stream_w.draw(selection)
def on_click11(self, event):
selection = self.combo_box11.GetStringSelection()
stream_w = StreamWindow(parent=None, id=-1)
stream_w.draw(selection)
def on_click12(self, event):
selection = self.combo_box12.GetStringSelection()
stream_w = StreamWindow(parent=None, id=-1)
stream_w.draw(selection)
def on_click13(self, event):
selection = self.combo_box13.GetStringSelection()
stream_w = StreamWindow(parent=None, id=-1)
stream_w.draw(selection)
def on_click14(self, event):
selection = self.combo_box14.GetStringSelection()
stream_w = StreamWindow(parent=None, id=-1)
stream_w.draw(selection)
def on_click15(self, event):
selection = self.combo_box15.GetStringSelection()
stream_w = StreamWindow(parent=None, id=-1)
stream_w.draw(selection)
def on_click16(self, event):
selection = self.combo_box16.GetStringSelection()
stream_w = StreamWindow(parent=None, id=-1)
stream_w.draw(selection)
def on_click18(self, event):
self.save_dlg = wx.DirDialog(self, 'Choose or create a directory for your test', defaultPath=os.getcwd(),
style=wx.DD_CHANGE_DIR)
self.save_dlg.ShowModal()
def camerasnp(self, cam, save_as):
"""This capture an image and saves it, cam is the name of the device, saveas is the name to save document as"""
pygame.camera.init()
cam = pygame.camera.Camera(cam, (1920, 1080)) # change to cam resolution
cam.start()
img = cam.get_image()
pygame.image.save(img, save_as) # saves image
cam.stop()
def timelapser(self, time_sec, loops, matrix, save_dlg):
"""Runs time-lapse with measured time between each cycle"""
print 'Time interval = %s' % time_sec
print 'Number of intervals = %s' % loops
counter = 0
keys = matrix.keys()
keys.sort()
self.error_box_text.WriteText('Running %s loops at %s seconds intervals' % (str(loops), time_sec))
wx.Yield() # pauses the process momentarily to update text box
while counter < loops:
old_time = time.time()
text = 'Running loop %s' % counter
print text
self.error_box_text.WriteText('\n%s' % text) # updates with loop number
self.gauge.SetValue(counter) # updates progress bar
wx.Yield()
counter += 1
for cam_shoot in keys:
if matrix[cam_shoot] == "":
continue
else:
for snap in range(0, 3): # takes 3 images at each time point
self.camerasnp(matrix[cam_shoot], '%s/Plate%s-%s-c%s.png' % (save_dlg, cam_shoot, counter, snap))
new_time = time.time()
            # never pass a negative duration if the captures overran the interval
            time.sleep(max(0, time_sec - (new_time - old_time)))
def on_click17(self, event):
"""Main run event - gets selected inputs and then return error messages if invalid else runs program"""
try:
all_selected_cameras = {1: (self.combo_box1.GetStringSelection()),
2: (self.combo_box2.GetStringSelection()),
3: (self.combo_box3.GetStringSelection()),
4: (self.combo_box4.GetStringSelection()),
5: (self.combo_box5.GetStringSelection()),
6: (self.combo_box6.GetStringSelection()),
7: (self.combo_box7.GetStringSelection()),
8: (self.combo_box8.GetStringSelection()),
9: (self.combo_box9.GetStringSelection()),
10: (self.combo_box10.GetStringSelection()),
11: (self.combo_box11.GetStringSelection()),
12: (self.combo_box12.GetStringSelection()),
13: (self.combo_box13.GetStringSelection()),
14: (self.combo_box14.GetStringSelection()),
15: (self.combo_box15.GetStringSelection()),
16: (self.combo_box16.GetStringSelection())}
run_time = float(self.time_input.GetValue())
loops = float(self.cycle_input.GetValue())
save_dlg = self.save_dlg.GetPath()
self.gauge.SetRange(loops-1) # sets progress gauge max
#error handling - checks if inputs are valid
for key, value in all_selected_cameras.items(): # if somebody clicks 'sel camera' it changes to empty string
if value == 'Select camera':
all_selected_cameras[key] = ''
if all(val == '' for val in all_selected_cameras.values()): # prints an error if no cam is selected
self.error_box_text.WriteText('Please select a camera\n')
if run_time < 60: # this prevents errors where time taken for images is longer than interval
self.error_box_text.WriteText('Error - please select a longer interval')
#if inputs are valid program is run
else:
self.error_box_text.WriteText('Saving to %s\n' % save_dlg)
self.timelapser(run_time, loops, all_selected_cameras, save_dlg)
complete_dialog = wx.MessageDialog(None, message="Image capture complete",
style=wx.OK + wx.ID_JUSTIFY_CENTER + wx.ICON_INFORMATION)
complete_dialog.ShowModal()
except(ValueError, AttributeError):
self.error_box_text.WriteText('Error: please select all parameters\n')
if __name__ == '__main__':
app = MyApp(False)
app.MainLoop()
| apache-2.0 | 7,260,016,199,539,547,000 | 48.275591 | 124 | 0.577448 | false | 3.689133 | true | false | false |
jbradberry/django-diplomacy | diplomacy/templatetags/maps.py | 1 | 1280 | import json
from django import template
from ..engine import standard
from ..engine.utils import subregion_display
register = template.Library()
colors = {'austria-hungary': '#a41a10',
'england': '#1010a3',
'france': '#126dc0',
'germany': '#5d5d5d',
'italy': '#30a310',
'russia': '#7110a2',
'turkey': '#e6e617'}
@register.inclusion_tag('diplomacy/map_card.html', takes_context=True)
def map(context, width, height):
game = context['game']
turn = context.get('turn', game.current_turn())
data = {'width': width, 'height': height}
data['colors'] = json.dumps(colors)
if turn:
units = turn.get_units()
owns = turn.get_ownership()
data['owns'] = json.dumps(
[(o['territory'], o['government']) for o in owns]
)
data['units'] = json.dumps(
[(subregion_display(u['subregion']), u['u_type'], u['government'])
for u in units
if not u['dislodged']]
)
else:
data['owns'] = json.dumps(
[(T, P)
for P in standard.powers
for T, (p, sc, unit) in standard.starting_state.items()
if p == P])
data['units'] = json.dumps([])
return data
| mit | -5,957,185,914,430,594,000 | 26.826087 | 78 | 0.5375 | false | 3.431635 | false | false | false |
menpo/lsfm | lsfm/visualize.py | 1 | 2233 | import numpy as np
from menpo.image import Image
from menpo.shape import ColouredTriMesh
from menpo.transform import AlignmentSimilarity
from menpo3d.rasterize import rasterize_mesh
from scipy.stats import chi2
from .camera import perspective_camera_for_template
from .data import load_template
from .shading import lambertian_shading
from matplotlib import pyplot as plt
def rasterize_mesh_at_template(mesh, img_shape=(640, 480),
pose_angle_deg=0, shaded=False):
camera = perspective_camera_for_template(img_shape,
pose_angle_deg=pose_angle_deg)
mesh_aligned = AlignmentSimilarity(mesh, load_template()).apply(mesh)
if shaded:
mesh_aligned = lambertian_shading(mesh_aligned)
return rasterize_mesh(camera.apply(mesh_aligned), img_shape)
def visualize_nicp_weighting(template, weighting):
colours = ((weighting[:, None] * np.array([1, 0, 0])) +
((1 - weighting[:, None]) * np.array([1, 1, 1])))
print('min: {}, max: {}'.format(weighting.min(), weighting.max()))
ColouredTriMesh(template.points, trilist=template.trilist,
colours=colours).view()
def visualize_pruning(w_norm, n_retained,
title='Initial model weights vs theoretical for pruning'):
fig, ax1 = plt.subplots()
ax1.set_title(title)
ax1.hist(w_norm, normed=True, bins=200, alpha=0.6, histtype='stepfilled',
range=[0, n_retained * 5])
ax1.axvline(x=n_retained, linewidth=1, color='r')
ax1.set_ylabel('PDF', color='b')
ax2 = ax1.twinx()
ax2.set_ylabel('Survival Function', color='r')
ax1.set_xlabel('w_norm')
x = np.linspace(chi2.ppf(0.001, n_retained),
chi2.ppf(0.999, n_retained), 100)
ax2.plot(x, chi2.sf(x, n_retained),
             'g-', lw=1, alpha=0.6, label='chi2 sf')
ax1.plot(x, chi2.pdf(x, n_retained),
'r-', lw=1, alpha=0.6, label='chi2 pdf')
def visualize_nicp_result(mesh):
l = rasterize_mesh_at_template(mesh, pose_angle_deg=+20, shaded=True)
r = rasterize_mesh_at_template(mesh, pose_angle_deg=-20, shaded=True)
return Image(np.concatenate([l.pixels, r.pixels], axis=-1))
| bsd-3-clause | -179,740,341,825,600,160 | 35.606557 | 80 | 0.640842 | false | 3.033967 | false | false | false |
cgio/invader | invader.py | 1 | 6905 | # Invader
# https://github.com/cgio/invader
def fs(path, pattern, start_offset=0, chunk_size=-1, chunk_limit=-1,
find_all=True):
"""
Yields offset of found byte pattern within a file.
Supports wildcard bytes, starting offset, reading in chunks, and read
limits.
Args:
path (str): The file path.
pattern (str): The sequence of bytes, e.g. 'FF??E82623D7'.
'??' represents a single byte wildcard.
Spaces between bytes are supported.
start_offset (int): The offset to start searching from.
chunk_size (int): The read length per chunk (-1 == entire file).
chunk_limit (int): The max # of chunks to read (-1 == all chunks).
find_all (bool): True == find all instances. False == only first
instance.
Returns:
int: On success, yield found offset.
bool: On error, yield False.
Example:
import invader
for found_offset in invader.fs(
r'C:\target.exe',
'?? 01 55 ?? ?? 4B 20 1E 1D ?? 15',
start_offset=0x1000,
chunk_size=1024,
chunk_limit=10,
find_all=False
):
if found_offset is not False:
print(hex(found_offset))
"""
# Only -1 or > 0 is allowed
if chunk_size == 0 or chunk_limit == 0:
yield False
pattern = pattern.replace(' ', '')
# If no path, invalid pattern, or pattern is all wildcards
if len(path) == 0 or len(pattern) < 2 or len(
pattern) % 2 != 0 or pattern.count('?') == len(pattern):
yield False
# Correct invalid values
if start_offset < 0:
start_offset = 0
# If chunk_size == entire file, chunk_limit becomes irrelevant
if chunk_size == -1:
chunk_limit = -1
# Get largest segment bytes
pattern_largest_segment = list(filter(None, pattern.split('??')))
pattern_largest_segment.sort(key=len, reverse=True)
pattern_largest_segment = pattern_largest_segment[0]
pattern_largest_segment_position = pattern.index(
pattern_largest_segment) // 2
pattern_largest_segment = bytes.fromhex(pattern_largest_segment)
# Search method 1 (no wildcards)
if pattern.count('?') == 0:
pattern_bytes = bytes.fromhex(pattern)
chunk_position = 0
with open(path, 'rb') as f:
if start_offset > 0:
f.seek(start_offset)
while True:
if chunk_limit > 0:
if chunk_position / chunk_size >= chunk_limit:
return
try:
data = f.read(chunk_size)
except MemoryError:
yield False
if not data:
return
i = 0
found_position = 0
while True:
try:
found_position = data.index(pattern_bytes,
found_position + i)
if chunk_size > 0:
yield chunk_position + found_position + \
start_offset
else:
yield found_position + start_offset
if find_all is False:
return
except ValueError:
break
i += 1
chunk_position += chunk_size
continue
return
# Create a list of wildcard positions
pattern_wildcard_positions = []
for i in range(0, len(pattern), 2):
pattern_byte = pattern[i:i + 2]
if pattern_byte == '??':
pattern_wildcard_positions.append(i // 2)
# Remove wildcards from pattern string and convert to bytes
pattern_len = len(pattern) // 2
pattern_bytes = pattern.replace('?', '')
pattern_bytes = bytes.fromhex(pattern_bytes)
# Search method 2 (wildcards)
possible_positions = []
end_of_file = False
first_result = True
chunk_position = 0
with open(path, 'rb') as f:
if start_offset > 0:
f.seek(start_offset)
while not end_of_file:
if chunk_limit > 0:
if chunk_position / chunk_size >= chunk_limit:
return
try:
data = f.read(chunk_size)
except MemoryError:
yield False
if not data:
end_of_file = True
chunk_search = True
while chunk_search:
try:
if first_result is True:
possible_positions.append(
data.index(pattern_largest_segment))
first_result = False
else:
possible_positions.append(
data.index(pattern_largest_segment,
possible_positions[-1] + 1))
except ValueError:
if chunk_size > 0:
chunk_position += chunk_size
chunk_search = False
for possible_position in possible_positions:
possible_position -= pattern_largest_segment_position
match_count = 0
pattern_bytes_pos = 0
data_offset_pos = 0
i = 0
while i < pattern_len:
if i in pattern_wildcard_positions:
match_count += 1
data_offset_pos += 1
i += 1
continue
elif pattern_bytes[pattern_bytes_pos] == data[
possible_position + data_offset_pos]:
match_count += 1
data_offset_pos += 1
pattern_bytes_pos += 1
i += 1
continue
i += 1
if match_count == pattern_len:
if find_all is True:
if chunk_size > 0:
yield chunk_position + possible_position + \
start_offset - chunk_size
else:
yield possible_position + start_offset
else:
yield possible_position + chunk_position + \
start_offset - chunk_size
return
possible_positions = []
first_result = True
return
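# Minimal usage sketch (hypothetical path and pattern): since fs() is a
# generator, the first hit can also be pulled with next() and a default.
# first = next(fs(r'C:\target.exe', 'FF??E8', find_all=False), False)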
| mit | 4,482,172,912,568,077,300 | 35.123656 | 74 | 0.459232 | false | 4.960489 | false | false | false |
umlfri/umlfri2 | umlfri2/application/addon/online/addon.py | 1 | 1390 | class OnlineAddOn:
def __init__(self, application, identifier, versions):
self.__application = application
self.__identifier = identifier
self.__versions = tuple(sorted(versions, key=lambda ver: ver.version, reverse=True))
for version in self.__versions:
version._set_addon(self)
@property
def identifier(self):
return self.__identifier
@property
def name(self):
return self.__versions[0].name
@property
def author(self):
return self.__versions[0].author
@property
def homepage(self):
return self.__versions[0].homepage
@property
def license(self):
return self.__versions[0].license
@property
def icon(self):
return self.__versions[0].icon
@property
def description(self):
return self.__versions[0].description
@property
def requirements(self):
yield from self.__versions[0].requirements
@property
def provisions(self):
yield from self.__versions[0].provisions
@property
def versions(self):
yield from self.__versions
@property
def latest_version(self):
return self.__versions[0]
@property
def local_addon(self):
return self.__application.addons.local.get_addon(self.__identifier)
| gpl-3.0 | -5,201,756,451,669,748,000 | 23.821429 | 92 | 0.597122 | false | 4.66443 | false | false | false |
frewsxcv/servo | components/style/binding_tools/check_bindings.py | 10 | 1110 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import re
LICENSE = """\
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/* automatically generated by check_bindings.py. */
"""
BINDINGS_PATH = os.path.join("..", "gecko_bindings")
INPUT_FILE = os.path.join(BINDINGS_PATH, "bindings.rs")
OUTPUT_FILE = os.path.join(BINDINGS_PATH, "check_bindings.rs")
TEMPLATE = """\
[ Servo_{name}, bindings::Servo_{name} ];
"""
with open(INPUT_FILE, "r") as bindings, open(OUTPUT_FILE, "w+") as tests:
tests.write(LICENSE)
tests.write("fn assert_types() {\n")
    pattern = re.compile(r"fn\s*Servo_([_a-zA-Z0-9]+)\s*\(")
for line in bindings:
match = pattern.search(line)
if match:
tests.write(TEMPLATE.format(name=match.group(1)))
tests.write("}\n")
| mpl-2.0 | -5,468,983,674,139,104,000 | 29 | 73 | 0.654955 | false | 3.162393 | false | false | false |
antivirtel/Flexget | flexget/plugins/input/betaseries_list.py | 18 | 6366 | """Input plugin for www.betaseries.com"""
from __future__ import unicode_literals, division, absolute_import
from hashlib import md5
import logging
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils import requests
from flexget.utils.cached_input import cached
log = logging.getLogger('betaseries_list')
API_URL_PREFIX = 'http://api.betaseries.com/'
class BetaSeriesList(object):
"""
    Emits an entry for each series followed by one or more BetaSeries accounts.
See http://www.betaseries.com/
Configuration examples:
# will get all series followed by the account identified by your_user_name
betaseries_list:
username: your_user_name
password: your_password
api_key: your_api_key
# will get all series followed by the account identified by some_other_guy
betaseries_list:
username: your_user_name
password: your_password
api_key: your_api_key
members:
- some_other_guy
# will get all series followed by the accounts identified by guy1 and guy2
betaseries_list:
username: your_user_name
password: your_password
api_key: your_api_key
members:
- guy1
- guy2
Api key can be requested at http://www.betaseries.com/api.
This plugin is meant to work with the import_series plugin as follow:
import_series:
from:
betaseries_list:
username: xxxxx
password: xxxxx
api_key: xxxxx
"""
schema = {
'type': 'object',
'properties': {
'username': {'type': 'string'},
'password': {'type': 'string'},
'api_key': {'type': 'string'},
'members': {
'type': 'array',
'items': {
"title": 'member name',
"type": "string"
}
}
},
'required': ['username', 'password', 'api_key'],
'additionalProperties': False
}
@cached('betaseries_list', persist='2 hours')
def on_task_input(self, task, config):
username = config['username']
password = config['password']
api_key = config['api_key']
members = config.get('members', [username])
titles = set()
try:
user_token = create_token(api_key, username, password)
for member in members:
titles.update(query_series(api_key, user_token, member))
except (requests.RequestException, AssertionError) as err:
log.critical('Failed to get series at BetaSeries.com: %s' % err.message, exc_info=err)
log.verbose("series: " + ", ".join(titles))
entries = []
for t in titles:
e = Entry()
e['title'] = t
entries.append(e)
return entries
def create_token(api_key, login, password):
"""
    Log in and request a new API token.
http://www.betaseries.com/wiki/Documentation#cat-members
:param string api_key: Api key requested at http://www.betaseries.com/api
:param string login: Login name
:param string password: Password
:return: User token
"""
r = requests.post(API_URL_PREFIX + 'members/auth', params={
'login': login,
'password': md5(password).hexdigest()
}, headers={
'Accept': 'application/json',
'X-BetaSeries-Version': '2.1',
'X-BetaSeries-Key': api_key,
})
assert r.status_code == 200, "Bad HTTP status code: %s" % r.status_code
j = r.json()
error_list = j['errors']
for err in error_list:
log.error(str(err))
if not error_list:
return j['token']
def query_member_id(api_key, user_token, login_name):
"""
Get the member id of a member identified by its login name.
:param string api_key: Api key requested at http://www.betaseries.com/api
:param string user_token: obtained with a call to create_token()
:param string login_name: The login name of the member
:return: Id of the member identified by its login name or `None` if not found
"""
r = requests.get(API_URL_PREFIX + 'members/search', params={
'login': login_name
}, headers={
'Accept': 'application/json',
'X-BetaSeries-Version': '2.1',
'X-BetaSeries-Key': api_key,
'X-BetaSeries-Token': user_token,
})
assert r.status_code == 200, "Bad HTTP status code: %s" % r.status_code
j = r.json()
error_list = j['errors']
for err in error_list:
log.error(str(err))
found_id = None
if not error_list:
for candidate in j['users']:
if candidate['login'] == login_name:
found_id = candidate['id']
break
return found_id
def query_series(api_key, user_token, member_name=None):
"""
Get the list of series followed by the authenticated user
:param string api_key: Api key requested at http://www.betaseries.com/api
:param string user_token: Obtained with a call to create_token()
:param string member_name: [optional] A member name to get the list of series from. If None, will query the member
for whom the user_token was for
:return: List of serie titles or empty list
"""
params = {}
if member_name:
member_id = query_member_id(api_key, user_token, member_name)
if member_id:
params = {'id': member_id}
else:
log.error("member %r not found" % member_name)
return []
r = requests.get(API_URL_PREFIX + 'members/infos', params=params, headers={
'Accept': 'application/json',
'X-BetaSeries-Version': '2.1',
'X-BetaSeries-Key': api_key,
'X-BetaSeries-Token': user_token,
})
assert r.status_code == 200, "Bad HTTP status code: %s" % r.status_code
j = r.json()
error_list = j['errors']
for err in error_list:
log.error(str(err))
if not error_list:
return [x['title'] for x in j['member']['shows'] if x['user']['archived'] is False]
else:
return []
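# Minimal end-to-end sketch (placeholder credentials, illustrative only):
# token = create_token('my_api_key', 'geeko', 'secret')
# if token:
#     titles = query_series('my_api_key', token, member_name='geeko')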
@event('plugin.register')
def register_plugin():
plugin.register(BetaSeriesList, 'betaseries_list', api_ver=2)
| mit | -1,441,884,288,757,939,200 | 30.98995 | 118 | 0.587653 | false | 3.828022 | true | false | false |
junkoda/fs2 | py/fs/lpt.py | 1 | 1448 | import fs._fs as c
from fs.particles import Particles
def init(nc, boxsize, a, ps, seed, kind):
"""Generate 2LPT displacements and particle positions.
This function generates a random Gaussian initial condition and
create a grid of particles with 2LPT displacements. The velocities
are 0.
Args:
nc (int): Number of particles per dimension;
number of particles np = nc**3.
boxsize (float): length of the periodic box on a side [1/h Mpc].
a (float): scale factor at which the positions are computed.
ps (PowerSpectrum): Linear power spectrum extrapolated to a=1.
seed (int): random seed for the random Gaussian initial density field
kind (str): kind of xv, 'zeldovich', '2lpt', or 'cola'
cola sets v=0
Returns:
An instance of class Particles.
"""
return Particles(_particles=c._lpt(nc, boxsize, a, seed, ps._ps,
kind.lower()))
def set_offset(offset):
"""Set offset with respect to grid points
x = (ix + offset)*dx,
where ix is an integer, dx = boxsize/nc.
Args:
offset (float): offset (0 <= offset < 1)
"""
c._set_offset(offset)
def set_zeldovich_force(particles, a):
"""Set Zel'dovich (1LPT) force to particles.force
Args:
a (float): scale factor of the force
"""
c._set_zeldovich_force(particles._particles, a)
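# Illustrative call sequence (assumes `ps` is a linear PowerSpectrum
# instance obtained elsewhere; numeric values are placeholders):
# set_offset(0.5)
# particles = init(nc=64, boxsize=64.0, a=0.1, ps=ps, seed=1, kind='2lpt')
# set_zeldovich_force(particles, a=0.1)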
| gpl-3.0 | -7,267,806,520,855,188,000 | 27.392157 | 77 | 0.615331 | false | 3.751295 | false | false | false |
mapleoin/obswatch | tests.py | 1 | 9851 | import unittest
import smtplib
import minimock
from minimock import assert_same_trace, mock, Mock, TraceTracker
from StringIO import StringIO
import osc.core
import obswatch
class TestObswatch(unittest.TestCase):
# nose knows about setUpClass, but python 2.6's unittest doesn't
# @classmethod
# def setUpClass(cls):
def setUp(self):
repo = Mock('repo')
repo.name = 'standard'
repo.arch = 'x86_64'
package = Mock('package')
package.project = 'openSUSE:11.3'
package.name = 'osc'
self.build = obswatch.Build(package=package, repo=repo,
interested={'geeko':'geeko@opensuse.org'})
self.package = package
self.tt = TraceTracker()
obswatch.SLEEP_TIME = 0
def tearDown(self):
minimock.restore()
self.tt.clear()
def test_get_latest_packages(self):
mock('obswatch.http_GET', tracker=self.tt,
returns=StringIO('''<?xml version="1.0" encoding="UTF-8"?>
<latest_added>
<package created="2010-09-09T14:03:06+02:00" name="antivir" project="home:varkoly:branches:openSUSE:11.3:NonFree"/>
<project created="2010-09-09T14:03:05+02:00" name="home:varkoly:branches:openSUSE:11.3:NonFree"/>
<package created="2010-09-09T13:50:37+02:00" name="test9" project="home:enzokiel:test"/>
<package created="2010-09-09T13:12:54+02:00" name="kernel-bfs-source" project="home:jingtw"/>
<package created="2010-09-09T13:12:08+02:00" name="getdata" project="home:christiantrippe:branches:KDE:Distro:Factory"/>
<package created="2010-09-09T13:05:13+02:00" name="perl-String-CRC32" project="home:seife:byd"/>
<package created="2010-09-09T13:05:04+02:00" name="autogen" project="home:psmt:branches:Base:System"/>
</latest_added>'''))
result = obswatch.get_latest_packages(7)
assert_same_trace(self.tt,"""Called obswatch.http_GET(
'%sstatistics/latest_updated?limit=7')""" % obswatch.APIURL)
for p in result:
self.assertTrue(isinstance(p, obswatch.Package))
self.assertEqual(result[0].name, 'antivir')
self.assertEqual(len(result), 6) # second one is a project
def test_get_user_email(self):
mock('obswatch.http_GET', tracker=self.tt,
returns=StringIO('''<person>
<login>Geeko</login>
<email>geeko@opensuse.org</email>
<realname>Geeko Chameleon</realname>
<watchlist/>
</person>'''))
result = obswatch.get_user_email('Geeko')
assert_same_trace(self.tt, """Called obswatch.http_GET(
'%sperson/Geeko')""" % obswatch.APIURL)
self.assertEqual(result, 'geeko@opensuse.org')
def test_users_from_url(self):
mock('obswatch.http_GET', tracker=self.tt,
returns=StringIO('''<?xml version="1.0" encoding="UTF-8"?>
<project name="superkde" created="2005-01-01T00:00:02+01:00" updated="2007-01-19T10:44:45+01:00">
<title>SuperKDE</title>
<description>SuperKDE is a heavily tuned version of KDE.</description>
<link project="openSUSE:11.2:Update" />
<link project="openSUSE:11.2" />
<person role="maintainer" userid="Geeko"/>
<person role="maintainer" userid="BrownGeeko"/>
<group role="reviewer" groupid="release_team"/>
<build>
<disable />
</build>
<repository name="kde4:factory" rebuild="transitive">
<path project="kde4" repository="factory"/>
<arch>i386</arch>
<arch>x86_64</arch>
</repository>
</project>'''))
mock('obswatch.get_user_email', returns='geeko@opensuse.org')
result = obswatch.get_users_from_url('%ssource/superkde/_meta' %
obswatch.APIURL)
assert_same_trace(self.tt, """Called obswatch.http_GET(
'%ssource/superkde/_meta')""" % obswatch.APIURL)
self.assertEqual(len(result), 2)
self.assertEqual(result['Geeko'], 'geeko@opensuse.org')
self.assertEqual(result['BrownGeeko'], 'geeko@opensuse.org')
def test_get_builds(self):
mock('osc.core.http_GET', tracker=self.tt,
returns=StringIO('''<?xml version="1.0" encoding="UTF-8"?>
<project name="superkde" created="2005-01-01T00:00:02+01:00" updated="2007-01-19T10:44:45+01:00">
<title>SuperKDE</title>
<description>SuperKDE is a heavily tuned version of KDE.</description>
<link project="openSUSE:11.2:Update" />
<link project="openSUSE:11.2" />
<person role="maintainer" userid="ernie"/>
<group role="reviewer" groupid="release_team"/>
<build>
<disable />
</build>
<useforbuild>
<disable />
</useforbuild>
<repository name="kde4:factory" rebuild="transitive">
<path project="kde4" repository="factory"/>
<arch>i386</arch>
<arch>x86_64</arch>
</repository>
<repository name="suselinux-9.3">
<path project="suselinux-9.3" repository="standard"/>
<arch>i386</arch>
</repository>
<repository name="gnomespecial" rebuild="local">
<path project="gnome3" repository="suselinux-9.3"/>
<path project="suselinux-9.3" repository="standard"/>
<arch>i386</arch>
</repository>
</project>'''))
# source/superkde/_meta
# gets called by osc.core.get_repos_of_project
mock('obswatch.get_interested',
returns={'Geeko': 'geeko@opensuse.org'})
superkde = Mock('package')
superkde.name = 'superkde'
superkde.project = 'superkde'
superkde.created = '2007-01-19T10:44:45+01:00'
result = obswatch.get_builds(superkde)
assert_same_trace(self.tt, """Called osc.core.http_GET(
'%ssource/superkde/_meta')""" % obswatch.APIURL)
def test_build_get_remote_status(self):
mock('obswatch.http_GET', tracker=self.tt,
returns=StringIO('''<status package="osc" code="disabled">
<details></details>
</status>'''))
code = self.build.get_remote_status()
assert_same_trace(self.tt, """Called obswatch.http_GET(
'%sbuild/openSUSE:11.3/standard/x86_64/osc/_status')""" %
obswatch.APIURL)
self.assertEqual(code, 'disabled')
def test_process_same_status(self):
self.build.get_remote_status = lambda : self.build.status
result = obswatch.process_build(self.build)
self.assertTrue(result)
def test_process_intermediate(self):
self.build.get_remote_status = lambda : 'building'
result = obswatch.process_build(self.build)
self.assertTrue(result)
self.assertEqual(self.build.status, 'building')
def test_process_other(self):
self.build.get_remote_status = lambda : 'excluded'
result = obswatch.process_build(self.build)
self.assertFalse(result)
def test_process_unknown(self):
self.build.get_remote_status = lambda : 'infundibulated'
self.assertRaises(Exception, obswatch.process_build, self.build)
def test_process_final_not_succeeded(self):
self.build.get_remote_status = lambda : 'failed'
result = obswatch.process_build(self.build)
self.assertFalse(result)
def test_final_succeeded(self):
self.build.get_remote_status = lambda : 'succeeded'
mock('obswatch.Build.get_binaries', returns={'foo':'bar'})
mock('obswatch.send_email', tracker=self.tt)
result = obswatch.process_build(self.build)
self.assertFalse(result)
expected_output = """Called obswatch.send_email(
'geeko',
'geeko@opensuse.org',
<obswatch.Build instance at ...>,
{'foo': 'bar'})"""
assert_same_trace(self.tt, expected_output)
def test_interested(self):
mock('obswatch.get_users_from_url', returns_func=lambda url: {url: url})
result = obswatch.get_interested(self.package)
# both the project and package page should be checked for users
self.assertEqual(result,
{'https://api.opensuse.org/source/openSUSE:11.3/_meta': 'https://api.opensuse.org/source/openSUSE:11.3/_meta',
'https://api.opensuse.org/source/openSUSE:11.3/osc/_meta': 'https://api.opensuse.org/source/openSUSE:11.3/osc/_meta'})
def test_send_email(self):
mock('smtplib.SMTP', returns=Mock('smtp_connection', tracker=self.tt),
tracker=self.tt)
obswatch.send_email('geeko', 'geeko@opensuse.org',
'yourpackage',
{'rpm1': 'http://opensuse.org/rpm1',
'rpm2': 'http://opensuse.org/rpm2'})
expected_output = """Called smtplib.SMTP('localhost')
Called smtp_connection.sendmail(
'osc@opensuse.org',
['geeko@opensuse.org'],
'To: geeko@opensuse.org\\nFrom: osc@opensuse.org\\nSubject: (osc) build succeeded: yourpackage\\n\\nThe package yourpackage has finished building and can now be downloaded from:\\nrpm1 - http://opensuse.org/rpm1\\nrpm2 - http://opensuse.org/rpm2')
Called smtp_connection.quit()
"""
assert_same_trace(self.tt, expected_output)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 7,210,041,340,860,714,000 | 39.706612 | 251 | 0.584509 | false | 3.576979 | true | false | false |
frespa81/tellduscontroller | MacroParser/Macros.py | 1 | 3739 | #!/usr/bin/python
import threading
import time
import glob
import MacroParser
import Telldus
class MacroCollection(object):
"""This is a collection of macros defined in xml format that are read from
a list of folders.
Attribute:
folder_list -- The list with the paths from where to collect the macros
"""
def __init__(self, folder_list):
"""Initializes the MacroCollection object. """
self.PotentialMacroFiles = []
self.collection = {}
for folder in folder_list:
# Yes for every folder in the list...
folder = folder.strip()
# TODO: if this is to be used on let's say an inferior operating system, this has to be adapted...
if folder[-1] != '/':
folder.append('/*.xml')
else:
folder.append('*.xml')
# add the macro files to our list of files...
self.PotentialMacroFiles.extend( glob.glob(folder) )
# OK go through the potential macro files and make macro objects out
# of them.
for PotentialMacroFile in self.PotentialMacroFiles:
            macro = None
            try:
                macro = xml_macro( PotentialMacroFile )
            except:
                pass
            else:
                # keep the successfully parsed macro, keyed by its source file
                self.collection[PotentialMacroFile] = macro
class wakeup_macro( threading.Thread ):
"""This is a macro that can be started and be self maintained.
It feeds the command queue in the controller with commands until the macro
completes.
Attribute:
unit_id -- The specific dimmer that should be used for this wakeup sequence.
"""
def __init__( self, unit_id ):
"""Initialize the wakeup macro. """
threading.Thread.__init__(self)
self.unit_id = unit_id
def run(self):
"""This is where the magic happens! """
dim_value = 0
print time.asctime()
print "wakeup macro for unit %d started" % self.unit_id
while dim_value < 255:
if dim_value < 10:
dim_value += 1
elif dim_value <20:
dim_value += 2
else:
dim_value += 5
if dim_value > 255:
dim_value = 255
# Create the command!
cmd = Telldus.Controller.TelldusCommand( "dim::%d:%d:" % (self.unit_id, dim_value) , False )
# enqueue a command that sets the level on a lamp, this is later
# received by the Telldus Controller
Telldus.Controller.CommandQueue.put( cmd )
# Sleep for a while so we don't ramp up the lux level to quick
time.sleep( 10 )
print time.asctime()
print "wakeup macro for unit %d completed" % self.unit_id
class xml_macro( threading.Thread ):
"""This is a xml macro that reads a file and then executes it.
Attribute:
unit_id -- The specific dimmer that should be used for this wakeup sequence.
"""
def __init__( self, macro_file_path ):
"""Initialize the wakeup macro. """
threading.Thread.__init__(self)
# print os.getcwd()
self.macroObjects = MacroParser.Macro( macro_file_path )
def run(self):
"""This is where the magic happens! """
self.macroObjects.execute()
| gpl-3.0 | -8,679,974,787,027,215,000 | 26.5 | 110 | 0.507087 | false | 4.799743 | false | false | false |
bolkedebruin/airflow | airflow/_vendor/nvd3/cumulativeLineChart.py | 6 | 4048 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Python-nvd3 is a Python wrapper for NVD3 graph library.
NVD3 is an attempt to build re-usable charts and chart components
for d3.js without taking away the power that d3.js gives you.
Project location : https://github.com/areski/python-nvd3
"""
from .NVD3Chart import NVD3Chart, TemplateMixin
class cumulativeLineChart(TemplateMixin, NVD3Chart):
"""
A cumulative line chart is used when you have one important grouping representing
an ordered set of data and one value to show, summed over time.
Python example::
from nvd3 import cumulativeLineChart
chart = cumulativeLineChart(name='cumulativeLineChart', x_is_date=True)
xdata = [1365026400000000, 1365026500000000, 1365026600000000]
ydata = [6, 5, 1]
y2data = [36, 55, 11]
extra_serie = {"tooltip": {"y_start": "There are ", "y_end": " calls"}}
chart.add_serie(name="Serie 1", y=ydata, x=xdata, extra=extra_serie)
extra_serie = {"tooltip": {"y_start": "", "y_end": " mins"}}
chart.add_serie(name="Serie 2", y=y2data, x=xdata, extra=extra_serie)
chart.buildhtml()
Javascript generated:
.. raw:: html
<div id="cumulativeLineChart"><svg style="height:450px; width:100%"></svg></div>
<script>
data_cumulativeLineChart=[{"values": [{"y": 6, "x": 1365026400000000},
{"y": 5, "x": 1365026500000000},
{"y": 1, "x": 1365026600000000}],
"key": "Serie 1", "yAxis": "1"},
{"values": [{"y": 36, "x": 1365026400000000},
{"y": 55, "x": 1365026500000000},
{"y": 11, "x": 1365026600000000}], "key": "Serie 2", "yAxis": "1"}];
nv.addGraph(function() {
var chart = nv.models.cumulativeLineChart();
chart.margin({top: 30, right: 60, bottom: 20, left: 60});
var datum = data_cumulativeLineChart;
chart.xAxis
.tickFormat(function(d) { return d3.time.format('%d %b %Y')(new Date(parseInt(d))) });
chart.yAxis
.tickFormat(d3.format(',.1%'));
chart.tooltipContent(function(key, y, e, graph) {
var x = d3.time.format("%d %b %Y")(new Date(parseInt(graph.point.x)));
var y = String(graph.point.y);
if(key == 'Serie 1'){
var y = 'There are ' + String(e) + ' calls';
}if(key == 'Serie 2'){
var y = String(e) + ' mins';
}
tooltip_str = '<center><b>'+key+'</b></center>' + y + ' on ' + x;
return tooltip_str;
});
chart.showLegend(true);
d3.select('#cumulativeLineChart svg')
.datum(datum)
.transition().duration(500)
.attr('height', 450)
.call(chart); });
</script>
"""
CHART_FILENAME = "./cumulativelinechart.html"
template_chart_nvd3 = NVD3Chart.template_environment.get_template(CHART_FILENAME)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.model = 'cumulativeLineChart'
height = kwargs.get('height', 450)
width = kwargs.get('width', None)
if kwargs.get('x_is_date', False):
self.set_date_flag(True)
self.create_x_axis('xAxis',
format=kwargs.get('x_axis_format', '%d %b %Y'),
date=True)
self.set_custom_tooltip_flag(True)
else:
self.create_x_axis('xAxis', format=kwargs.get(
'x_axis_format', '.2f'))
self.create_y_axis('yAxis', format=kwargs.get('y_axis_format', '.1%'))
self.set_graph_height(height)
if width:
self.set_graph_width(width)
| apache-2.0 | -8,209,313,798,998,288,000 | 37.923077 | 114 | 0.513587 | false | 3.650135 | false | false | false |
schriftgestalt/Mekka-Scripts | Paths/New Tab with Small Paths.py | 1 | 7421 | #MenuTitle: New Tab with Small Paths
# -*- coding: utf-8 -*-
__doc__="""
Finds small paths (smaller than a user-definable threshold) in glyphs and opens a new tab with affected glyphs.
"""
import vanilla
import GlyphsApp
class FindSmallPaths( object ):
def __init__( self ):
# Window 'self.w':
windowWidth = 250
windowHeight = 190
windowWidthResize = 300 # user can resize width by this value
windowHeightResize = 0 # user can resize height by this value
self.w = vanilla.FloatingWindow(
( windowWidth, windowHeight ), # default window size
"Find Small Paths", # window title
minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)
autosaveName = "com.mekkablue.FindSmallPaths.mainwindow" # stores last window position and size
)
# UI elements:
self.w.text_1 = vanilla.TextBox( (15, 10, -15, 30), "Open a new tab with glyphs that contain paths with an area smaller than:", sizeStyle='small' )
self.w.minArea = vanilla.TextBox( (15, 42, -15, 15+3), "1000 square units", sizeStyle = 'small', alignment="center")
self.w.sliderMin = vanilla.EditText( ( 15, 60-1, 50, 19), "10", sizeStyle='small', callback=self.SliderUpdate )
self.w.sliderMax = vanilla.EditText( (-15-50, 60-1, -15, 19), "10000", sizeStyle='small', callback=self.SliderUpdate )
self.w.areaSlider= vanilla.Slider((15+50+10, 60, -15-50-10, 19), value=0.1, minValue=0.0, maxValue=1.0, sizeStyle='small', callback=self.SliderUpdate )
self.w.deleteThemRightAway = vanilla.CheckBox( (15, 80+10, -15, 20), "Delete Small Paths Right Away", value=False, callback=self.CheckBoxUpdate, sizeStyle='small' )
self.w.afterOverlapRemoval = vanilla.CheckBox( (15, 100+10, -15, 20), "After Decomposition and Overlap Removal (slower)", value=False, callback=self.CheckBoxUpdate, sizeStyle='small' )
# Run Button:
self.w.runButton = vanilla.Button((-120, -20-15, -15, -15), "Open Tab", sizeStyle='regular', callback=self.FindSmallPathsMain )
self.w.setDefaultButton( self.w.runButton )
# Load Settings:
if not self.LoadPreferences():
print "Note: 'Find Small Paths' could not load preferences. Will resort to defaults"
self.CheckBoxUpdate(None)
self.SliderUpdate(None)
# Open window and focus on it:
self.w.open()
self.w.makeKey()
def SavePreferences( self, sender ):
try:
Glyphs.defaults["com.mekkablue.FindSmallPaths.sliderMin"] = self.w.sliderMin.get()
Glyphs.defaults["com.mekkablue.FindSmallPaths.sliderMax"] = self.w.sliderMax.get()
Glyphs.defaults["com.mekkablue.FindSmallPaths.areaSlider"] = float(self.w.areaSlider.get())
Glyphs.defaults["com.mekkablue.FindSmallPaths.deleteThemRightAway"] = int(self.w.deleteThemRightAway.get())
Glyphs.defaults["com.mekkablue.FindSmallPaths.afterOverlapRemoval"] = int(self.w.afterOverlapRemoval.get())
except Exception as e:
print e
return False
return True
def LoadPreferences( self ):
try:
NSUserDefaults.standardUserDefaults().registerDefaults_(
{
"com.mekkablue.FindSmallPaths.sliderMin": "10",
"com.mekkablue.FindSmallPaths.sliderMax": "100000",
"com.mekkablue.FindSmallPaths.areaSlider": 0.1,
"com.mekkablue.FindSmallPaths.deleteThemRightAway": 0,
"com.mekkablue.FindSmallPaths.afterOverlapRemoval": 0
}
)
self.w.sliderMin.set( Glyphs.defaults["com.mekkablue.FindSmallPaths.sliderMin"] )
self.w.sliderMax.set( Glyphs.defaults["com.mekkablue.FindSmallPaths.sliderMax"] )
self.w.areaSlider.set( float(Glyphs.defaults["com.mekkablue.FindSmallPaths.areaSlider"]) )
self.w.deleteThemRightAway.set( bool(Glyphs.defaults["com.mekkablue.FindSmallPaths.deleteThemRightAway"]) )
self.w.afterOverlapRemoval.set( bool(Glyphs.defaults["com.mekkablue.FindSmallPaths.afterOverlapRemoval"]) )
except Exception as e:
print e
return False
return True
def CheckBoxUpdate(self, sender):
try:
# mutually exclusive check boxes:
theOne = self.w.afterOverlapRemoval
theOther = self.w.deleteThemRightAway
theOther.enable(not bool(theOne.get()))
theOne.enable(not bool(theOther.get()))
# Hack as long as vanilla.CheckBox.getNSButton is not implemented:
if theOne.get():
theOther.set(False)
if theOther.get():
theOne.set(False)
# save prefs:
if not self.SavePreferences( self ):
print "Note: 'Find Small Paths' could not write preferences."
return True
except Exception as e:
print e
return False
def SliderUpdate( self, sender ):
try:
minArea = self.CurrentMinArea()
if not sender == self.w.areaSlider:
if not self.SavePreferences( self ):
print "Note: 'Find Small Paths' could not write preferences."
return True
except:
return False
def CurrentMinArea(self):
minimum = float(self.w.sliderMin.get())
maximum = float(self.w.sliderMax.get())
# check for integrity of min and max values:
if minimum < 1.0:
minimum = 1.0
self.w.sliderMin.set( "%i"%minimum )
if maximum < minimum:
maximum = minimum+10.0
self.w.sliderMax.set( "%i"%maximum )
sliderPos = float( self.w.areaSlider.get() )
minArea = minimum + sliderPos * (maximum-minimum)
self.w.minArea.set( "%i square units" % minArea )
return minArea
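	# Worked example: with bounds 10 and 10000 and the slider at its default
	# position 0.1, the threshold is 10 + 0.1 * (10000 - 10) = 1009, shown
	# as "1009 square units" by the %i format above.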
def FindSmallPathsMain( self, sender ):
try:
minArea = self.CurrentMinArea()
smallPathsShouldBeDeleted = self.w.deleteThemRightAway.get()
overlapsShouldBeRemovedFirst = self.w.afterOverlapRemoval.get()
glyphsWithSmallPaths = []
thisFont = Glyphs.font # frontmost font
for thisGlyph in thisFont.glyphs:
thisGlyph.beginUndo() # begin undo grouping
for thisLayer in thisGlyph.layers:
if thisLayer.paths:
if overlapsShouldBeRemovedFirst:
checkLayer = thisLayer.copyDecomposedLayer()
checkLayer.removeOverlap()
for thisPath in checkLayer.paths:
if thisPath.area() < minArea:
glyphsWithSmallPaths.append(thisGlyph.name)
else:
for i in range(len(thisLayer.paths))[::-1]:
thisPath = thisLayer.paths[i]
if thisPath.area() < minArea:
glyphsWithSmallPaths.append(thisGlyph.name)
if smallPathsShouldBeDeleted:
print "deleting", thisPath
del thisLayer.paths[i]
thisGlyph.endUndo() # end undo grouping
if glyphsWithSmallPaths:
tabString = "/"+"/".join( set(glyphsWithSmallPaths) )
thisFont.newTab( tabString )
else:
Message("No Small Paths Found", "No glyphs with paths smaller than %i square units found in the frontmost font." % minArea, OKButton="Cool")
# listOfSelectedLayers = thisFont.selectedLayers # active layers of currently selected glyphs
# for thisLayer in listOfSelectedLayers: # loop through layers
# thisGlyph = thisLayer.parent
# print thisGlyph.name, thisLayer.name
# # output all node coordinates:
# for thisPath in thisLayer.paths:
# for thisNode in thisLayer.nodes:
# print "-- %.1f %.1f" % ( thisNode.x, thisNode.y )
if not self.SavePreferences( self ):
print "Note: 'Find Small Paths' could not write preferences."
self.w.close() # delete if you want window to stay open
		except Exception as e:
# brings macro window to front and reports error:
Glyphs.showMacroWindow()
print "Find Small Paths Error: %s" % e
FindSmallPaths() | apache-2.0 | -4,299,180,225,305,441,000 | 37.455959 | 186 | 0.703814 | false | 3.156529 | false | false | false |
Zorro666/renderdoc | util/test/tests/D3D11/D3D11_Shader_ISA.py | 2 | 2951 | import renderdoc as rd
from typing import List
import rdtest
class D3D11_Shader_ISA(rdtest.TestCase):
demos_test_name = 'D3D11_Shader_ISA'
def check_capture(self):
action = self.find_action("GPU=")
self.check(action is not None)
is_amd = 'AMD' in action.customName
self.controller.SetFrameEvent(action.next.eventId, False)
pipe: rd.PipeState = self.controller.GetPipelineState()
refl: rd.ShaderReflection = pipe.GetShaderReflection(rd.ShaderStage.Vertex)
isas: List[str] = self.controller.GetDisassemblyTargets(True)
if isas == []:
raise rdtest.TestFailureException("Expected some disassembly targets, got none!")
# Generic testing can't do much, we just ensure that we can successfully get a non-empty disassembly string
for isa in isas:
disasm: str = self.controller.DisassembleShader(pipe.GetGraphicsPipelineObject(), refl, isa)
if len(disasm) < 32:
raise rdtest.TestFailureException("Disassembly for target '{}' is degenerate: {}".format(isa, disasm))
rdtest.log.success("All disassembly targets successfully fetched and seem reasonable")
# We make this a hard failure. Users can fix this by installing the plugins, and we don't want automated
# overnight tests to suddenly stop checking
if 'AMDIL' not in isas:
raise rdtest.TestFailureException(
"AMDIL is not an available disassembly target. Are you missing plugins?")
disasm: str = self.controller.DisassembleShader(pipe.GetGraphicsPipelineObject(), refl, 'AMDIL')
expected = [
'il_vs',
'dcl_output_position',
'end',
]
for fragment in expected:
if not fragment in disasm:
raise rdtest.TestFailureException(
"AMDIL ISA doesn't contain '{}' as expected: {}".format(fragment, disasm))
if 'RDNA (Navi 10)' not in isas:
raise rdtest.TestFailureException(
"RDNA (Navi 10) is not an available disassembly target. Are you missing plugins?")
disasm: str = self.controller.DisassembleShader(pipe.GetGraphicsPipelineObject(), refl, 'RDNA (Navi 10)')
expected = [
'asic(GFX10)',
'vgpr_count',
'wave_size',
's_endpgm',
]
for fragment in expected:
if not fragment in disasm:
raise rdtest.TestFailureException(
"RDNA ISA doesn't contain '{}' as expected: {}".format(fragment, disasm))
rdtest.log.success("AMD disassembly is as expected")
# D3D11 doesn't have live driver disassembly, can't test it
if not is_amd:
rdtest.log.print("Not testing live driver disassembly outside AMD")
else:
rdtest.log.print("No live driver disassembly to test on D3D11") | mit | -3,720,529,310,528,970,000 | 36.367089 | 118 | 0.626228 | false | 4.127273 | true | false | false |
IEEE-SB-Passau/pelican-deployment-system | example_config.py | 1 | 3488 | # Copyright 2016 Peter Dahlberg
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.date import DateTrigger
if __name__ == "__main__":
raise SystemExit("Not meant to be run directly!")
def _rsync_cmd(dest):
cmd = ("rsync --delete-delay --recursive --times --stats --delay-updates "
"'{output}/' '{dest}'")
return cmd.format(dest=dest, output="{output}")
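
# For illustration, _rsync_cmd('/var/www/site') (destination hypothetical) returns
#   rsync --delete-delay --recursive --times --stats --delay-updates '{output}/' '/var/www/site'
# i.e. {dest} is filled in immediately while {output} survives for the runner
# to substitute with the generator output path later.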
# configure the logger
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')
# make sure git does not block giving pw prompts, git 2.3+ only
os.environ["GIT_TERMINAL_PROMPT"] = "0"
os.environ["GIT_ASKPASS"] = "echo" # also to avoid interactiveness
os.environ["GIT_EDITOR"] = "true" # also to avoid interactiveness
os.environ["GIT_PAGER"] = "cat" # also to avoid interactiveness
# avoid system config, we want default behaviour
os.environ["GIT_CONFIG_NOSYSTEM"] = "yes"
# needs to be a byte like object
GITHUB_SECRET = b"changetosomethingrandomlong"
RUNNERS = {
# unique name of the runner, avoid spaces and other obscure characters
"website_master": {
# directory where building takes place, will be created if not there
# multiple runners may point to the same one
"working_directory": "/tmp/test",
# upstream url of the repository which contains the website
# use https://git::@github.com... to avoid pw prompts and instead fail
# (e.g. if github gives errornously 401 temporarily, git would block)
# os.environ["GIT_TERMINAL_PROMPT"] = "0" does the same but git 2.3+only
"clone_url": "https://git::@github.com/IEEE-SB-Passau/pelican-ieee-passau.git",
# branch which will be built
"git_branch": "master",
# command which installs the generated directory tree to it's final
# destination (the wwwroot) e.g. rsync. {output} will be replaced by
# the path to the generator output
"final_install_command": _rsync_cmd("/tmp/testroot"),
# command which builds the website
# important: specify {output} as output path of the generator
# if you use toy you may use {toxresult} as the path to the result.json
"build_command": ('tox -e pelican --result-json "{toxresult}" '
'--recreate -- -d --output "{output}"'),
# will be added to env when running build_command
"build_env": {"PELICAN_SITEURL": "//apu:800"}
}
}
# define cronjobs as a sequence of (runner, trigger) pairs; for cron triggers see
# http://apscheduler.readthedocs.io/en/latest/modules/triggers/cron.html
SCHEDULED_BUILD_JOBS = [
("website_master", CronTrigger(minute="*/30")),
("website_master", DateTrigger()) # once at start
]
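
# More trigger examples (hypothetical, same apscheduler API):
# CronTrigger(hour=3) fires nightly at 03:00,
# CronTrigger(day_of_week='mon-fri', minute=0) fires hourly on weekdays, and
# DateTrigger(run_date=some_datetime) fires once at the given time.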
# user, pass for /status/... subpages, if not set or None no auth is done
def STATUS_AUTH_BASIC_FN(user, passw):
return user == "powerpoint" and passw == "karaoke"
| apache-2.0 | 5,242,572,374,823,577,000 | 39.55814 | 87 | 0.678899 | false | 3.656184 | false | false | false |
1Strategy/security-fairy | denied_notification.py | 1 | 11240 | import boto3
import gzip
import json
import logging
import os
import re
from tools import Arn
from setup_logger import create_logger
from aws_session_manager import AWS_Session
from botocore.exceptions import ProfileNotFound
logger = create_logger(name="denied_notification.py")
try:
SESSION = boto3.session.Session(profile_name='training',
region_name='us-east-1')
except ProfileNotFound as pnf:
SESSION = boto3.session.Session()
def lambda_handler(event, context):
# global SESSION
# SESSION = SESSION.get_session()
topic_arn = os.environ.get('sns_arn', 'arn:aws:sns:us-east-1:281782457076:security_fairy_topic')
dynamodb_table = os.environ.get('dynamodb_table', 'arn:aws:dynamodb:us-east-1:281782457076:table/security_fairy_dynamodb_table')
# Extract Bucket and Key from an SNS notification
# message = json.loads(event['Records'][0]['Sns']['Message'])
# bucket = message['s3Bucket']
# key = message['s3ObjectKey'][0]
# Extracted Bucket and Key from S3 event notification
bucket = event['Records'][0]['s3']['bucket']['name']
key = event['Records'][0]['s3']['object']['key']
# where to save the downloaded file
file_path = '/tmp/cloudtraillogfile.gz'
# downloads file to above path
boto3.client('s3').download_file(bucket, key, file_path)
# opens gz file for reading
gzfile = gzip.open(file_path, 'r')
# loads contents of the Records key into variable (our actual cloudtrail log entries!)
records = json.loads(gzfile.readlines()[0])['Records']
access_denied_records = check_records_for_error_code(records)
security_fairy_access_denied_records = get_security_fairy_audited_entities(access_denied_records)
write_denied_actions_to_dynamodb(security_fairy_access_denied_records, dynamodb_table)
send_access_denied_notifications(access_denied_records, topic_arn)
def check_records_for_error_code(records, error_codes = ['AccessDenied', 'AccessDeniedException','Client.UnauthorizedOperation']):
matched_error_records = []
for record in records:
if record.get('errorCode', None) in error_codes:
logger.debug(record)
extracted_information = {}
arn = Arn(record['userIdentity'].get('arn', None))
role_name = arn.get_entity_name()
service_name = arn.get_service()
extracted_information['arn'] = arn.get_full_arn()
extracted_information['error_code'] = record['errorCode']
extracted_information['denied_action'] = service_name + ':' + record['eventName']
if not extracted_information in matched_error_records:
logger.info('extracted_information doesn\'t already exist in list of access denieds')
matched_error_records.append(extracted_information)
logger.debug(matched_error_records)
return matched_error_records
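
# For illustration, each matching CloudTrail record is reduced to a dict of
# the form
#   {'arn': <full userIdentity arn>,
#    'error_code': 'AccessDenied',
#    'denied_action': '<arn service>:<eventName>'}
# and exact duplicates are skipped, so repeated denials yield a single entry.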
def send_access_denied_notifications(access_denied_records, topic_arn):
if access_denied_records:
response = boto3.client('sns', region_name = 'us-east-1')\
.publish( TopicArn=topic_arn,
Message=json.dumps(access_denied_records),
Subject='Automated AWS Notification - Access Denied')
def write_denied_actions_to_dynamodb(access_denied_records, dynamodb_table):
    # Takes records of the form below:
# [{"error_code": "AccessDenied", "arn": "arn:aws:sts::281782457076:assumed-role/serverless_api_gateway_step_functions/BackplaneAssumeRoleSession", "denied_action": "states:StartExecution"}, {"error_code": "AccessDenied", "arn": "arn:aws:sts::281782457076:assumed-role/serverless_api_gateway_step_functions/BackplaneAssumeRoleSession", "denied_action": "states:StartExecution"}]
# read the dynamodb_table, if the action already exists, do nothing
dynamodb_client = SESSION.client('dynamodb')
for record in access_denied_records:
entity_arn = record['arn']
execution_id, existing_denied_actions = get_existing_denied_actions(entity_arn, dynamodb_table)
updated_denied_actions = existing_denied_actions
if not record['denied_action'] in existing_denied_actions:
updated_denied_actions.append(record['denied_action'])
dynamodb_client.update_item(TableName=dynamodb_table,
Key={
"execution_id": {
"S": execution_id
}
},
AttributeUpdates={
"denied_actions": {
"Value":{"SS": updated_denied_actions}
}
})
def get_security_fairy_audited_entities(access_denied_records):
audited_entities = []
for record in access_denied_records:
entity = Arn(record['arn'])
entity.convert_assumed_role_to_role()
entity_arn = entity.get_full_arn()
logger.debug(entity_arn)
if entity.is_role() and is_access_denied_security_fairy_audited_role(entity_arn):
logger.debug('Adding access_denied_record to list')
record['arn'] = entity_arn
audited_entities.append(record)
logger.info(audited_entities)
return audited_entities
def get_existing_denied_actions(entity_arn, dynamodb_table):
dynamodb_client = SESSION.client('dynamodb')
response = dynamodb_client.scan(
TableName=dynamodb_table,
IndexName='entity_arn',
AttributesToGet=[
'execution_id',
'entity_arn',
'denied_actions'
],
ScanFilter={
'entity_arn': {
'AttributeValueList': [
{
'S': entity_arn
}
],
'ComparisonOperator': 'EQ'
}
}
)['Items'][0]
existing_denied_actions = [] if response.get('denied_actions') is None else response['denied_actions']['SS']
execution_id = response['execution_id']['S']
logger.info(existing_denied_actions)
return execution_id, existing_denied_actions
def is_access_denied_security_fairy_audited_role(role_arn):
iam_client = SESSION.client('iam')
    # Consumes a role arn and examines its attached policies to see
    # if they were created by security-fairy
role = Arn(role_arn)
role_name = role.get_entity_name()
logger.info(role_name)
attached_policies = iam_client.list_attached_role_policies(RoleName=role_name)
# Examines all attached policies and search for an attached policy with the
# following format: *_security_fairy_revised_policy
# (see security_fairy_revised_policy_approve.py line 58)
logger.debug("Policies attached to {}:".format(role.get_full_arn()))
for policy in attached_policies['AttachedPolicies']:
logger.info(policy['PolicyName'])
if '-security-fairy-revised-policy' in policy['PolicyName']:
return True
return False
if __name__ == '__main__':
# arn = 'arn:aws:iam::281782457076:role/1s_tear_down_role'
# logging.info(is_access_denied_security_fairy_audited_role(arn))
access_denied_records = [{"error_code": "AccessDenied", "arn": "arn:aws:sts::281782457076:assumed-role/serverless_api_gateway_step_functions/BackplaneAssumeRoleSession", "denied_action": "states:StartExecution"},
{"error_code": "AccessDenied", "arn": "arn:aws:sts::281782457076:assumed-role/1s_tear_down_role/potato", "denied_action": "route53:CreateHostedZone"},
{"error_code": "AccessDenied", "arn": "arn:aws:iam::281782457076:user/dbrewer@experlogix.com", "denied_action": "codebuild:StartBuild"},
{"error_code": "AccessDenied", "arn": "arn:aws:iam::281782457076:user/tj.eaglescout@gmail.com", "denied_action": "codebuild:StartBuild"},
{"error_code": "AccessDenied", "arn": "arn:aws:iam::281782457076:user/chase.thompson-baugh@simplymac.com", "denied_action": "codebuild:StartBuild"},
{"error_code": "AccessDenied", "arn": "arn:aws:iam::281782457076:user/steven.nourse@vivintsolar.com", "denied_action": "codebuild:StartBuild"},
{"error_code": "AccessDenied", "arn": "arn:aws:iam::281782457076:role/1s_tear_down_role", "denied_action": "codebuild:StartBuild"}]
# dynamodb_table = 'security_fairy_dynamodb_table'
# existing_denied_actions('arn:aws:iam::281782457076:role/1s_tear_down_role', dynamodb_table)
security_fairy_access_denied_records = get_security_fairy_audited_entities(access_denied_records)
write_denied_actions_to_dynamodb(security_fairy_access_denied_records,'security_fairy_dynamodb_table')
# if __name__ == '__main__':
# EVENT = {
# "Records": [
# {
# "eventVersion": "2.0",
# "eventTime": "2017-08-23T17:27:20.482Z",
# "requestParameters": {
# "sourceIPAddress": "184.72.102.183"
# },
# "s3": {
# "configurationId": "log_posted",
# "object": {
# "eTag": "f88cc0ba387febb9d1922bcf3624e249",
# "sequencer": "00599DBAF77B4804AE",
# "key": "AWSLogs/281782457076/CloudTrail/us-east-1/2017/08/23/281782457076_CloudTrail_us-east-1_20170823T1725Z_Nobz9PDTfkS2itSG.json.gz",
# "size": 4342
# },
# "bucket": {
# "arn": "arn:aws:s3:::1strategy-training-traillogs",
# "name": "1strategy-training-traillogs",
# "ownerIdentity": {
# "principalId": "A3F4AZ9K861LVS"
# }
# },
# "s3SchemaVersion": "1.0"
# },
# "responseElements": {
# "x-amz-id-2": "qakr7pYcVWfsXM/BEncmZ/zQVPQnIAyN5ggRIF+9/+5JhAhhmMDZDJunlhhFowOKzGF9mNtF1Ys=",
# "x-amz-request-id": "5A68EDF6D1F0C933"
# },
# "awsRegion": "us-west-2",
# "eventName": "ObjectCreated:Put",
# "userIdentity": {
# "principalId": "AWS:AROAI6ZMWVXR3IZ6MKNSW:i-0c91c32104e81c79d"
# },
# "eventSource": "aws:s3"
# }
# ]
# }
# lambda_handler(EVENT, {})
| apache-2.0 | 3,453,758,107,201,995,000 | 46.829787 | 382 | 0.56895 | false | 3.781965 | false | false | false |
mrquim/repository.mrquim | repo/script.module.myconnpy/lib/mysql/connector/dbapi.py | 16 | 2252 | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
This module implements some constructors and singletons as required by the
DB API v2.0 (PEP-249).
"""
# Python Db API v2
apilevel = '2.0'
threadsafety = 1
paramstyle = 'pyformat'
import time
import datetime
from mysql.connector import constants
class _DBAPITypeObject:
def __init__(self, *values):
self.values = values
def __cmp__(self, other):
if other in self.values:
return 0
if other < self.values:
return 1
else:
return -1
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime
def DateFromTicks(ticks):
return Date(*time.localtime(ticks)[:3])
def TimeFromTicks(ticks):
return Time(*time.localtime(ticks)[3:6])
def TimestampFromTicks(ticks):
return Timestamp(*time.localtime(ticks)[:6])
Binary = str
STRING = _DBAPITypeObject(constants.FieldType.get_string_types())
BINARY = _DBAPITypeObject(constants.FieldType.get_binary_types())
NUMBER = _DBAPITypeObject(constants.FieldType.get_number_types())
DATETIME = _DBAPITypeObject(constants.FieldType.get_timestamp_types())
ROWID = _DBAPITypeObject()
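
# Illustrative usage (not part of the original module; FieldType names are
# assumed to come from this package's constants module): a column type_code
# compares equal to a singleton when __cmp__ above finds it in the value set,
# per PEP 249.
#
#     STRING == constants.FieldType.VARCHAR    # expected True
#     NUMBER == constants.FieldType.VARCHAR    # expected False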
| gpl-2.0 | 3,972,306,058,080,460,000 | 30.71831 | 78 | 0.732682 | false | 3.697865 | false | false | false |
carmark/vbox | src/VBox/ValidationKit/testmanager/webui/wuihlpgraphmatplotlib.py | 1 | 12445 | # -*- coding: utf-8 -*-
# $Id: wuihlpgraphmatplotlib.py 56295 2015-06-09 14:29:55Z vboxsync $
"""
Test Manager Web-UI - Graph Helpers - Implemented using matplotlib.
"""
__copyright__ = \
"""
Copyright (C) 2012-2015 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 56295 $"
# Standard Python Import and extensions installed on the system.
import re;
import StringIO;
import matplotlib; # pylint: disable=F0401
matplotlib.use('Agg'); # Force backend.
import matplotlib.pyplot; # pylint: disable=F0401
from numpy import arange as numpy_arange; # pylint: disable=E0611
# Validation Kit imports.
from testmanager.webui.wuihlpgraphbase import WuiHlpGraphBase;
class WuiHlpGraphMatplotlibBase(WuiHlpGraphBase):
""" Base class for the matplotlib graphs. """
def __init__(self, sId, oData, oDisp):
WuiHlpGraphBase.__init__(self, sId, oData, oDisp);
self._fXkcdStyle = True;
def setXkcdStyle(self, fEnabled = True):
""" Enables xkcd style graphs for implementations that supports it. """
self._fXkcdStyle = fEnabled;
return True;
def _createFigure(self):
"""
Wrapper around matplotlib.pyplot.figure that feeds the figure the
basic graph configuration.
"""
if self._fXkcdStyle and matplotlib.__version__ > '1.2.9':
matplotlib.pyplot.xkcd(); # pylint: disable=E1101
matplotlib.rcParams.update({'font.size': self._cPtFont});
oFigure = matplotlib.pyplot.figure(figsize = (float(self._cxGraph) / self._cDpiGraph,
float(self._cyGraph) / self._cDpiGraph),
dpi = self._cDpiGraph);
return oFigure;
def _produceSvg(self, oFigure, fTightLayout = True):
""" Creates an SVG string from the given figure. """
oOutput = StringIO.StringIO();
if fTightLayout:
oFigure.tight_layout();
oFigure.savefig(oOutput, format = 'svg');
if self._oDisp and self._oDisp.isBrowserGecko('20100101'):
# This browser will stretch images to fit if no size or width is given.
            sSubstitute = r'\1 \3 preserveAspectRatio="xMidYMin meet"';
else:
# Chrome and IE likes to have the sizes as well as the viewBox.
            sSubstitute = r'\1 \3 preserveAspectRatio="xMidYMin meet" \2 \4';
return re.sub(r'(<svg) (height="\d+pt") (version="\d+.\d+" viewBox="\d+ \d+ \d+ \d+") (width="\d+pt")',
sSubstitute,
oOutput.getvalue().decode('utf8'),
count = 1);
class WuiHlpBarGraph(WuiHlpGraphMatplotlibBase):
"""
Bar graph.
"""
def __init__(self, sId, oData, oDisp = None):
WuiHlpGraphMatplotlibBase.__init__(self, sId, oData, oDisp);
self.fpMax = None;
self.fpMin = 0;
self.cxBarWidth = None;
def setRangeMax(self, fpMax):
""" Sets the max range."""
self.fpMax = float(fpMax);
return None;
def renderGraph(self): # pylint: disable=R0914
aoTable = self._oData.aoTable;
#
# Extract/structure the required data.
#
aoSeries = list();
for j in range(len(aoTable[1].aoValues)):
aoSeries.append(list());
asNames = list();
oXRange = numpy_arange(self._oData.getGroupCount());
fpMin = self.fpMin;
fpMax = self.fpMax;
if self.fpMax is None:
fpMax = float(aoTable[1].aoValues[0]);
for i in range(1, len(aoTable)):
asNames.append(aoTable[i].sName);
for j in range(len(aoTable[i].aoValues)):
fpValue = float(aoTable[i].aoValues[j]);
aoSeries[j].append(fpValue);
if fpValue < fpMin:
fpMin = fpValue;
if fpValue > fpMax:
fpMax = fpValue;
fpMid = fpMin + (fpMax - fpMin) / 2.0;
if self.cxBarWidth is None:
self.cxBarWidth = 1.0 / (len(aoTable[0].asValues) + 1.1);
# Render the PNG.
oFigure = self._createFigure();
oSubPlot = oFigure.add_subplot(1, 1, 1);
aoBars = list();
for i in range(len(aoSeries)):
sColor = self.calcSeriesColor(i);
aoBars.append(oSubPlot.bar(oXRange + self.cxBarWidth * i,
aoSeries[i],
self.cxBarWidth,
color = sColor,
align = 'edge'));
#oSubPlot.set_title('Title')
#oSubPlot.set_xlabel('X-axis')
#oSubPlot.set_xticks(oXRange + self.cxBarWidth);
oSubPlot.set_xticks(oXRange);
oLegend = oSubPlot.legend(aoTable[0].asValues, loc = 'best', fancybox = True);
oLegend.get_frame().set_alpha(0.5);
oSubPlot.set_xticklabels(asNames, ha = "left");
#oSubPlot.set_ylabel('Y-axis')
oSubPlot.set_yticks(numpy_arange(fpMin, fpMax + (fpMax - fpMin) / 10 * 0, fpMax / 10));
oSubPlot.grid(True);
fpPadding = (fpMax - fpMin) * 0.02;
for i in range(len(aoBars)):
aoRects = aoBars[i]
for j in range(len(aoRects)):
oRect = aoRects[j];
fpValue = float(aoTable[j + 1].aoValues[i]);
if fpValue <= fpMid:
oSubPlot.text(oRect.get_x() + oRect.get_width() / 2.0,
oRect.get_height() + fpPadding,
aoTable[j + 1].asValues[i],
ha = 'center', va = 'bottom', rotation = 'vertical', alpha = 0.6, fontsize = 'small');
else:
oSubPlot.text(oRect.get_x() + oRect.get_width() / 2.0,
oRect.get_height() - fpPadding,
aoTable[j + 1].asValues[i],
ha = 'center', va = 'top', rotation = 'vertical', alpha = 0.6, fontsize = 'small');
return self._produceSvg(oFigure);
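
# A minimal usage sketch (not part of VirtualBox). The row objects below are
# hypothetical stand-ins for the WuiHlpGraphDataTable interface this class
# expects: row 0 carries the series labels in asValues, later rows carry
# sName/aoValues/asValues, and the data object answers getGroupCount().
#
#     class _Row(object):
#         def __init__(self, sName, aoValues, asValues):
#             self.sName, self.aoValues, self.asValues = sName, aoValues, asValues;
#
#     class _Data(object):
#         aoTable = [_Row(None, [], ['build', 'test']),
#                    _Row('rev 101', [12.5, 30.0], ['12.5', '30.0'])];
#         def getGroupCount(self):
#             return len(self.aoTable) - 1;
#
#     sSvg = WuiHlpBarGraph('example', _Data()).renderGraph();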
class WuiHlpLineGraph(WuiHlpGraphMatplotlibBase):
"""
Line graph.
"""
def __init__(self, sId, oData, oDisp = None, fErrorBarY = False):
# oData must be a WuiHlpGraphDataTableEx like object.
WuiHlpGraphMatplotlibBase.__init__(self, sId, oData, oDisp);
self._cMaxErrorBars = 12;
self._fErrorBarY = fErrorBarY;
def setErrorBarY(self, fEnable):
""" Enables or Disables error bars, making this work like a line graph. """
self._fErrorBarY = fEnable;
return True;
def renderGraph(self): # pylint: disable=R0914
aoSeries = self._oData.aoSeries;
oFigure = self._createFigure();
oSubPlot = oFigure.add_subplot(1, 1, 1);
if self._oData.sYUnit is not None:
oSubPlot.set_ylabel(self._oData.sYUnit);
if self._oData.sXUnit is not None:
oSubPlot.set_xlabel(self._oData.sXUnit);
cSeriesNames = 0;
cYMin = 1000;
cYMax = 0;
for iSeries, oSeries in enumerate(aoSeries):
sColor = self.calcSeriesColor(iSeries);
cYMin = min(cYMin, min(oSeries.aoYValues));
cYMax = max(cYMax, max(oSeries.aoYValues));
if not self._fErrorBarY:
oSubPlot.errorbar(oSeries.aoXValues, oSeries.aoYValues, color = sColor);
elif len(oSeries.aoXValues) > self._cMaxErrorBars:
if matplotlib.__version__ < '1.3.0':
oSubPlot.errorbar(oSeries.aoXValues, oSeries.aoYValues, color = sColor);
else:
oSubPlot.errorbar(oSeries.aoXValues, oSeries.aoYValues,
yerr = [oSeries.aoYErrorBarBelow, oSeries.aoYErrorBarAbove],
errorevery = len(oSeries.aoXValues) / self._cMaxErrorBars,
color = sColor );
else:
oSubPlot.errorbar(oSeries.aoXValues, oSeries.aoYValues,
yerr = [oSeries.aoYErrorBarBelow, oSeries.aoYErrorBarAbove],
color = sColor);
cSeriesNames += oSeries.sName is not None;
if cYMin != 0 or cYMax != 0:
oSubPlot.set_ylim(bottom = 0);
if cSeriesNames > 0:
oLegend = oSubPlot.legend([oSeries.sName for oSeries in aoSeries], loc = 'best', fancybox = True);
oLegend.get_frame().set_alpha(0.5);
if self._sTitle is not None:
oSubPlot.set_title(self._sTitle);
if self._cxGraph >= 256:
oSubPlot.minorticks_on();
oSubPlot.grid(True, 'major', axis = 'both');
oSubPlot.grid(True, 'both', axis = 'x');
if True:
# oSubPlot.axis('off');
#oSubPlot.grid(True, 'major', axis = 'none');
#oSubPlot.grid(True, 'both', axis = 'none');
matplotlib.pyplot.setp(oSubPlot, xticks = [], yticks = []);
return self._produceSvg(oFigure);
class WuiHlpLineGraphErrorbarY(WuiHlpLineGraph):
"""
Line graph with an errorbar for the Y axis.
"""
def __init__(self, sId, oData, oDisp = None):
WuiHlpLineGraph.__init__(self, sId, oData, fErrorBarY = True);
class WuiHlpMiniSuccessRateGraph(WuiHlpGraphMatplotlibBase):
"""
Mini rate graph.
"""
def __init__(self, sId, oData, oDisp = None):
"""
oData must be a WuiHlpGraphDataTableEx like object, but only aoSeries,
aoSeries[].aoXValues, and aoSeries[].aoYValues will be used. The
values are expected to be a percentage, i.e. values between 0 and 100.
"""
WuiHlpGraphMatplotlibBase.__init__(self, sId, oData, oDisp);
self.setFontSize(6);
def renderGraph(self): # pylint: disable=R0914
assert len(self._oData.aoSeries) == 1;
oSeries = self._oData.aoSeries[0];
# hacking
#self.setWidth(512);
#self.setHeight(128);
# end
oFigure = self._createFigure();
from mpl_toolkits.axes_grid.axislines import SubplotZero;
oAxis = SubplotZero(oFigure, 111);
oFigure.add_subplot(oAxis);
# Disable all the normal axis.
oAxis.axis['right'].set_visible(False)
oAxis.axis['top'].set_visible(False)
oAxis.axis['bottom'].set_visible(False)
oAxis.axis['left'].set_visible(False)
# Use the zero axis instead.
oAxis.axis['yzero'].set_axisline_style('-|>');
oAxis.axis['yzero'].set_visible(True);
oAxis.axis['xzero'].set_axisline_style('-|>');
oAxis.axis['xzero'].set_visible(True);
if oSeries.aoYValues[-1] == 100:
sColor = 'green';
elif oSeries.aoYValues[-1] > 75:
sColor = 'yellow';
else:
sColor = 'red';
oAxis.plot(oSeries.aoXValues, oSeries.aoYValues, '.-', color = sColor, linewidth = 3);
oAxis.fill_between(oSeries.aoXValues, oSeries.aoYValues, facecolor = sColor, alpha = 0.5)
oAxis.set_xlim(left = -0.01);
oAxis.set_xticklabels([]);
oAxis.set_xmargin(1);
oAxis.set_ylim(bottom = 0, top = 100);
oAxis.set_yticks([0, 50, 100]);
oAxis.set_ylabel('%');
#oAxis.set_yticklabels([]);
oAxis.set_yticklabels(['', '%', '']);
return self._produceSvg(oFigure, False);
| gpl-2.0 | 1,656,052,364,727,503,000 | 37.649068 | 120 | 0.568742 | false | 3.536516 | false | false | false |
tijn/hosts.tmLanguage | support.py | 1 | 1215 | import sublime
import sublime_plugin
class HostsFileViewListener(sublime_plugin.ViewEventListener):
SYNTAX = 'hosts.sublime-syntax'
@classmethod
def is_applicable(cls, settings):
try:
return (settings and
settings.get('syntax', '').lower().endswith(cls.SYNTAX))
except Exception as e:
return False
def on_hover(self, point, hover_zone):
if ((hover_zone != sublime.HOVER_TEXT or not
self.view.match_selector(point, 'meta.hostname meta.punycode'))):
return
expression_region = next(
r for r in self.view.find_by_selector('meta.hostname')
if r.contains(point))
hostname = self.view.substr(expression_region)
try:
hover_text = str.encode(hostname).decode('idna')
except Exception as e:
hover_text = 'Could not parse Punycode expression'
html = '''
<body id="render-punycode">
<div>{}</div>
</body>
'''.format(hover_text)
self.view.show_popup(html, sublime.HIDE_ON_MOUSE_MOVE_AWAY,
location=point, max_width=512, max_height=60)
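
# For illustration, the decode step above turns an IDNA/Punycode label back
# into Unicode, e.g. str.encode('xn--bcher-kva').decode('idna') == 'bücher';
# malformed Punycode raises, falling through to the error popup text.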
| mit | -2,744,826,861,513,747,500 | 29.375 | 78 | 0.57284 | false | 3.919355 | false | false | false |
showerst/openstates | openstates/al/bills.py | 1 | 18080 | import datetime
import re
from billy.scrape.bills import BillScraper, Bill
from billy.scrape.votes import Vote
import lxml.html
import scrapelib
_action_re = (
('Introduced', 'bill:introduced'),
('(Forwarded|Delivered) to Governor', 'governor:received'),
('Amendment (?:.*)Offered', 'amendment:introduced'),
('Substitute (?:.*)Offered', 'amendment:introduced'),
('Amendment (?:.*)adopted', 'amendment:passed'),
('Amendment lost', 'amendment:failed'),
('Read for the first time and referred to',
['bill:reading:1', 'committee:referred']),
('(r|R)eferred to', 'committee:referred'),
('Read for the second time', 'bill:reading:2'),
('(S|s)ubstitute adopted', 'bill:substituted'),
('(m|M)otion to Adopt (?:.*)adopted', 'amendment:passed'),
('(m|M)otion to (t|T)able (?:.*)adopted', 'amendment:passed'),
('(m|M)otion to Adopt (?:.*)lost', 'amendment:failed'),
('(m|M)otion to Read a Third Time and Pass adopted', 'bill:passed'),
('(m|M)otion to Concur In and Adopt adopted', 'bill:passed'),
('Third Reading Passed', 'bill:passed'),
('Reported from', 'committee:passed'),
('Indefinitely Postponed', 'bill:failed'),
('Passed by House of Origin', 'bill:passed'),
('Passed Second House', 'bill:passed'),
# memorial resolutions can pass w/o debate
('Joint Rule 11', ['bill:introduced', 'bill:passed']),
('Lost in', 'bill:failed'),
('Favorable from', 'committee:passed:favorable'),
)
def _categorize_action(action):
for pattern, types in _action_re:
if re.findall(pattern, action):
return types
return 'other'
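
# For example, _categorize_action('Read for the second time') returns
# 'bill:reading:2', and 'Read for the first time and referred to ...' yields
# the pair ['bill:reading:1', 'committee:referred']; unmatched text falls
# through to 'other'.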
class ALBillScraper(BillScraper):
jurisdiction = 'al'
CHAMBERS = {'H': 'lower', 'S': 'upper'}
DATE_FORMAT = '%m/%d/%Y'
# Tweak which responses are acceptible to the scrapelib internals
def accept_response(self, response, **kwargs):
# Errored requests should be retried
if response.status_code >= 400:
return False
# Almost all GET requests should _not_ get redirected
elif (response.status_code == 302 and
response.request.method == 'GET' and
'ALISONLogin.aspx' not in response.request.url):
return False
# Standard GET responses must have an ASP.NET VIEWSTATE
# If they don't, it means the page is a trivial error message
elif (not lxml.html.fromstring(response.text).xpath(
'//input[@id="__VIEWSTATE"]/@value') and
response.request.method == 'GET'):
return False
else:
return True
def _set_session(self, session):
''' Activate an ASP.NET session, and set the legislative session '''
SESSION_SET_URL = ('http://alisondb.legislature.state.al.us/'
'Alison/SelectSession.aspx')
doc = lxml.html.fromstring(self.get(url=SESSION_SET_URL).text)
(viewstate, ) = doc.xpath('//input[@id="__VIEWSTATE"]/@value')
(viewstategenerator, ) = doc.xpath(
'//input[@id="__VIEWSTATEGENERATOR"]/@value')
# Find the link whose text matches the session metadata _scraped_name on the session list page
# The __EVENTARGUMENT form value we need to set the session is the second argument
# to the __doPostBack JS function, which is the href of each that link
(target_session, ) = doc.xpath('//table[@id="ContentPlaceHolder1_gvSessions"]//tr//a/font'
'[text()="{}"]/parent::a/@href'.format(self.session_name))
target_session = target_session.replace("javascript:__doPostBack('ctl00$ContentPlaceHolder1$gvSessions','",'')
target_session = target_session.replace("')",'')
form = {
'__EVENTTARGET': 'ctl00$ContentPlaceHolder1$gvSessions',
'__EVENTARGUMENT': target_session,
'__VIEWSTATE': viewstate,
'__VIEWSTATEGENERATOR': viewstategenerator,
}
self.post(url=SESSION_SET_URL, data=form, allow_redirects=True)
def _get_bill_list(self, url):
'''
For the bill list and resolution list, require that at least
one piece of legislation has been found
'''
for _retry in range(self.retry_attempts):
html = self.get(url=url).text
doc = lxml.html.fromstring(html)
listing = doc.xpath('//table[@id="ContentPlaceHolder1_gvBills"]/tr')[1:]
if listing:
return listing
elif doc.xpath(
'//span[@id="ContentPlaceHolder1_lblCount"]/font/text()'
) == ["0 Instruments", ]:
self.warning("Missing either bills or resolutions")
return []
else:
print "Attempt"
print doc.xpath(
'//span[@id="ContentPlaceHolder1_lblCount"]/text()'
)
continue
else:
raise AssertionError("Bill list not found")
def _get_bill_response(self, url):
''' Ensure that bill pages loaded fully '''
try:
html = self.get(url=url, allow_redirects=False).text
if lxml.html.fromstring(html).xpath(
'//span[@id="ContentPlaceHolder1_lblShotTitle"]'):
return html
# If a bill page doesn't exist yet, ignore redirects and timeouts
except scrapelib.HTTPError:
pass
return None
def scrape(self, session, chambers):
self.validate_session(session)
self.session = session
self.session_name = (self.metadata['session_details']
[self.session]['_scraped_name'])
self.session_id = (self.metadata['session_details']
[self.session]['internal_id'])
self._set_session(session)
# Acquire and process a list of all bills
BILL_TYPE_URL = ('http://alisondb.legislature.state.al.us/Alison/'
'SESSBillsBySelectedStatus.aspx')
BILL_LIST_URL = ('http://alisondb.legislature.state.al.us/Alison/'
'SESSBillsList.aspx?STATUSCODES=Had%20First%20Reading'
'%20House%20of%20Origin&BODY=999999')
doc = lxml.html.fromstring(self.get(url=BILL_TYPE_URL).text)
(viewstate, ) = doc.xpath('//input[@id="__VIEWSTATE"]/@value')
(viewstategenerator, ) = doc.xpath(
'//input[@id="__VIEWSTATEGENERATOR"]/@value')
form = {
'__EVENTTARGET': 'ctl00$ContentPlaceHolder1$gvStatus$ctl02$ctl00',
'__EVENTARGUMENT': 'Select$0',
'__VIEWSTATE': viewstate,
'__VIEWSTATEGENERATOR': viewstategenerator,
'ctl00$ScriptManager1': 'ctl00$UpdatePanel1|ctl00$'
'MainDefaultContent$gvStatus$ctl02$ctl00'
}
self.post(url=BILL_TYPE_URL, data=form, allow_redirects=True)
self.scrape_bill_list(BILL_LIST_URL)
self._set_session(session)
# Acquire and process a list of all resolutions
RESOLUTION_TYPE_URL = (
'http://alisondb.legislature.state.al.us/Alison/'
'SESSResosBySelectedStatus.aspx?BODYID=1755')
RESOLUTION_LIST_URL = (
'http://alisondb.legislature.state.al.us/Alison/'
'SESSResosList.aspx?STATUSCODES=Had%20First%20Reading'
'%20House%20of%20Origin&BODY=999999')
resText = self.get(url=RESOLUTION_TYPE_URL).text
doc = lxml.html.fromstring(resText)
(viewstate, ) = doc.xpath('//input[@id="__VIEWSTATE"]/@value')
(viewstategenerator, ) = doc.xpath(
'//input[@id="__VIEWSTATEGENERATOR"]/@value')
form = {
'__EVENTTARGET': 'ctl00$ContentPlaceHolder1$gvStatus$ctl02$ctl00',
'__EVENTARGUMENT': 'Select$0',
'__VIEWSTATE': viewstate,
'__VIEWSTATEGENERATOR': viewstategenerator,
'ctl00$ScriptManager1': 'tctl00$UpdatePanel1|ctl00$'
'MainDefaultContent$gvStatus$ctl02$ctl00'
}
        self.post(url=RESOLUTION_TYPE_URL, data=form, allow_redirects=True)
self.scrape_bill_list(RESOLUTION_LIST_URL)
def scrape_bill_list(self, url):
bill_list = self._get_bill_list(url)
for bill_info in bill_list:
(bill_id, ) = bill_info.xpath('td[1]/font/input/@value')
(sponsor, ) = bill_info.xpath('td[2]/font/input/@value')
(subject, ) = bill_info.xpath('td[3]//text()')
subject = subject.strip()
chamber = self.CHAMBERS[bill_id[0]]
if 'B' in bill_id:
bill_type = 'bill'
elif 'JR' in bill_id:
bill_type = 'joint resolution'
elif 'R' in bill_id:
bill_type = 'resolution'
else:
raise AssertionError(
"Unknown bill type for bill '{}'".format(bill_id))
bill = Bill(
session=self.session,
chamber=chamber,
bill_id=bill_id,
title='',
type=bill_type
)
if subject:
bill['subjects'] = [subject]
if sponsor:
bill.add_sponsor(type='primary', name=sponsor)
bill.add_source(url)
bill_url = ('http://alisondb.legislature.state.al.us/Alison/'
'SESSBillStatusResult.aspx?BILL={}'.format(bill_id))
bill.add_source(bill_url)
bill_html = self._get_bill_response(bill_url)
if bill_html is None:
self.warning("Bill {} has no webpage, and will be skipped".
format(bill_id))
continue
bill_doc = lxml.html.fromstring(bill_html)
            if bill_doc.xpath(
                    '//span[@id="ContentPlaceHolder1_lblShotTitle"]'):
title = bill_doc.xpath(
'//span[@id="ContentPlaceHolder1_lblShotTitle"]')[0].text_content().strip()
if not title:
title = "[No title given by state]"
bill['title'] = title
version_url_base = (
'http://alisondb.legislature.state.al.us/ALISON/'
'SearchableInstruments/{0}/PrintFiles/{1}-'.
format(self.session, bill_id))
versions = bill_doc.xpath(
'//table[@class="box_versions"]/tr/td[2]/font/text()')
for version in versions:
name = version
if version == "Introduced":
version_url = version_url_base + 'int.pdf'
elif version == "Engrossed":
version_url = version_url_base + 'eng.pdf'
elif version == "Enrolled":
version_url = version_url_base + 'enr.pdf'
else:
raise NotImplementedError(
"Unknown version type found: '{}'".format(name))
bill.add_version(
name=name,
url=version_url,
mimetype='application/pdf'
)
# Fiscal notes exist, but I can't figure out how to build their URL
fiscal_notes = bill_doc.xpath(
'//table[@class="box_fiscalnote"]')[1:]
for fiscal_note in fiscal_notes:
pass
# Budget Isolation Resolutions are handled as extra actions/votes
birs = bill_doc.xpath(
'//div[@class="box_bir"]//table//table/tr')[1:]
for bir in birs:
bir_action = bir.xpath('td[1]')[0].text_content().strip()
# Sometimes ALISON's database puts another bill's
# actions into the BIR action list; ignore these
if bill_id not in bir_action:
self.warning(
"BIR action found ({}) ".format(bir_action) +
"that doesn't match the bill ID ({})".format(bill_id))
continue
bir_date = datetime.datetime.strptime(
bir.xpath('td[2]/font/text()')[0], self.DATE_FORMAT)
bir_type = bir.xpath('td[1]/font/text()')[0].split(" ")[0]
bir_chamber = self.CHAMBERS[bir_type[0]]
bir_text = "{0}: {1}".format(
bir_type, bir.xpath('td[3]/font/text()')[0].strip())
bill.add_action(
actor=bir_chamber,
action=bir_text,
date=bir_date,
type="other"
)
try:
(bir_vote_id, ) = bir.xpath('td[4]/font/input/@value')
except ValueError:
bir_vote_id = ''
bir_vote_id = bir_vote_id.strip()
if bir_vote_id.startswith("Roll "):
bir_vote_id = bir_vote_id.split(" ")[-1]
self.scrape_vote(
bill=bill,
vote_chamber=bir_type[0],
bill_id="{0}%20for%20{1}".format(bir_type, bill_id),
vote_id=bir_vote_id,
vote_date=bir_date,
action_text=bir_text
)
actions = bill_doc.xpath('//table[@id="ContentPlaceHolder1_gvHistory"]/tr')[1:]
action_date = None
for action in actions:
# If actions occur on the same day, only one date will exist
if (action.xpath('td[1]/font/text()')[0].
encode('ascii', 'ignore').strip()):
action_date = datetime.datetime.strptime(
action.xpath('td[1]/font/text()')[0], self.DATE_FORMAT)
(action_chamber, ) = action.xpath('td[2]/font/text()')
(action_text, ) = action.xpath('td[4]/font/text()')
action_type = _categorize_action(action_text)
# check for occasional extra last row
if not action_chamber.strip():
continue
# The committee cell is just an abbreviation, so get its name
actor = self.CHAMBERS[action_chamber]
try:
action_committee = re.search(
r'.*? referred to the .*? committee on (.*?)$',
action_text).group(1).strip()
except AttributeError:
action_committee = ''
bill.add_action(
actor=actor,
action=action_text,
date=action_date,
type=action_type,
committees=action_committee if action_committee else None
)
try:
vote_button = action.xpath('td[9]//text()')[0].strip()
                except IndexError:
vote_button = ''
if vote_button.startswith("Roll "):
vote_id = vote_button.split(" ")[-1]
self.scrape_vote(
bill=bill,
vote_chamber=action_chamber,
bill_id=bill_id,
vote_id=vote_id,
vote_date=action_date,
action_text=action_text
)
self.save_bill(bill)
def scrape_vote(self, bill, vote_chamber, bill_id, vote_id, vote_date,
action_text):
url = ('http://alisondb.legislature.state.al.us/Alison/'
'GetRollCallVoteResults.aspx?'
'VOTE={0}&BODY={1}&INST={2}&SESS={3}'.
format(vote_id, vote_chamber, bill_id, self.session_id))
doc = lxml.html.fromstring(self.get(url=url).text)
voters = {'Y': [], 'N': [], 'P': [], 'A': []}
voters_and_votes = doc.xpath('//table/tr/td/font/text()')
capture_vote = False
name = ''
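        # The flattened cell text alternates legislator name and vote letter
        # (e.g. ['Smith', 'Y', 'Jones', 'N', ...]; names are hypothetical), so
        # capture_vote toggles between reading a name and filing it under a key.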
for item in voters_and_votes:
if capture_vote:
capture_vote = False
if name:
voters[item].append(name)
else:
capture_vote = True
name = item
if (name.endswith(", Vacant") or
name.startswith("Total ") or
not name.strip()):
name = ''
# Check name counts against totals listed on the site
total_yea = doc.xpath('//*[starts-with(text(), "Total Yea")]/text()')
if total_yea:
total_yea = int(total_yea[0].split(":")[-1])
assert total_yea == len(voters['Y']), "Yea count incorrect"
else:
total_yea = len(voters['Y'])
total_nay = doc.xpath('//*[starts-with(text(), "Total Nay")]/text()')
if total_nay:
total_nay = int(total_nay[0].split(":")[-1])
assert total_nay == len(voters['N']), "Nay count incorrect"
else:
total_nay = len(voters['N'])
total_absent = doc.xpath(
'//*[starts-with(text(), "Total Absent")]/text()')
if total_absent:
total_absent = int(total_absent[0].split(":")[-1])
assert total_absent == len(voters['A']), "Absent count incorrect"
total_other = len(voters['P']) + len(voters['A'])
vote = Vote(
self.CHAMBERS[vote_chamber[0]], vote_date, action_text,
total_yea > total_nay, total_yea, total_nay, total_other)
vote.add_source(url)
for member in voters['Y']:
vote.yes(member)
for member in voters['N']:
vote.no(member)
for member in (voters['A'] + voters['P']):
vote.other(member)
bill.add_vote(vote)
| gpl-3.0 | 1,621,733,867,048,164,000 | 39.357143 | 118 | 0.516593 | false | 3.869863 | false | false | false |
soccermetrics/soccermetrics-client-py | soccermetrics/rest/resources/match.py | 1 | 7424 | from soccermetrics.rest.resources import Resource
from soccermetrics.rest.resources.events import MatchEvents
from soccermetrics.rest.resources.statistics import MatchStatistics
class MatchResource(Resource):
"""
Represents a Match REST resource (<play>/matches/<resource> endpoints).
The Match Resources are a collection of macro-events, micro-events, and
summary statistics resources in the Soccermetrics Connect API.
Derived from :class:`resources.Resource`.
"""
def __init__(self, play, base_uri, auth):
"""
Constructor of MatchResource class.
:param play: Type of teams playing in matches.
:type play: string
:param base_uri: Base URI of API.
:type base_uri: string
:param auth: Authentication credential.
:type auth: tuple
"""
super(MatchResource, self).__init__(base_uri,auth)
self.base_endpoint = "%s/%s/matches" % (self.endpoint, play)
self.match = None
self.resource = None
def EndpointURI(self):
"""
Construct URI of Match REST resource.
URI is of format ``/matches/<match>/<resource>/``.
:returns: URI of REST resource.
:rtype: string
"""
        return '/'.join(str(x) for x in [self.base_endpoint, self.match, self.resource] if x)
def get(self, match=None, uid=None, **kwargs):
"""
Retrieves a representation of Match REST resource.
If the status code is 200 (OK), returns the representation. Otherwise,
returns an error.
        :param match: Unique ID associated with a football match.
:type match: integer
:param uid: Unique ID of API resource representation.
:type uid: integer
:param kwargs: Collection of query parameters.
:type kwargs: dict
:returns: Resource representation.
        :rtype: Return value of :func:`Resource.get`.
"""
self.match = match
self.endpoint = self.EndpointURI()
return super(MatchResource, self).get(uid, **kwargs)
def head(self):
"""
Retrieves header data of Match REST resource.
:returns: Header data.
        :rtype: Return value of :func:`Resource.head`.
"""
self.match = None
self.endpoint = self.EndpointURI()
return super(MatchResource, self).head()
def options(self):
"""
Retrieves documentation of Match REST resource.
If the status code is 200 (OK), returns the documentation. Otherwise,
returns an error.
Link resources are not included in the documentation.
:returns: Resource documentation data.
        :rtype: Return value of :func:`Resource.options`.
"""
self.match = None
self.endpoint = self.EndpointURI()
return super(MatchResource, self).options()
class MatchInformation(MatchResource):
"""
Access to Match Information resources (/<play>/matches/info resource).
Derived from :class:`MatchResource`.
"""
def __init__(self, play, base_uri, auth):
super(MatchInformation, self).__init__(play, base_uri, auth)
self.resource = "info"
class MatchConditions(MatchResource):
"""
Access to Match Conditions resources (/<play>/matches/conditions resource).
Derived from :class:`MatchResource`.
"""
def __init__(self, play, base_uri, auth):
super(MatchConditions, self).__init__(play, base_uri, auth)
self.resource = "conditions"
class MatchLineups(MatchResource):
"""
Access to Match Lineups resources (/<play>/matches/lineups resource).
Derived from :class:`MatchResource`.
"""
def __init__(self, play, base_uri, auth):
super(MatchLineups, self).__init__(play, base_uri, auth)
self.resource = "lineups"
class MatchGoals(MatchResource):
"""
Access to Match Goals resources (/<play>/matches/goals resource).
Derived from :class:`MatchResource`.
"""
def __init__(self, play, base_uri, auth):
super(MatchGoals, self).__init__(play, base_uri, auth)
self.resource = "goals"
class MatchPenalties(MatchResource):
"""
Access to Match Penalties resources (/<play>/matches/penalties resource).
Derived from :class:`MatchResource`.
"""
def __init__(self, play, base_uri, auth):
super(MatchPenalties, self).__init__(play, base_uri, auth)
self.resource = "penalties"
class MatchOffenses(MatchResource):
"""
Access to Match Offenses resources (/<play>/matches/offenses resource).
Derived from :class:`MatchResource`.
"""
def __init__(self, play, base_uri, auth):
super(MatchOffenses, self).__init__(play, base_uri, auth)
self.resource = "offenses"
class MatchSubstitutions(MatchResource):
"""
Access to Match Substitutions resources (/<play>/matches/substitutions resource).
Derived from :class:`MatchResource`.
"""
def __init__(self, play, base_uri, auth):
super(MatchSubstitutions, self).__init__(play, base_uri, auth)
self.resource = "substitutions"
class MatchShootouts(MatchResource):
"""
Access to Match Shootouts resources (/<play>/matches/shootouts resource).
Derived from :class:`MatchResource`.
"""
def __init__(self, play, base_uri, auth):
super(MatchShootouts, self).__init__(play, base_uri, auth)
self.resource = "shootouts"
class MatchPlay(object):
"""
Access to Match objects for a specific type of match (club,
national team).
+----------------+---------------------------+
| Attribute | Description |
+================+===========================+
| information | Match information |
+----------------+---------------------------+
| lineups | Match lineups |
+----------------+---------------------------+
| conditions | Match conditions |
+----------------+---------------------------+
| goals | Goal events |
+----------------+---------------------------+
| penalties | Penalty kick events |
+----------------+---------------------------+
| offenses | Disciplinary events |
+----------------+---------------------------+
| substitutions | Substitution events |
+----------------+---------------------------+
| shootouts | Penalty shootout events |
+----------------+---------------------------+
| stats | Match statistics |
+----------------+---------------------------+
| events | Match micro-events |
+----------------+---------------------------+
"""
def __init__(self, play, base_uri, auth):
self.information = MatchInformation(play, base_uri, auth)
self.lineups = MatchLineups(play, base_uri, auth)
self.conditions = MatchConditions(play, base_uri, auth)
self.goals = MatchGoals(play, base_uri, auth)
self.penalties = MatchPenalties(play, base_uri, auth)
self.offenses = MatchOffenses(play, base_uri, auth)
self.substitutions = MatchSubstitutions(play, base_uri, auth)
self.shootouts = MatchShootouts(play, base_uri, auth)
self.stats = MatchStatistics(play, base_uri, auth)
self.events = MatchEvents(play, base_uri, auth) | mit | 204,587,171,825,993,250 | 33.21659 | 91 | 0.573006 | false | 4.328863 | false | false | false |
miguelgrinberg/api-pycon2014 | manage.py | 3 | 1068 | #!/usr/bin/env python
from flask.ext.script import Manager
from api.app import create_app
from api.models import db, User
manager = Manager(create_app)
@manager.command
def createdb():
app = create_app()
with app.app_context():
db.drop_all()
db.create_all()
@manager.command
def adduser(username):
"""Register a new user."""
from getpass import getpass
password = getpass()
password2 = getpass(prompt='Confirm: ')
if password != password2:
import sys
sys.exit('Error: passwords do not match.')
db.create_all()
user = User(username=username, password=password)
db.session.add(user)
db.session.commit()
print('User {0} was registered successfully.'.format(username))
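
# For illustration, the commands above are invoked through Flask-Script, e.g.
#   python manage.py createdb
#   python manage.py adduser alice   # username hypothetical; password is prompted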
@manager.command
def test():
from subprocess import call
call(['nosetests', '-v',
'--with-coverage', '--cover-package=api', '--cover-branches',
'--cover-erase', '--cover-html', '--cover-html-dir=cover'])
if __name__ == '__main__':
manager.run()
| mit | 8,564,597,526,883,689,000 | 23.272727 | 71 | 0.638577 | false | 3.747368 | false | false | false |
evenmarbles/mlpy | mlpy/mdp/stateaction.py | 1 | 42394 | from __future__ import division, print_function, absolute_import
import copy
import math
import numpy as np
from .distrib import ProbabilityDistribution
class Experience(object):
"""Experience base class.
Representation of an experience occurring from acting in the environment.
Parameters
----------
state : State
The representation of the current state.
action : Action
The executed action.
next_state : State
The representation of the state following from acting
with `action` in state `state`.
reward : int or float
The reward awarded by the environment for the state-action
pair.
Attributes
----------
state : State
The experienced state
action : Action
The experienced action.
next_state : State
The experienced next state.
reward : float
The experienced reward.
"""
__slots__ = ('state', 'action', 'next_state', 'reward')
def __init__(self, state, action, next_state, reward=None):
self.state = state
self.action = action
self.next_state = next_state
self.reward = reward
def __str__(self):
s = "state={0} act={1} next_state={2}".format(self.state, self.action, self.next_state) if self.reward else \
"state={0} act={1} reward={2:.2f} next_state={3}".format(
self.state, self.action, self.reward, self.next_state)
return s
class RewardFunction(object):
"""The reward function.
The reward function is responsible for calculating the proper value
of the reward. Callback functions can be specified for custom calculation
of the reward value.
Attributes
----------
cb_get : callable
Callback function to retrieve the reward value.
cb_set : callable
Callback function to set the reward value.
reward : float
The reward value.
bonus
rmax : float
The maximum possible reward.
activate_bonus : bool
Flag activating/deactivating the bonus.
Notes
-----
To ensure that the correct value of the reward is being accessed,
the user should not access the class variables directly but instead
use the methods :meth:`set` and :meth:`get` to set and get the reward
respectively.
Examples
--------
>>> RewardFunction.cb_get = staticmethod(lambda r, s: np.dot(s, RewardFunction.reward))
    In this case the reward value is calculated by taking the dot product
    of the stored reward and a passed-in value.
    >>> RewardFunction.reward = [0.1, 0.9, 1.0, 0.0]
This sets the reward for all instances of the reward function.
>>> reward_func = RewardFunction()
    >>> print(reward_func.get([0.9, 0.5, 0.0, 1.0]))
0.54
    This calculates the reward `r` according to the previously defined
    callback function.
"""
__slots__ = ('_bonus', 'activate_bonus', 'reward', 'rmax', 'cb_get', 'cb_set',)
cb_get = None
cb_set = None
reward = 0.0
rmax = 0.0
activate_bonus = False
@property
def bonus(self):
"""The bonus added to the reward to encourage exploration.
Returns
-------
float :
The bonus added to the reward.
"""
return self._bonus
@bonus.setter
def bonus(self, value):
self._bonus = value
def __init__(self):
self._bonus = 0.0
""":type: float"""
def __getstate__(self):
return {
'reward': self.reward,
'rmax': self.rmax,
'bonus': self.bonus,
'activate_bonus': self.activate_bonus
}
def __setstate__(self, d):
for name, value in d.iteritems():
if not name == 'bonus':
setattr(type(self), name, value)
else:
setattr(self, name, value)
def set(self, value, *args, **kwargs):
"""Set the reward value.
If :meth:`cb_set` is set, the callback will be called
to set the value.
Parameters
----------
args : tuple
Positional arguments passed to the callback.
kwargs : dict
Non-positional arguments passed to the callback.
"""
if self.cb_set is not None:
type(self).reward = self.cb_set(*args, **kwargs)
return
type(self).reward = value
def get(self, *args, **kwargs):
"""Retrieve the reward value.
If :meth:`cb_get` is set, the callback will be called
to retrieve the value.
Parameters
----------
args : tuple
Positional arguments passed to the callback.
kwargs : dict
Non-positional arguments passed to the callback.
Returns
-------
float :
The (calculated) reward value.
"""
reward = self.reward
if self.cb_get is not None:
reward = self.cb_get(self.reward, *args, **kwargs)
if self.activate_bonus:
reward = max(self.reward + self.bonus, self.rmax)
return reward
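# Editor's note: illustrative sketch of the callback mechanism documented
# above; it reproduces the docstring example, so the numbers are arbitrary.
def _reward_function_demo():
    RewardFunction.cb_get = staticmethod(lambda r, s: np.dot(s, RewardFunction.reward))
    RewardFunction.reward = [0.1, 0.9, 1.0, 0.0]
    reward_func = RewardFunction()
    return reward_func.get([0.9, 0.5, 0.0, 1.0])  # 0.54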
class StateActionInfo(object):
"""The models interface.
Contains all relevant information predicted by a model for a
given state-action pair. This includes the (predicted) reward and
transition probabilities to possible next states.
Attributes
----------
transition_proba : ProbabilityDistribution
The transition probability distribution.
reward_func : RewardFunction
The reward function.
visits : int
The number of times the state-action pair has been visited.
known : bool
Flag indicating whether a reward value is known or not.
"""
__slots__ = ('transition_proba', 'reward_func', 'visits', 'known')
def __init__(self):
self.transition_proba = ProbabilityDistribution()
self.reward_func = RewardFunction()
self.visits = 0
self.known = False
def __getstate__(self):
data = {}
for name in self.__slots__:
data[name] = getattr(self, name)
return data
def __setstate__(self, d):
for name, value in d.iteritems():
setattr(self, name, value)
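# Editor's note: hedged sketch of the bookkeeping a planner would do on a
# model entry; the exact ProbabilityDistribution update API is not assumed
# here and is only referenced in a comment.
def _state_action_info_demo():
    info = StateActionInfo()
    info.visits += 1
    info.known = info.visits > 0
    # info.transition_proba would be updated from observed transitions here
    return info.visits, info.known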
class StateData(object):
"""State information interface.
Information about the state can be accessed here.
Parameters
----------
state_id : int
The unique id of the state
actions : list[Action]
List of actions that can be taken in this state.
Attributes
----------
id : int
The unique id of the state.
models : dict
The reward and transition models for each action.
q : dict
The q-table, containing a q-value for each action.
steps_away : int
The number of steps the state is away from its closest neighbor.
"""
__slots__ = ('id', 'models', 'q', 'steps_away')
def __init__(self, state_id, actions):
self.id = state_id
""":type: int"""
self.models = {a: StateActionInfo() for a in actions}
""":type: dict[Action, StateActionInfo]"""
# Randomizing the initial q-values impedes performance
# self.q = {a: ((0.01 - 0.0) * np.random.random() + 0.0) for a in actions}
self.q = {a: 0.0 for a in actions}
""":type: dict[Action, float]"""
self.steps_away = 100000
""":type: int"""
def __getstate__(self):
data = {}
for name in self.__slots__:
data[name] = getattr(self, name)
return data
def __setstate__(self, d):
for name, value in d.iteritems():
setattr(self, name, value)
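# Editor's note: illustrative Q-value update on a fresh StateData entry; the
# action labels and the 0.1 step size are placeholders.
def _state_data_demo():
    sd = StateData(0, ["up", "down"])
    sd.q["up"] += 0.1 * (1.0 - sd.q["up"])  # one TD-style step, illustration only
    return sd.q["up"]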
class MDPPrimitive(object):
"""A Markov decision process primitive.
The base class for :class:`State` and :class:`Action`. Primitives
are represented by a list of features. They optionally can have a `name`.
Parameters
----------
features : array_like, shape (`nfeatures`,)
List of features, where `nfeatures` is the number of features
identifying the primitive.
name : str, optional
The name of the primitive. Default is "".
Attributes
----------
name
dtype : {DTYPE_FLOAT, DTYPE_INT, DTYPE_OBJECT}
The type of the features.
nfeatures : int
The number of features.
discretized : bool
Flag indicating whether the features are discretized or not.
min_features : list
The minimum value for each feature.
max_features : list
        The maximum value for each feature.
states_per_dim : list
The number of states per dimension.
description : dict
A description of the features.
Raises
------
ValueError
If the feature array is not one-dimensional.
Notes
-----
Use the `description` to encode action information. The information
    should contain the list of all available feature combinations and the
    name of each feature.
:Examples:
A description of an action with three possible discrete actions:
::
{
"out": {"value": [-0.004]},
"in": {"value": [0.004]},
"kick": {"value": [-1.0]}
}
    A description of an action with one possible continuous action with
    name `move`. A value of `*` allows the action to be matched for every
    feature array. Additional information, encoding the feature name together
    with its index into the feature array, is given for each higher-level
    element of the feature array:
::
{
"move": {
"value": "*",
"descr": {
"LArm": {"dx": 0, "dy": 1, "dz": 2},
"RArm": {"dx": 3, "dy": 4, "dz": 5},
"LLeg": {"dx": 6, "dy": 7, "dz": 8},
"RLeg": {"dx": 9, "dy": 10, "dz": 11},
"Torso": {"dx": 12, "dy": 13, "dz": 14}
}
}
}
Similarly, a continuous state can be encoded as follows, which identifies
the name of each feature together with its index into the feature array:
::
{
"LArm": {"x": 0, "y": 1, "z": 2},
"RArm": {"x": 3, "y": 4, "z": 5},
"LLeg": {"x": 6, "y": 7, "z": 8},
"RLeg": {"x": 9, "y": 10, "z": 11},
"Torso": {"x": 12, "y": 13, "z": 14}
}
A discrete state can be encoded by identifying the position of each feature:
::
{
"image x-position": 0,
"displacement (mm)": 1
}
    Alternatively, the feature can be identified by a list of features, giving the
positional description:
::
["image x-position", "displacement (mm)"]
    Rather than setting the attributes directly, use the methods :meth:`set_nfeatures`,
:meth:`set_dtype`, :meth:`set_description`, :meth:`set_discretized`, :meth:`set_minmax_features`,
and :meth:`set_states_per_dim` in order to enforce type checking.
"""
__slots__ = ('dtype', 'nfeatures', 'description', 'discretized', 'min_features', 'max_features',
'states_per_dim', '_features', '_name', 'ix')
DTYPE_OBJECT = np.object
DTYPE_FLOAT = np.float64
DTYPE_INT = np.int32
dtype = DTYPE_FLOAT
nfeatures = None
description = None
discretized = False
min_features = None
max_features = None
states_per_dim = None
@property
def name(self):
"""The name of the MDP primitive.
Returns
-------
str :
The name of the primitive.
"""
return self._name
@classmethod
def set_nfeatures(cls, n):
"""Set the number of features.
Parameters
----------
n : int
The number of features.
Raises
------
ValueError
If `n` is not of type integer.
"""
if not isinstance(n, int):
raise ValueError("Attribute 'nfeatures' must be of <type 'int'>, got %s" % str(type(n)))
cls.nfeatures = n
@classmethod
def set_dtype(cls, value=DTYPE_FLOAT):
"""Set the feature's data type.
Parameters
----------
value : {DTYPE_FLOAT, DTYPE_INT, DTYPE_OBJECT}
The data type.
Raises
------
ValueError
If the data type is not one of the allowed types.
"""
if value not in [np.float64, np.int32, np.object]:
raise ValueError("Attribute 'dtype' must be one of the allowed types, got %s" % str(type(value)))
cls.dtype = value
@classmethod
def set_description(cls, descr):
"""Set the feature description.
This extracts the number of features from the description and checks
that it matches with the `nfeatures`. If `nfeatures` is None, `nfeatures`
is set to the extracted value.
Parameters
----------
descr : dict
The feature description.
Raises
------
ValueError
            If the number of features extracted from the description does not
            match `nfeatures`.
Notes
-----
Use the `description` to encode action information. The information
        should contain the list of all available feature combinations and the
        name of each feature.
Examples
--------
A description of an action with three possible discrete actions:
::
{
"out": {"value": [-0.004]},
"in": {"value": [0.004]},
"kick": {"value": [-1.0]}
}
        A description of an action with one possible continuous action with
        name `move`. A value of `*` allows the action to be matched for every
        feature array. Additional information, encoding the feature name together
        with its index into the feature array, is given for each higher-level
        element of the feature array:
::
{
"move": {
"value": "*",
"descr": {
"LArm": {"dx": 0, "dy": 1, "dz": 2},
"RArm": {"dx": 3, "dy": 4, "dz": 5},
"LLeg": {"dx": 6, "dy": 7, "dz": 8},
"RLeg": {"dx": 9, "dy": 10, "dz": 11},
"Torso": {"dx": 12, "dy": 13, "dz": 14}
}
}
}
Similarly, a continuous state can be encoded as follows, which identifies
the name of each feature together with its index into the feature array:
::
{
"LArm": {"x": 0, "y": 1, "z": 2},
"RArm": {"x": 3, "y": 4, "z": 5},
"LLeg": {"x": 6, "y": 7, "z": 8},
"RLeg": {"x": 9, "y": 10, "z": 11},
"Torso": {"x": 12, "y": 13, "z": 14}
}
A discrete state can be encoded by identifying the position of each feature:
::
"descr": {
"image x-position": 0,
"displacement (mm)": 1
}
        Alternatively, the feature can be identified by a list of features, giving the
positional description:
::
["image x-position", "displacement (mm)"]
"""
nfeatures = None
if isinstance(descr, dict):
config = descr.itervalues().next()
if 'descr' in config:
nfeatures = sum(len(v) for v in config['descr'].itervalues())
if cls.nfeatures is not None and not cls.nfeatures == nfeatures:
raise ValueError("Dimension mismatch: array described by 'descr' is a vector of length %d,"
" but attribute cls.nfeatures = %d" % (nfeatures, cls.nfeatures))
elif 'value' in config and not config['value'] == '*':
nfeatures = len(config['value'])
if cls.nfeatures is not None and not cls.nfeatures == nfeatures:
raise ValueError("Dimension mismatch: array described by 'value' is a vector of length %d,"
" but attribute cls.nfeatures = %d" % (nfeatures, cls.nfeatures))
else:
nfeatures = sum(len(v) for v in descr.itervalues())
if cls.nfeatures is not None and not cls.nfeatures == nfeatures:
raise ValueError("Dimension mismatch: 'descr' is a vector of length %d,"
" but attribute cls.nfeatures = %d" % (nfeatures, cls.nfeatures))
elif isinstance(descr, list):
nfeatures = len(descr)
if cls.nfeatures is not None and not cls.nfeatures == nfeatures:
raise ValueError("Dimension mismatch: 'descr' is a vector of length %d,"
" but attribute cls.nfeatures = %d" % (nfeatures, cls.nfeatures))
if cls.nfeatures is None:
cls.nfeatures = nfeatures
cls.description = descr
@classmethod
def set_discretized(cls, val=False):
"""Sets the `discretized` flag.
Parameters
----------
val : bool
Flag identifying whether the features are discretized or not.
Default is False.
Raises
------
ValueError
If `val` is not boolean type.
"""
if not isinstance(val, bool):
raise ValueError("Attribute 'nfeatures' must be of <type 'bool'>, got %s" % str(type(val)))
cls.discretized = val
@classmethod
def set_minmax_features(cls, _min, _max):
"""Sets the minimum and maximum value for each feature.
This extracts the number of features from the `_min` and `_max`
values and ensures that it matches with `nfeatures`. If `nfeatures`
is None, the `nfeatures` attribute is set to the extracted value.
Parameters
----------
_min : array_like, shape(`nfeatures`,)
The minimum value for each feature
_max : array_like, shape(`nfeatures`,)
The maximum value for each feature
Raises
------
ValueError
If the arrays are not one-dimensional vectors, the shapes of the
arrays don't match, or the number of features does not agree with
the attribute `nfeatures`.
"""
_min = np.asarray(_min, dtype=cls.dtype)
_max = np.asarray(_max, dtype=cls.dtype)
dim = _min.size
if dim == 1:
_min.shape = (1,)
dim = _max.size
if dim == 1:
_max.shape = (1,)
if _min.shape[0] != _max.shape[0]:
raise ValueError("Dimension mismatch: array '_min' is a vector of length %d,"
" but '_max' is of length %d" % (_min.shape[0], _max.shape[0]))
if cls.nfeatures is None:
cls.nfeatures = _min.shape[0]
if _min.shape[0] != cls.nfeatures:
raise ValueError("Arrays '_min' and '_max' must be of length %d." % cls.nfeatures)
cls.min_features = _min
cls.max_features = _max
@classmethod
def set_states_per_dim(cls, nstates):
"""Sets the number of states per feature.
This extracts the number of features from `nstates` and compares
it to the attribute `nfeatures`. If it doesn't match, an exception
is thrown. If the `nfeatures` attribute is None, `nfeatures` is set
to the extracted value.
Parameters
----------
nstates : array_like, shape (`nfeatures`,)
The number of states per features
Raises
------
ValueError
If the array is not a vector of length `nfeatures`.
"""
nstates = np.asarray(nstates, dtype=cls.dtype)
dim = nstates.size
if dim == 1:
nstates.shape = (1,)
if cls.nfeatures is None:
cls.nfeatures = nstates.shape[0]
if nstates.ndim != 1 or nstates.shape[0] != cls.nfeatures:
raise ValueError("Array 'nstates' must be a vector of length %d." % cls.nfeatures)
cls.states_per_dim = nstates
def __init__(self, features, name=None):
if type(self).dtype is None:
type(self).dtype = MDPPrimitive.DTYPE_FLOAT
self._features = np.asarray(features)
if self._features.ndim != 1:
raise ValueError("Array 'features' must be one-dimensional,"
" but features.ndim = %d" % self._features.ndim)
self._name = name if name is not None else ""
if not isinstance(self._name, basestring):
raise ValueError("'name' must be a string, but got %s" % str(type(self._name)))
if type(self).nfeatures is None:
type(self).nfeatures = self._features.shape[0]
elif not self._features.shape[0] == type(self).nfeatures:
raise ValueError("Dimension mismatch: array 'features' is a vector of length %d, but"
" attribute cls.nfeatures = %d" % (self._features.shape[0], type(self).nfeatures))
if type(self).discretized and type(self).states_per_dim:
self.discretize()
# noinspection PyUnusedLocal
def __get__(self, instance, owner):
return self._features
def __getitem__(self, index):
checker = np.vectorize(lambda x: isinstance(x, slice))
        if not np.any(checker(index)) and index >= len(self):
            raise IndexError("Index out of range")
return self._features[index]
def __setitem__(self, index, value):
        if index >= len(self):
raise IndexError("Assignment index out of range")
self._features[index] = value
def __len__(self):
return len(self._features)
def __contains__(self, item):
return item in self._features
def __hash__(self):
return hash(tuple(self._features)) if self._features is not None else None
def __eq__(self, other):
return np.array_equal(other.get(), self._features)
def __sub__(self, other):
return self._features - other
def __mul__(self, other):
return self._features * other
def __imul__(self, other):
self._features *= other
return self
def __iter__(self):
self.ix = 0
return self
def __str__(self):
features = np.array_str(self.encode())
return "\'" + self._name + "\':\t" + features if self._name else features
def __repr__(self):
features = np.array_str(self.encode())
return "\'" + self._name + "\':\t" + features if self._name else features
def next(self):
if self.ix == len(self):
raise StopIteration
item = self._features[self.ix]
self.ix += 1
return item
    def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k in self.__slots__:
try:
setattr(result, k, copy.copy(getattr(self, k)))
except AttributeError:
pass
return result
def __getstate__(self):
data = {}
for name in self.__slots__:
if not name == 'ix':
data[name] = getattr(self, name)
return data
def __setstate__(self, d):
for name, value in d.iteritems():
if name not in ['nfeatures', 'dtype', 'description', 'discretized',
'min_features', 'max_features', 'states_per_dim']:
setattr(self, name, value)
type(self).nfeatures = self._features.shape[0]
def get(self):
"""Return the feature array.
Returns
-------
ndarray :
The feature array.
"""
return self._features
def tolist(self):
"""Returns the feature array as a list.
Returns
-------
list :
The features list.
"""
return self._features.tolist()
def set(self, features):
"""Sets the feature array to the given array.
Parameters
----------
features : array_like, shape (`nfeatures`,)
The new feature values.
"""
features = np.asarray(features, dtype=type(self).dtype)
if features.ndim != 1 or features.shape[0] != type(self).nfeatures:
raise ValueError("Array 'features' must be a vector of length %d." % type(self).nfeatures)
self._features = np.asarray(features)
def discretize(self):
"""Discretizes the state.
Discretize the state using the information from the minimum and
maximum values for each feature and the number of states attributed
to each feature.
"""
if not self.discretized:
return
nfeatures = type(self).nfeatures
min_features = type(self).min_features
max_features = type(self).max_features
states_per_dim = type(self).states_per_dim
        if min_features is None or min_features.shape[0] != nfeatures:
            raise ValueError("Attribute 'min_features' must be a vector of length %d." % nfeatures)
        if max_features is None or max_features.shape[0] != nfeatures:
            raise ValueError("Attribute 'max_features' must be a vector of length %d." % nfeatures)
        if states_per_dim is None or states_per_dim.shape[0] != nfeatures:
            raise ValueError("Attribute 'states_per_dim' must be a vector of length %d." % nfeatures)
ds = []
for i, feat in enumerate(self):
factor = math.ceil(
(max_features[i] - min_features[i]) / states_per_dim[i])
if feat > 0:
bin_num = int((feat + factor / 2) / factor)
else:
bin_num = int((feat - factor / 2) / factor)
ds.append(bin_num * factor)
self._features = np.asarray(ds)
def encode(self):
# noinspection PyUnresolvedReferences,PyUnusedLocal
"""Encodes the state into a human readable representation.
Returns
-------
ndarray :
The encoded state.
Notes
-----
Optionally this method can be overwritten at runtime.
Examples
--------
        >>> def my_encode(self):
... pass
...
>>> MDPPrimitive.encode = my_encode
"""
return self._features
@classmethod
def decode(cls, _repr):
# noinspection PyUnresolvedReferences,PyUnusedLocal
"""Decodes the state into its original representation.
Parameters
----------
_repr : tuple
The readable representation of the primitive.
Returns
-------
State :
The decoded state.
Notes
-----
Optionally this method can be overwritten at runtime.
Examples
--------
        >>> def my_decode(cls, _repr):
... pass
...
>>> MDPPrimitive.decode = classmethod(my_decode)
"""
return cls(_repr)
@staticmethod
def key_to_index(key):
# noinspection PyUnresolvedReferences,PyUnusedLocal
"""Maps internal name to group index.
Maps the internal name of a feature to the index of the corresponding
feature grouping. For example for a feature vector consisting of the
x-y-z position of the left and the right arm, the features for the left
and the right arm can be extracted separately as a group, effectively
splitting the feature vector into two vectors with x, y, and z at the
        positions specified by the mapping of this function.
Parameters
----------
key : str
The key into the mapping
Returns
-------
int :
The index in the feature array.
Raises
------
NotImplementedError
If the child class does not implement this function.
Notes
-----
Optionally this method can be overwritten at runtime.
Examples
--------
        >>> def my_key_to_index(key):
... return {
... "x": 0,
... "y": 1,
... "z": 2
... }[key]
...
        >>> State.description = {'LArm': {'x': 0, 'y': 1, 'z': 2},
... 'RArm': {'x': 3, 'y': 4, 'z': 5}}
>>> State.key_to_index = staticmethod(my_key_to_index)
This specifies the mapping in both direction.
        >>> state = [0.1, 0.4, 0.3, 4.6, 2.5, 0.9]
>>>
>>> mapping = State.description['LArm']
>>>
        >>> larm = np.zeros(len(mapping.keys()))
>>> for key, axis in mapping.iteritems():
... larm[State.key_to_index(key)] = state[axis]
...
>>> print larm
[0.1, 0.4, 0.3]
This extracts the features for the left arm from the `state` vector.
"""
raise NotImplementedError
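# Editor's note: minimal construction sketch; the feature values are
# arbitrary. Note that the first instantiation fixes ``nfeatures`` for the
# class, as documented above.
def _mdp_primitive_demo():
    p = MDPPrimitive([0.1, 0.4, 0.3], name="demo")
    return len(p), p.tolist()  # (3, [0.1, 0.4, 0.3])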
# noinspection PyAbstractClass,PyUnresolvedReferences
class State(MDPPrimitive):
"""Representation of the state.
States are represented by an array of features.
Parameters
----------
features : array_like, shape (`nfeatures`,)
List of features, where `nfeatures` is the number of features
identifying the primitive.
name : str, optional
The name of the primitive. Default is ''.
Attributes
----------
name
dtype : {DTYPE_FLOAT, DTYPE_INT, DTYPE_OBJECT}
The type of the features.
nfeatures : int
The number of features.
discretized : bool
Flag indicating whether the features are discretized or not.
min_features : list
The minimum value for each feature.
max_features : list
        The maximum value for each feature.
states_per_dim : list
The number of states per dimension.
description : dict
A description of the features.
Notes
-----
Use the `description` to encode action information. The information
    should contain the list of all available feature combinations and the
    name of each feature.
:Examples:
A description of an action with three possible discrete actions:
::
{
"out": {"value": [-0.004]},
"in": {"value": [0.004]},
"kick": {"value": [-1.0]}
}
    A description of an action with one possible continuous action with
    name `move`. A value of `*` allows the action to be matched for every
    feature array. Additional information, encoding the feature name together
    with its index into the feature array, is given for each higher-level
    element of the feature array:
::
{
"move": {
"value": "*",
"descr": {
"LArm": {"dx": 0, "dy": 1, "dz": 2},
"RArm": {"dx": 3, "dy": 4, "dz": 5},
"LLeg": {"dx": 6, "dy": 7, "dz": 8},
"RLeg": {"dx": 9, "dy": 10, "dz": 11},
"Torso": {"dx": 12, "dy": 13, "dz": 14}
}
}
}
Similarly, a continuous state can be encoded as follows, which identifies
the name of each feature together with its index into the feature array:
::
{
"LArm": {"x": 0, "y": 1, "z": 2},
"RArm": {"x": 3, "y": 4, "z": 5},
"LLeg": {"x": 6, "y": 7, "z": 8},
"RLeg": {"x": 9, "y": 10, "z": 11},
"Torso": {"x": 12, "y": 13, "z": 14}
}
A discrete state can be encoded by identifying the position of each feature:
::
{
"image x-position": 0,
"displacement (mm)": 1
}
    Alternatively, the feature can be identified by a list of features, giving the
positional description:
::
["image x-position", "displacement (mm)"]
    Rather than setting the attributes directly, use the methods :meth:`set_nfeatures`,
:meth:`set_dtype`, :meth:`set_description`, :meth:`set_discretized`, :meth:`set_minmax_features`,
and :meth:`set_states_per_dim` in order to enforce type checking.
Examples
--------
    >>> State.description = {'LArm': {'x': 0, 'y': 1, 'z': 2},
... 'RArm': {'x': 3, 'y': 4, 'z': 5}}
This description identifies the features to be the x-y-z-position of
the left and the right arm. The position into the feature array is given
by the integer numbers.
    >>> def my_key_to_index(key):
... return {
... "x": 0,
... "y": 1,
... "z": 2
... }[key]
...
>>> State.key_to_index = staticmethod(my_key_to_index)
This defines a mapping for each key.
    >>> state = [0.1, 0.4, 0.3, 4.6, 2.5, 0.9]
>>>
>>> mapping = State.description['LArm']
>>>
    >>> larm = np.zeros(len(mapping.keys()))
>>> for key, axis in mapping.iteritems():
... larm[State.key_to_index(key)] = state[axis]
...
>>> print larm
[0.1, 0.4, 0.3]
This extracts the features for the left arm from the `state` vector.
>>> s1 = State([0.1, 0.4, 0.2])
>>> s2 = State([0.5, 0.3, 0.5])
>>> print s1 - s2
[-0.4, 0.1, -0.3]
Subtract states from each other.
>>> print s1 * s2
[0.05, 0.12, 0.1]
Multiplies two states with each other.
>>> s1 *= s2
>>> print s1
[0.05, 0.12, 0.1]
Multiplies two states in place.
"""
initial_states = None
"""List of initial states.
:type: str | list"""
terminal_states = None
"""List of terminal states.
:type: str | list"""
def __init__(self, features, name=None):
super(State, self).__init__(features, name)
def is_initial(self):
"""Checks if the state is an initial state.
Returns
-------
bool :
Whether the state is an initial state or not.
"""
if State.initial_states is None:
return False
if isinstance(State.initial_states, list):
return self.name in State.initial_states
return self.name == self.initial_states
def is_terminal(self):
"""Checks if the state is a terminal state.
Returns
-------
bool :
Whether the state is a terminal state or not.
"""
if State.terminal_states is None:
return False
if isinstance(State.terminal_states, list):
return self.name in State.terminal_states
return self.name == self.terminal_states
# noinspection PyMethodMayBeStatic
def is_valid(self):
# noinspection PyUnresolvedReferences,PyUnusedLocal
"""Check if this state is a valid state.
Returns
-------
bool :
Whether the state is valid or not.
Notes
-----
Optionally this method can be overwritten at runtime.
Examples
--------
        >>> def my_is_valid(self):
... pass
...
>>> MDPPrimitive.is_valid = my_is_valid
"""
return True
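# Editor's note: hedged sketch of the initial/terminal-state bookkeeping
# documented above; the state names are placeholders and a fresh class
# configuration (no ``nfeatures`` fixed yet) is assumed.
def _state_demo():
    State.initial_states = ["start"]
    State.terminal_states = ["goal"]
    s = State([0.0, 0.0], name="start")
    return s.is_initial(), s.is_terminal()  # (True, False)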
# noinspection PyAbstractClass,PyUnresolvedReferences
class Action(MDPPrimitive):
"""Representation of an action.
Actions are represented by an array of features.
Parameters
----------
features : array_like, shape (`nfeatures`,)
List of features, where `nfeatures` is the number of features
identifying the primitive.
name : str, optional
The name of the primitive. Default is ''.
Attributes
----------
name
dtype : {DTYPE_FLOAT, DTYPE_INT, DTYPE_OBJECT}
The type of the features.
nfeatures : int
The number of features.
discretized : bool
Flag indicating whether the features are discretized or not.
min_features : list
The minimum value for each feature.
max_features : list
        The maximum value for each feature.
states_per_dim : list
The number of states per dimension.
description : dict
A description of the features.
Notes
-----
Use the `description` to encode action information. The information
    should contain the list of all available feature combinations and the
    name of each feature.
:Examples:
A description of an action with three possible discrete actions:
::
{
"out": {"value": [-0.004]},
"in": {"value": [0.004]},
"kick": {"value": [-1.0]}
}
    A description of an action with one possible continuous action with
    name `move`. A value of `*` allows the action to be matched for every
    feature array. Additional information, encoding the feature name together
    with its index into the feature array, is given for each higher-level
    element of the feature array:
::
{
"move": {
"value": "*",
"descr": {
"LArm": {"dx": 0, "dy": 1, "dz": 2},
"RArm": {"dx": 3, "dy": 4, "dz": 5},
"LLeg": {"dx": 6, "dy": 7, "dz": 8},
"RLeg": {"dx": 9, "dy": 10, "dz": 11},
"Torso": {"dx": 12, "dy": 13, "dz": 14}
}
}
}
Similarly, a continuous state can be encoded as follows, which identifies
the name of each feature together with its index into the feature array:
::
{
"LArm": {"x": 0, "y": 1, "z": 2},
"RArm": {"x": 3, "y": 4, "z": 5},
"LLeg": {"x": 6, "y": 7, "z": 8},
"RLeg": {"x": 9, "y": 10, "z": 11},
"Torso": {"x": 12, "y": 13, "z": 14}
}
A discrete state can be encoded by identifying the position of each feature:
::
{
"image x-position": 0,
"displacement (mm)": 1
}
    Alternatively, the feature can be identified by a list of features, giving the
positional description:
::
["image x-position", "displacement (mm)"]
    Rather than setting the attributes directly, use the methods :meth:`set_nfeatures`,
:meth:`set_dtype`, :meth:`set_description`, :meth:`set_discretized`, :meth:`set_minmax_features`,
and :meth:`set_states_per_dim` in order to enforce type checking.
Examples
--------
    >>> Action.set_description({'LArm': {'dx': 0, 'dy': 1, 'dz': 2},
... 'RArm': {'dx': 3, 'dy': 4, 'dz': 5}})
This description identifies the features to be the delta x-y-z-position of
the left and the right arm. The position into the feature array is given
by the integer numbers.
    >>> def my_key_to_index(key):
... return {
... "dx": 0,
... "dy": 1,
... "dz": 2
... }[key]
...
>>> Action.key_to_index = staticmethod(my_key_to_index)
This defines a mapping for each key.
    >>> action = [0.1, 0.4, 0.3, 4.6, 2.5, 0.9]
>>>
>>> mapping = Action.description['LArm']
>>>
    >>> larm = np.zeros(len(mapping.keys()))
>>> for key, axis in mapping.iteritems():
... larm[Action.key_to_index(key)] = action[axis]
...
>>> print larm
[0.1, 0.4, 0.3]
This extracts the features for the left arm from the `action` vector.
>>> a1 = Action([0.1, 0.4, 0.2])
>>> a2 = Action([0.5, 0.3, 0.5])
>>> print a1 - a2
[-0.4, 0.1, -0.3]
Subtract actions from each other.
>>> print a1 * a2
[0.05, 0.12, 0.1]
Multiplies two actions with each other.
>>> a1 *= a2
>>> print a1
[0.05, 0.12, 0.1]
Multiplies two actions in place.
"""
def __init__(self, features, name=None):
super(Action, self).__init__(features, name)
self._name = name if name is not None else Action.get_name(self._features)
@classmethod
def get_name(cls, features):
"""Retrieves the name of the action.
Retrieve the name of the action using the action's description. In the case
that all features are zero the action is considered a `no-op` action.
Parameters
----------
features : ndarray
A feature array.
Returns
-------
str :
The name of the action.
"""
features = np.asarray(features, dtype=cls.dtype)
if cls.description is not None:
            for e, config in cls.description.iteritems():
                if config["value"] == "*":
                    return e
                if np.asarray(config["value"]).shape != features.shape:
                    raise ValueError("Dimension mismatch: array 'config['value']' is a vector of length %d,"
                                     " but 'features' is a vector of length %d." % (np.asarray(config["value"]).shape[0],
                                                                                    features.shape[0]))
                if np.array_equal(config["value"], features):
                    return e
if not features.any():
return "no-op"
return ""
@classmethod
def get_noop_action(cls):
"""Creates a `no-op` action.
A `no-op` action does not have any effect.
Returns
-------
Action :
A `no-op` action.
"""
if not isinstance(cls.nfeatures, int):
raise ValueError("Attribute 'nfeatures' must be of <type 'int'>, got %s" % str(type(cls.nfeatures)))
return cls(np.zeros(cls.nfeatures), "no-op")
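# Editor's note: no-op construction sketch; it assumes a fresh class
# configuration where ``nfeatures`` has not been fixed yet.
def _action_demo():
    Action.set_nfeatures(3)
    noop = Action.get_noop_action()
    return noop.name, noop.tolist()  # ("no-op", [0.0, 0.0, 0.0])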
| mit | 6,059,188,938,365,483,000 | 29.281429 | 117 | 0.533613 | false | 4.236434 | false | false | false |
miku/graphapi | app.py | 1 | 4720 | # coding: utf-8
from flask import Flask
from flask.ext.cors import CORS
import requests
import json
app = Flask(__name__)
app.config['SPARQL_ENDPOINT'] = 'http://localhost:18890/sparql'
cors = CORS(app)
QUERY = """
CONSTRUCT {{
<http://d-nb.info/gnd/{gnd}> <http://xmlns.com/foaf/0.1/depiction> ?c .
<http://d-nb.info/gnd/{gnd}> <http://example.org/kg#bornIn> ?e .
<http://d-nb.info/gnd/{gnd}> <http://example.org/kg#diedIn> ?g .
<http://d-nb.info/gnd/{gnd}> <http://example.org/kg#born> ?h .
<http://d-nb.info/gnd/{gnd}> <http://example.org/kg#died> ?i .
<http://example.org/kg#diedIn> <http://www.w3.org/2000/01/rdf-schema#label> "gestorben in"@de .
<http://example.org/kg#diedIn> <http://www.w3.org/2000/01/rdf-schema#label> "died in"@en .
<http://d-nb.info/gnd/{gnd}> <http://www.w3.org/2000/01/rdf-schema#label> ?j .
<http://d-nb.info/gnd/{gnd}> <http://example.org/kg#cityCluster> ?k .
?k <http://www.w3.org/2000/01/rdf-schema#label> ?klabel .
?k <http://xmlns.com/foaf/0.1/depiction> ?k_dbp .
<http://d-nb.info/gnd/{gnd}> <http://example.org/kg#profession> ?l .
?l <http://www.w3.org/2000/01/rdf-schema#label> ?l_label .
<http://d-nb.info/gnd/{gnd}> <http://www.w3.org/2000/01/rdf-schema#abstract> ?comment .
}}
WHERE {{
GRAPH <http://d-nb.info/gnd/> {{
OPTIONAL {{
<http://d-nb.info/gnd/{gnd}> <http://d-nb.info/standards/elementset/gnd#placeOfBirth> ?d .
?d <http://d-nb.info/standards/elementset/gnd#preferredNameForThePlaceOrGeographicName> ?e .
}}
OPTIONAL {{
<http://d-nb.info/gnd/{gnd}> <http://d-nb.info/standards/elementset/gnd#placeOfDeath> ?f .
?f <http://d-nb.info/standards/elementset/gnd#preferredNameForThePlaceOrGeographicName> ?g .
}}
OPTIONAL {{
<http://d-nb.info/gnd/{gnd}> <http://d-nb.info/standards/elementset/gnd#dateOfBirth> ?h .
}}
OPTIONAL {{
<http://d-nb.info/gnd/{gnd}> <http://d-nb.info/standards/elementset/gnd#dateOfDeath> ?i .
}}
OPTIONAL {{
<http://d-nb.info/gnd/{gnd}> <http://d-nb.info/standards/elementset/gnd#preferredNameForThePerson> ?j .
}}
OPTIONAL {{
<http://d-nb.info/gnd/{gnd}> <http://d-nb.info/standards/elementset/gnd#professionOrOccupation> ?l .
OPTIONAL {{
?l <http://d-nb.info/standards/elementset/gnd#preferredNameForTheSubjectHeading> ?l_label .
}}
}}
{{ SELECT ?k ?klabel ?kpic ?k_dbp WHERE {{
OPTIONAL {{
<http://d-nb.info/gnd/{gnd}> <http://d-nb.info/standards/elementset/gnd#placeOfBirth> ?d .
<http://d-nb.info/gnd/{gnd}> <http://d-nb.info/standards/elementset/gnd#professionOrOccupation> ?l .
?k <http://d-nb.info/standards/elementset/gnd#placeOfBirth> ?d .
?k <http://d-nb.info/standards/elementset/gnd#preferredNameForThePerson> ?klabel .
?k <http://d-nb.info/standards/elementset/gnd#professionOrOccupation> ?l .
?k <http://d-nb.info/standards/elementset/gnd#preferredNameForThePerson> ?klabel .
# Getting the picture
# This will blow up the query too much
# OPTIONAL {{
# GRAPH <http://d-nb.info/gnd/> {{
# ?k <http://www.w3.org/2002/07/owl#sameAs> ?k_dbp .
# FILTER(regex(?k_dbp, 'dbpedia'))
# }}
#
# GRAPH <http://dbpedia.org/resource/> {{
# ?k_dpb <http://xmlns.com/foaf/0.1/depiction> ?kpic .
# }}
# }}
}}
}} LIMIT 6
}}
}}
OPTIONAL {{
GRAPH <http://d-nb.info/gnd/> {{
<http://d-nb.info/gnd/{gnd}> <http://www.w3.org/2002/07/owl#sameAs> ?b .
}}
GRAPH <http://dbpedia.org/resource/> {{
?b <http://xmlns.com/foaf/0.1/depiction> ?c .
OPTIONAL {{
?b_german <http://www.w3.org/2002/07/owl#sameAs> ?b .
?b_german <http://www.w3.org/2000/01/rdf-schema#comment> ?comment .
}}
}}
}}
}}
"""
@app.route("/")
def hello():
return "Hello World!"
@app.route("/gnd/<gnd>")
def q(gnd):
r = requests.get(app.config['SPARQL_ENDPOINT'],
headers={'accept': 'application/json'},
params={'query': QUERY.format(gnd=gnd)})
#j = json.loads(r.text)
#print("%s" % j)
#return "<pre>%s</pre>" % r.text
return r.text
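# Editor's note: example invocation once the app is running on Flask's
# default port (the GND identifier below is only illustrative):
#
#   curl http://localhost:5000/gnd/118540238
#
# which returns the CONSTRUCT-ed graph for that person as JSON.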
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True)
| mit | -1,085,193,435,221,097,500 | 35.589147 | 116 | 0.540254 | false | 2.807852 | false | false | false |
hansroh/skitai | skitai/corequest/pth/sp_task.py | 1 | 1692 | # Subprocess Task
from . import task
import subprocess
from concurrent.futures import TimeoutError
from ..tasks import Mask
import time
from skitai import was
class Task (task.Task):
def __init__ (self, cmd, timeout):
self._timeout = timeout
self._name = cmd
self._started = time.time ()
self._was = None
self._fulfilled = None
self._mask = None
self.proc = subprocess.Popen (cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell = True)
@property
def lines (self):
for line in iter (self.proc.stdout.readline, b''):
yield line
def _polling (self):
mask = self._create_mask (self._timeout)
self._late_respond (mask)
def then (self, func):
self._fulfilled = func
self._was = self._get_was ()
was.Thread (self._polling)
def kill (self):
self.proc.kill ()
def terminate (self):
self.proc.terminate ()
def _create_mask (self, timeout):
self._timeout = timeout
if self._mask:
return self._mask
data, expt = None, None
try:
data, err = self.proc.communicate (timeout = timeout)
except subprocess.TimeoutExpired:
expt = TimeoutError
self.proc.terminate ()
self.proc.wait ()
else:
if self.proc.returncode:
if isinstance (err, bytes):
err = err.decode ()
expt = SystemError ('code:{} {}'.format (self.proc.returncode, err))
self._mask = Mask (data, expt)
return self._mask
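# Editor's note: minimal usage sketch; 'echo' stands in for a real command
# and the 5 second timeout is arbitrary.
def _task_demo ():
	t = Task ('echo hello', 5)
	return list (t.lines)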
| mit | 9,199,092,943,771,288,000 | 26.2 | 104 | 0.543735 | false | 4.272727 | false | false | false |
Foggalong/scraps | files/other/launcher.py | 1 | 3136 | #!/usr/bin/env python3
# This was created in response to this program by the ModuloProject which
# aims to make a command line app launcher. The method they were using to
# get data from launchers seemed sub-optimal so I reworked it to be a bit
# more efficient. Their original version is found at: http://git.io/vWJcm
from os import listdir, path
from subprocess import call
def clear():
call(["clear"])
home = path.expanduser("~")
locations = [
"/usr/share/applications/",
"/usr/share/applications/kde4/",
"/usr/local/share/applications/",
"/usr/local/share/applications/kde4/",
home + "/.local/share/applications/",
home + "/.local/share/applications/kde4/",
]
name_list = []
exec_list = []
file_list = []
for location in locations:
if path.isdir(location):
for filename in listdir(location):
if ".desktop" in filename:
                with open(location + filename, "r") as file:
                    name, cmd = None, None
                    for line in file:
                        if ("Name=" in line) and (name is None):
                            name = line.split("=")[1].strip()
                        elif ("Exec=" in line) and (cmd is None):
                            cmd = line.split("=")[1].strip()
                        else:
                            # Not interested in other lines
                            pass
                    if (name is not None) and (cmd is not None):
                        # Only use complete launchers; appending a partial
                        # entry would misalign the three parallel lists
                        name_list.append(name)
                        exec_list.append(cmd)
                        file_list.append(location + filename)
                    else:
                        # Not interested in non-launchers
                        pass
else:
# Not all locations exist
pass
# Makes mega-list and then sorts list alphabetically by app name
data = sorted(zip(name_list, exec_list, file_list), key=lambda x: x[0].lower())
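# At this point "data" holds (name, exec, file) triples, e.g.
# ("Firefox", "firefox %u", "/usr/share/applications/firefox.desktop"),
# sorted case-insensitively by application name.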
clear()
for line in data:
print("Name:", line[0])
print("Exec:", line[1], "\n")
choice, i = input("What do you want to run? ").lower().strip(), 0
if choice not in [item.lower() for item in name_list]:
# Case of zero options
print("'" + choice + "' is not an option!")
exit(0)
options, toprint = [], []
for line in data:
if line[0].lower() == choice:
toprint.append("Code: " + str(i))
toprint.append("Name: " + line[0])
toprint.append("Exec: " + line[1] + "\n")
options.append(line[1])
i += 1
if i != 1:
# Case of multiple options
clear()
for line in toprint:
print(line)
toexec = input("Which code? ")
try:
# Makes sure chosen option exists
        if 0 <= int(toexec) < len(options):
cmd = options[int(toexec)].split(" ")
else:
raise ValueError
except ValueError:
print("'" + toexec + "' is not an option!")
exit(0)
else:
# Case of exactly one option
cmd = options[0].split(" ")
clear()
try:
call(cmd)
except FileNotFoundError:
print("ERROR: command not found!")
exit(1)
| gpl-2.0 | 5,570,224,233,192,500,000 | 29.745098 | 79 | 0.549426 | false | 3.96962 | false | false | false |
cellnopt/cellnopt | test/core/test_base.py | 1 | 1215 | from cno.core.base import CNORBase, CNOBase
from cno import cnodata
from easydev import TempFile
# To test some of the base functions, need to use something else such as cnorbool
def test_cnobase():
c = CNOBase(cnodata('PKN-ToyMMB.sif'), cnodata("MD-ToyMMB.csv"))
c.pknmodel
c.midas
c.data
c.preprocessing()
c.plot_pknmodel()
assert c._reac_cnor2cno(['A+B=C']) == ['A^B=C']
c.plot_midas()
c.plot_midas(xkcd=True)
c.config
fh = TempFile()
c.save_config_file(fh.name)
c = CNOBase(cnodata('PKN-ToyMMB.sif'), cnodata("MD-ToyMMB.csv"), config=fh.name)
try:
c.create_report()
assert False
except:
assert True
try:
c.create_report_images()
assert False
except:
assert True
from cno.boolean.cnorbool import CNORbool
def test_cnobase_with_cnorbool():
c = CNORbool(cnodata("PKN-ToyMMB.sif"), cnodata("MD-ToyMMB.csv"), verbose=True)
c.verboseR = True
c.verboseR = False
c.verbose = False
c.optimise(maxgens=5, popsize=10)
c.plot_fitness()
c.plot_model()
c.plot_optimised_model()
c.plot_mapback_model()
c._create_report_header()
c.onweb()
| bsd-2-clause | 3,892,661,275,343,669,000 | 17.984375 | 84 | 0.624691 | false | 2.8125 | false | false | false |
vasilenkomike/xen-api | ocaml/idl/binding_sanity_checks/create_vm.py | 34 | 2997 | #!/usr/bin/env python
import XenAPI
import provision
import sanitychecklib
#log in
session=sanitychecklib.getsession()
sx=session.xenapi
#find the template for Debian Etch
vms = sx.VM.get_all()
print "Server", sanitychecklib.server, "has ", len(vms), "VMs",
etch_template_list = [x for x in vms if (('Etch' in sx.VM.get_name_label(x)) and (sx.VM.get_is_a_template(x)))]
print "including", len(etch_template_list), "template for 'Etch'"
etch_template=etch_template_list[0]
print "We pick the first template: "
print "name: ", sx.VM.get_name_label(etch_template)
print "description:", sx.VM.get_name_description(etch_template)
#Make a copy of the template
print "Cloning..."
clone=sx.VM.clone(etch_template, sanitychecklib.test_vm_name)
#find out where to put the new machine's disks by getting the first pool (I don't think there can be more than one)
#and using its default storage repository
pool_list=sx.pool.get_all()
if len(pool_list)==1:
print "There's only one pool"
else:
print "There are", len(pool_list), "pools"
print "We pick the first one:"
first_pool=pool_list[0]
print "name:", sx.pool.get_name_label(first_pool)
print "description: ", sx.pool.get_name_description(first_pool)
default_SR=sx.pool.get_default_SR(first_pool)
print "The default SR is: "
print "Name:", sx.SR.get_name_label(default_SR)
print "Description:", sx.SR.get_name_description(default_SR)
#set the new copy to have its disks in the default SR
#this is a debian template specific hack which allows us to create Debian VMs easily
spec=provision.getProvisionSpec(session, clone)
spec.setSR(sx.SR.get_uuid(default_SR))
provision.setProvisionSpec(session, clone, spec)
#now 'provision' it, which causes the disks to actually be created.
print "provisioning...."
sx.VM.provision(clone)
print "provisioned"
#now find out which network to attach the new machine to
#by finding out what the pool master host is connected to.
pool_master=sx.pool.get_master(first_pool)
master_PIFs=sx.host.get_PIFs(pool_master)
primary_PIF=master_PIFs[0]
master_network=sx.PIF.get_network(primary_PIF)
#attach new VM to default SR and master network
print "Creating VIF..."
new_vif = { 'device': '0',
'network': master_network,
'VM': clone,
'MAC': "",
'MTU': "1500",
"qos_algorithm_type": "",
"qos_algorithm_params": {},
"other_config": {} }
sx.VIF.create(new_vif)
#Another Debian template specific hack. If 'noninteractive' is passed on the kernel command line,
#the new machine will boot without asking for its root and VNC passwords to be set, and just use 'xensource'.
print "Adding noninteractive to the kernel commandline"
print "This is a hack in the template to enable the root account to be created with password 'xensource'"
sx.VM.set_PV_args(clone, "noninteractive")
#Should be all set now. Fire up the machine.
print "booting..."
sx.VM.start(clone, False, True)
#log out
print "logging out"
session.logout()
| lgpl-2.1 | -2,022,794,108,201,293,800 | 28.97 | 115 | 0.718385 | false | 3.201923 | false | false | false |
pakodekker/oceansar | oceansar/nrcs/kodis.py | 1 | 3385 | import numpy as np
from oceansar import constants as const
class RCSKodis():
""" Specular model (R.D. Kodis '66)
    Physical optics model as described in the R.D. Kodis (1966) paper
'A Note on the Theory of Scattering from an Irregular Surface'.
E.M. solved using Stationary Phase Method.
.. note::
G. Valenzuela suggested that reflection coefficient (R)
may be replaced by effective refl. coef.!
.. note::
OASIS uses only range dependent incidence angle, so
it is given on class init.
:param inc: Incidence angle matrix
:param k0: Radar wave number
:param dx: Range resolution
:param dy: Azimuth resolution
"""
def __init__(self, inc, k0, dx, dy):
self.dx = dx
self.dy = dy
self.k0 = k0
self.sin_inc = np.sin(inc)
self.cos_inc = np.cos(inc)
self.tan_inc = np.tan(inc)
self.R = (const.epsilon_sw - np.sqrt(const.epsilon_sw))/(const.epsilon_sw + np.sqrt(const.epsilon_sw))
def field(self, az_angle, sr, diffx, diffy, diffxx, diffyy, diffxy):
# Avoid repeating calculations
cos_az = np.cos(az_angle)
sin_az = np.sin(az_angle)
J = diffxx*diffyy - diffxy**2
J = np.where(J == 0., np.nan, J)
J_abs = np.abs(J)
delta_x = (1./J_abs)*(diffxy*(diffy - self.tan_inc*sin_az) - diffyy*(diffx - self.tan_inc*cos_az))
delta_y = (1./J_abs)*(diffxy*(diffx - self.tan_inc*cos_az) - diffxx*(diffy - self.tan_inc*sin_az))
epsilon = np.where(J > 0., np.sign(diffxx), 1j)
# New slant range due to deltas
hdx = self.dx/2
hdy = self.dy/2
E = np.zeros(delta_x.shape, dtype=np.complex)
sps = np.where(((-hdx < delta_x) & (delta_x < hdx)) &
((-hdy < delta_y) & (delta_y < hdy)))
if sps[0].size > 0:
delta_z = delta_x[sps] * diffx[sps] + delta_y[sps] * diffy[sps]
sr_p2 = (sr[sps] +
(self.sin_inc[0,sps[1]] * cos_az[sps] * delta_x[sps] +
self.sin_inc[0,sps[1]] * sin_az[sps] * delta_y[sps] -
self.cos_inc[0,sps[1]] * delta_z))
E[sps] = ((0.5*self.R*epsilon[sps]) *
((diffx[sps]**2. + diffy[sps]**2. + 1.)) *
np.exp(-1j*2.*self.k0*sr_p2) /
np.sqrt(J_abs[sps]))
# field = np.where(((-hdx < delta_x) & (delta_x < hdx)) & ((-hdy < delta_y) & (delta_y < hdy)),
# (0.5*self.R*epsilon)*((diffx**2. + diffy**2. + 1.)) * np.exp(-1j*2.*self.k0*np.sqrt(sr_p2)) / np.sqrt(J_abs),
# 0.)
return E
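    # Editor's note: the stationary-phase solution above only keeps facets
    # whose specular point (delta_x, delta_y) falls inside the facet; e.g.
    # with dx = dy = 1 m, a point at (0.2, -0.3) contributes, while one at
    # (0.8, 0.0) is rejected by the +/- dx/2, +/- dy/2 test.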
def candidates(self, az_angle, diffx, diffy, diffxx, diffyy, diffxy):
# Avoid repeating calculations
cos_az = np.cos(az_angle)
sin_az = np.sin(az_angle)
J = diffxx*diffyy - diffxy**2.
J = np.where(J == 0., np.nan, np.abs(J))
delta_x = (1./J)*(diffxy*(diffy - self.sin_inc*sin_az) - diffyy*(diffx - self.sin_inc*cos_az))
delta_y = (1./J)*(diffxy*(diffx - self.sin_inc*cos_az) - diffxx*(diffy - self.sin_inc*sin_az))
candidates = np.where(((0. < delta_x) & (delta_x < self.dx)) & ((0. < delta_y) & (delta_y < self.dy)),
1., 0.)
return candidates | gpl-3.0 | 7,093,026,212,739,581,000 | 37.044944 | 135 | 0.51551 | false | 2.898116 | false | false | false |
mnubo/kubernetes-py | kubernetes_py/models/v1alpha1/PetSetStatus.py | 3 | 1970 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
class PetSetStatus(object):
"""
http://kubernetes.io/docs/api-reference/apps/v1alpha1/definitions/#_v1alpha1_petsetstatus
"""
def __init__(self, model=None):
super(PetSetStatus, self).__init__()
self._observed_generation = None
self._replicas = None
if model is not None:
self._build_with_model(model)
def _build_with_model(self, model=None):
if "observedGeneration" in model:
self.observed_generation = model["observedGeneration"]
if "replicas" in model:
self.replicas = model["replicas"]
# ------------------------------------------------------------------------------------- observedGeneration
@property
def observed_generation(self):
return self._observed_generation
@observed_generation.setter
def observed_generation(self, og=None):
if not isinstance(og, int):
raise SyntaxError("PetSetStatus: observed_generation: [ {} ] is not None.".format(og))
self._observed_generation = og
# ------------------------------------------------------------------------------------- replicas
@property
def replicas(self):
return self._replicas
@replicas.setter
def replicas(self, r=None):
if not isinstance(r, int):
raise SyntaxError("PetSetStatus: replicas: [ {} ] is not None.".format(r))
self._replicas = r
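    # Editor's note: round-trip sketch (the values are illustrative):
    #     status = PetSetStatus({"observedGeneration": 1, "replicas": 3})
    #     status.serialize()  # -> {"observedGeneration": 1, "replicas": 3}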
# ------------------------------------------------------------------------------------- serialize
def serialize(self):
data = {}
if self.observed_generation is not None:
data["observedGeneration"] = self.observed_generation
if self.replicas is not None:
data["replicas"] = self.replicas
return data
| apache-2.0 | -5,121,746,485,660,542,000 | 30.774194 | 110 | 0.538071 | false | 4.65721 | false | false | false |
platinummonkey/trolliusgremlin | tests/tests.py | 1 | 7626 | """
"""
import asyncio
import unittest
import uuid
from aiogremlin import (submit, GremlinConnector, GremlinClient,
GremlinClientSession)
class SubmitTest(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def tearDown(self):
self.loop.close()
def test_submit(self):
@asyncio.coroutine
def go():
resp = yield from submit("4 + 4", bindings={"x": 4},
loop=self.loop)
results = yield from resp.get()
return results
results = self.loop.run_until_complete(go())
self.assertEqual(results[0].data[0], 8)
def test_rebinding(self):
execute = submit("graph2.addVertex()", loop=self.loop)
try:
self.loop.run_until_complete(execute.get())
error = False
except:
error = True
self.assertTrue(error)
@asyncio.coroutine
def go():
result = yield from submit(
"graph2.addVertex()", rebindings={"graph2": "graph"},
loop=self.loop)
resp = yield from result.get()
self.assertEqual(len(resp), 1)
self.loop.run_until_complete(go())
class GremlinClientTest(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.gc = GremlinClient(url="ws://localhost:8182/", loop=self.loop)
def tearDown(self):
self.loop.run_until_complete(self.gc.close())
self.loop.close()
def test_connection(self):
@asyncio.coroutine
def go():
ws = yield from self.gc._connector.ws_connect(self.gc.url)
self.assertFalse(ws.closed)
yield from ws.close()
self.loop.run_until_complete(go())
def test_execute(self):
@asyncio.coroutine
def go():
resp = yield from self.gc.execute("x + x", bindings={"x": 4})
return resp
results = self.loop.run_until_complete(go())
self.assertEqual(results[0].data[0], 8)
def test_sub_waitfor(self):
sub1 = self.gc.execute("x + x", bindings={"x": 1})
sub2 = self.gc.execute("x + x", bindings={"x": 2})
sub3 = self.gc.execute("x + x", bindings={"x": 4})
coro = asyncio.gather(*[asyncio.async(sub1, loop=self.loop),
asyncio.async(sub2, loop=self.loop),
asyncio.async(sub3, loop=self.loop)],
loop=self.loop)
# Here I am looking for resource warnings.
results = self.loop.run_until_complete(coro)
self.assertIsNotNone(results)
def test_resp_stream(self):
@asyncio.coroutine
def stream_coro():
results = []
resp = yield from self.gc.submit("x + x", bindings={"x": 4})
while True:
f = yield from resp.stream.read()
if f is None:
break
results.append(f)
self.assertEqual(results[0].data[0], 8)
self.loop.run_until_complete(stream_coro())
def test_execute_error(self):
execute = self.gc.execute("x + x g.asdfas", bindings={"x": 4})
try:
self.loop.run_until_complete(execute)
error = False
except:
error = True
self.assertTrue(error)
def test_rebinding(self):
execute = self.gc.execute("graph2.addVertex()")
try:
self.loop.run_until_complete(execute)
error = False
except:
error = True
self.assertTrue(error)
@asyncio.coroutine
def go():
result = yield from self.gc.execute(
"graph2.addVertex()", rebindings={"graph2": "graph"})
self.assertEqual(len(result), 1)
self.loop.run_until_complete(go())
class GremlinClientSessionTest(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.gc = GremlinClientSession(url="ws://localhost:8182/",
loop=self.loop)
self.script1 = """graph = TinkerFactory.createModern()
g = graph.traversal(standard())"""
self.script2 = "g.V().has('name','marko').out('knows').values('name')"
def tearDown(self):
self.loop.run_until_complete(self.gc.close())
self.loop.close()
def test_session(self):
@asyncio.coroutine
def go():
yield from self.gc.execute(self.script1)
results = yield from self.gc.execute(self.script2)
return results
results = self.loop.run_until_complete(go())
self.assertTrue(len(results[0].data), 2)
def test_session_reset(self):
@asyncio.coroutine
def go():
yield from self.gc.execute(self.script1)
self.gc.reset_session()
results = yield from self.gc.execute(self.script2)
return results
results = self.loop.run_until_complete(go())
self.assertIsNone(results[0].data)
def test_session_manual_reset(self):
@asyncio.coroutine
def go():
yield from self.gc.execute(self.script1)
new_sess = str(uuid.uuid4())
sess = self.gc.reset_session(session=new_sess)
self.assertEqual(sess, new_sess)
self.assertEqual(self.gc.session, new_sess)
results = yield from self.gc.execute(self.script2)
return results
results = self.loop.run_until_complete(go())
self.assertIsNone(results[0].data)
def test_session_set(self):
@asyncio.coroutine
def go():
yield from self.gc.execute(self.script1)
new_sess = str(uuid.uuid4())
self.gc.session = new_sess
self.assertEqual(self.gc.session, new_sess)
results = yield from self.gc.execute(self.script2)
return results
results = self.loop.run_until_complete(go())
self.assertIsNone(results[0].data)
def test_resp_session(self):
@asyncio.coroutine
def go():
session = str(uuid.uuid4())
self.gc.session = session
resp = yield from self.gc.submit("x + x", bindings={"x": 4})
while True:
f = yield from resp.stream.read()
if f is None:
break
self.assertEqual(resp.session, session)
self.loop.run_until_complete(go())
class ContextMngrTest(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.connector = GremlinConnector(loop=self.loop)
def tearDown(self):
self.loop.run_until_complete(self.connector.close())
self.loop.close()
# def test_connection_manager(self):
# results = []
#
# @asyncio.coroutine
# def go():
# with (yield from self.connector) as conn:
# client = SimpleGremlinClient(conn, loop=self.loop)
# resp = yield from client.submit("1 + 1")
# while True:
# mssg = yield from resp.stream.read()
# if mssg is None:
# break
# results.append(mssg)
# self.loop.run_until_complete(go())
if __name__ == "__main__":
unittest.main()
| mit | 3,746,226,251,642,800,600 | 29.626506 | 78 | 0.548387 | false | 3.963617 | true | false | false |
realm01/gnome-quota-indicator | lib/mvc/quota_window/view.py | 1 | 3093 | """View of QuotaWindowView."""
from gi.repository import Gtk
from lib.mvc.bases import WindowViewBase
from lib.exception_feedback import add_default_exception_handling
class QuotaWindowView(Gtk.Window, WindowViewBase):
"""View of QuotaWindowView."""
def __init__(self, app, model):
"""Ctor of QuotaWindowView."""
Gtk.Window.__init__(self)
WindowViewBase.__init__(self, app, model)
self.on_open = None
self.on_close = None
@add_default_exception_handling('Failed to initialize Quota Window')
def initialize(self):
"""Create the actual view with all widgets."""
self.connect("delete-event", self.cb_close)
# create tree view
sorted_model = Gtk.TreeModelSort(model=self.model.create_model())
sorted_model.set_sort_column_id(1, Gtk.SortType.ASCENDING)
self.tree_view = Gtk.TreeView(model=sorted_model)
self.create_columns(self.tree_view)
# create a grid and attach the treeview to it
self.grid = Gtk.Grid()
self.grid.attach(self.tree_view, 0, 0, 1, 1)
# attach grid to window
self.add(self.grid)
@add_default_exception_handling('Failed to open Quota Window')
def cb_show(self, w, data):
"""On show."""
self.set_icon_from_file(self.getIcon())
if self.on_open is not None:
self.on_open()
sorted_model = Gtk.TreeModelSort(model=self.model.create_model())
sorted_model.set_sort_column_id(1, Gtk.SortType.ASCENDING)
self.tree_view.set_model(model=sorted_model)
self.show_all()
return True
@add_default_exception_handling('Failed to close Quota Window')
def cb_close(self, w, data):
""""On window close."""
if self.on_close is not None:
self.on_close()
self.hide()
return True
@add_default_exception_handling('Failed to update Quota Window')
def on_update(self):
"""On update."""
self.tree_view.set_model(self.model.create_model())
@add_default_exception_handling()
def register_on_open(self, func):
"""Register on open event."""
self.on_open = func
@add_default_exception_handling()
def register_on_close(self, func):
"""Register on close event."""
self.on_close = func
@add_default_exception_handling('Failed to display storage information')
def create_columns(self, tree_view):
"""Create the columns of the TreeView."""
rendererText = Gtk.CellRendererText()
column = Gtk.TreeViewColumn("File", rendererText, text=0)
column.set_sort_column_id(0)
tree_view.append_column(column)
rendererText = Gtk.CellRendererText()
column = Gtk.TreeViewColumn("Size [MB]", rendererText, text=1)
column.set_sort_column_id(1)
column.set_cell_data_func(
rendererText, lambda col, cell, model, iter,
unused: cell.set_property(
"text", '{0:.2f}'.format(model.get(iter, 1)[0])))
tree_view.append_column(column)
| mit | 1,402,789,660,148,587,000 | 31.557895 | 76 | 0.624636 | false | 3.713085 | false | false | false |
mtasende/Machine-Learning-Nanodegree-Capstone | predictor/evaluation.py | 1 | 10656 | """ This module contains some functions to evaluate the stock prices predictor. """
import sys
import numpy as np
from pandas.errors import UnsortedIndexError
from sklearn.metrics import r2_score
import pandas as pd
import predictor.feature_extraction as fe
import datetime as dt
def mre(y_true, y_pred):
"""
MRE metrics function. The values of assets should never be zero so, as zero labels cause problems,
they are not considered.
"""
y_true_filtered = y_true[y_true != 0]
y_pred_filtered = y_pred[y_true != 0]
return np.mean(np.abs((y_pred_filtered - y_true_filtered) / y_true_filtered))
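# Editor's note: tiny numeric check of the metric above; the zero label is
# dropped, as documented.
def _mre_example():
    y_true = np.array([1.0, 2.0, 0.0])
    y_pred = np.array([1.1, 1.8, 5.0])
    return mre(y_true, y_pred)  # mean(0.1, 0.1) = 0.1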
def get_metrics(y_true_df, y_pred_df):
"""
Calculates the MRE and R^2 score, on a per-symbol basis. It receives matrices of results,
in which the rows represent time and the columns represent symbols.
:param y_true_df: The labels for each symbol at each moment in time.
:param y_pred_df: The predicted labels for each symbol at each moment in time.
:returns r2_scores: Numpy array with the R^2 score for each symbol
:returns mre_scores: Numpy array with the MRE score for each symbol
:returns tickers: Array that contains the ticker symbols.
"""
tickers = y_true_df.index.levels[1]
r2_scores = []
mre_scores = []
for ticker in tickers:
try:
y_true = y_true_df.loc[(slice(None), ticker), :]
y_pred = y_pred_df.loc[(slice(None), ticker), :]
except UnsortedIndexError:
y_true = y_true_df.sort_index().loc[(slice(None), ticker), :]
y_pred = y_pred_df.sort_index().loc[(slice(None), ticker), :]
r2_scores.append(r2_score(y_true, y_pred))
mre_scores.append(mre(y_true.values, y_pred.values))
return np.array(r2_scores), np.array(mre_scores), tickers
def get_metrics_df(y_true_df, y_pred_df):
"""
Wrapper around get_metrics that returns dataframes instead of Numpy arrays.
"""
r2_scores, mre_scores, tickers = get_metrics(y_true_df, y_pred_df)
return pd.DataFrame(np.array([r2_scores, mre_scores]).T, index=tickers, columns=['r2', 'mre'])
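# Editor's note: hedged sketch of the expected input shape: a (date, ticker)
# MultiIndex with one label/prediction per row. The tickers and values are
# placeholders.
def _metrics_example():
    idx = pd.MultiIndex.from_product(
        [pd.date_range('2014-01-01', periods=2), ['AAPL', 'SPY']])
    y_true = pd.DataFrame([1.0, 2.0, 1.5, 2.5], index=idx)
    y_pred = pd.DataFrame([1.1, 1.9, 1.4, 2.6], index=idx)
    return get_metrics_df(y_true, y_pred)  # one (r2, mre) row per ticker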
def get_metrics_in_time(y_true_df, y_pred_df, shift):
"""
Calculates the MRE and R^2 score, on a per-time basis. It receives matrices of results,
in which the rows represent time and the columns represent symbols.
:param y_true_df: The labels for each symbol at each moment in time.
:param y_pred_df: The predicted labels for each symbol at each moment in time.
:return: The mean MRE and R^2 score for each time point, and an array of the corresponding dates.
"""
dates = y_true_df.index.get_level_values(0).unique()
r2_scores = []
mre_scores = []
for date in dates:
try:
y_true = y_true_df.loc[(date, slice(None)), :]
y_pred = y_pred_df.loc[(date, slice(None)), :]
except UnsortedIndexError:
y_true = y_true_df.sort_index().loc[(date, slice(None)), :]
y_pred = y_pred_df.sort_index().loc[(date, slice(None)), :]
r2_scores.append(r2_score(y_true, y_pred))
mre_scores.append(mre(y_true.values, y_pred.values))
return np.array(r2_scores), np.array(mre_scores), dates + dt.timedelta(shift)
def reshape_by_symbol(y):
""" Deprecated helper function. Was not used in the final implementation."""
grouped_df = y.reset_index() \
.groupby('level_0') \
.apply(lambda x: x.reset_index(drop=True)) \
.drop('level_0', axis=1)
grouped_df.index = grouped_df.index.droplevel(level=1)
grouped_df.rename(columns={'level_1': 'ticker'}, inplace=True)
reshaped_df = grouped_df.set_index('ticker', append=True).unstack()
reshaped_df.columns = reshaped_df.columns.droplevel(level=0)
reshaped_df.index.name = 'date'
return reshaped_df
def run_single_val(x, y, ahead_days, estimator):
"""
Runs a single training and validation.
:param x: A dataframe of samples. The columns represent the base days.
The rows always contain the dates of the initial day in the base period. Additionally,
the dataframe may be multiindexed with information about from which symbol each sample comes from.
The symbol information is not used for the training, but may be useful to get some insigths in
the validation process.
:param y: The labels of each sample. It corresponds to the (standarized) value of a ticker, some days ahead.
:param ahead_days: Number of days ahead that the labels are from the last base day.
:param estimator: A predictor object for the labels. It follows the scikit-learn interface, but keeps the dataframe
information.
:returns y_train_true_df: Labels for the training set. Rows contain dates, columns contain symbols.
:returns y_train_pred_df: Predictions for the training set. Rows contain dates, columns contain symbols.
:returns y_val_true_df: Labels for the validation set. Rows contain dates, columns contain symbols.
:returns y_val_pred_df: Predictions for the validation set. Rows contain dates, columns contain symbols.
"""
multiindex = x.index.nlevels > 1
x_y = pd.concat([x, y], axis=1)
x_y_sorted = x_y.sort_index()
if multiindex:
x_y_train = x_y_sorted.loc[:fe.add_market_days(x_y_sorted.index.levels[0][-1], -ahead_days)]
x_y_val = x_y_sorted.loc[x_y_sorted.index.levels[0][-1]:]
else:
x_y_train = x_y_sorted.loc[:fe.add_market_days(x_y_sorted.index[-1], -ahead_days)]
x_y_val = x_y_sorted.loc[x_y_sorted.index[-1]:]
x_train = x_y_train.iloc[:, :-1]
x_val = x_y_val.iloc[:, :-1]
y_train_true = x_y_train.iloc[:, -1]
y_val_true = x_y_val.iloc[:, -1]
estimator.fit(x_train, y_train_true)
y_train_pred = estimator.predict(x_train)
y_val_pred = estimator.predict(x_val)
y_train_true_df = pd.DataFrame(y_train_true)
y_train_pred_df = pd.DataFrame(y_train_pred)
y_val_true_df = pd.DataFrame(y_val_true)
y_val_pred_df = pd.DataFrame(y_val_pred)
# Just to make it look prettier
y_train_pred_df.columns = y_train_true_df.columns
y_val_pred_df.columns = y_val_true_df.columns
return y_train_true_df, \
y_train_pred_df, \
y_val_true_df, \
y_val_pred_df
def roll_evaluate(x, y, train_days, step_eval_days, ahead_days, predictor, verbose=False):
"""
Warning: The final date of the period should be no larger than the final date of the SPY_DF
This function applies run_single_val many times, in a rolling evaluation fashion.
:param x: A dataframe of samples. Normally it will span for a period larger than the training period.
The columns represent the base days.
The rows always contain the dates of the initial day in the base period. Additionally,
the dataframe may be multiindexed with information about from which symbol each sample comes from.
The symbol information is not used for the training, but may be useful to get some insigths in
the validation process.
:param y: The labels of each sample. It corresponds to the (standarized) value of a ticker, some days ahead.
:param train_days: The amount of training days for each train-validation run.
:param step_eval_days: The amount of days to move the training and validation sets on each cycle.
:param ahead_days: Number of days ahead that the labels are from the last base day.
:param predictor: A predictor object for the labels. It follows the scikit-learn interface, but keeps the dataframe
information.
:param verbose: If true it shows some messages and progress reports.
:returns r2_train_metrics: A numpy array with the mean and standard deviation of the R^2 metrics for each date of
evaluation. The mean and std are taken on the symbols dimension.
:returns mre_train_metrics: A numpy array with the mean and standard deviation of the MRE metrics for each date of
evaluation. The mean and std are taken on the symbols dimension.
:returns y_val_true_df: Labels for the validation set. Rows contain dates, columns contain symbols.
:returns y_val_pred_df: Predictions for the validation set. Rows contain dates, columns contain symbols.
:returns mean_dates: The mean date of the training period. It is useful to plot the training metrics in time.
"""
# calculate start and end date
# sort by date
x_y_sorted = pd.concat([x, y], axis=1).sort_index()
start_date = x_y_sorted.index.levels[0][0]
end_date = fe.add_market_days(start_date, train_days)
final_date = x_y_sorted.index.levels[0][-1]
# loop: run_single_val(x,y, ahead_days, estimator)
mean_dates = []
r2_train_means = []
r2_train_stds = []
mre_train_means = []
mre_train_stds = []
y_val_true_df = pd.DataFrame()
y_val_pred_df = pd.DataFrame()
num_training_sets = (252 / 365) * (
x.index.levels[0].max() - fe.add_market_days(x.index.levels[0].min(), train_days)).days // step_eval_days
set_index = 0
if verbose:
print('Evaluating approximately %i training/evaluation pairs' % num_training_sets)
while end_date < final_date:
x_temp = x_y_sorted.loc[start_date:end_date].iloc[:, :-1]
y_temp = x_y_sorted.loc[start_date:end_date].iloc[:, -1]
x_temp.index = x_temp.index.remove_unused_levels()
y_temp.index = y_temp.index.remove_unused_levels()
y_train_true, y_train_pred, y_val_true, y_val_pred = run_single_val(x_temp, y_temp, ahead_days, predictor)
# Register the mean date of the period for later use
mean_dates.append(start_date + ((end_date - start_date) / 2))
# Calculate R^2 and MRE for training and append
r2_scores, mre_scores, tickers = get_metrics(y_train_true, y_train_pred)
r2_train_means.append(np.mean(r2_scores))
r2_train_stds.append(np.std(r2_scores))
mre_train_means.append(np.mean(mre_scores))
mre_train_stds.append(np.std(mre_scores))
# Append validation results
y_val_true_df = y_val_true_df.append(y_val_true)
y_val_pred_df = y_val_pred_df.append(y_val_pred)
# Update the dates
start_date = fe.add_market_days(start_date, step_eval_days)
end_date = fe.add_market_days(end_date, step_eval_days)
set_index += 1
if verbose:
sys.stdout.write('\rApproximately %2.1f percent complete. ' % (100.0 * set_index / num_training_sets))
sys.stdout.flush()
return np.array([r2_train_means, r2_train_stds]).T, \
np.array([mre_train_means, mre_train_stds]).T, \
y_val_true_df, \
y_val_pred_df, \
np.array(mean_dates)
| mit | -719,197,982,344,996,700 | 45.532751 | 119 | 0.668262 | false | 3.321696 | false | false | false |
Freeseer/freeseer | src/freeseer/tests/framework/config/persist/test_jsonstorage.py | 1 | 1605 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# freeseer - vga/presentation capture software
#
# Copyright (C) 2011, 2013 Free and Open Source Software Learning Centre
# http://fosslc.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# For support, questions, suggestions or any other inquiries, visit:
# http://wiki.github.com/Freeseer/freeseer/
import unittest
from freeseer.framework.config.persist import JSONConfigStorage
from freeseer.tests.framework.config.persist import ConfigStorageTest
initial_config = '''\
{
"this_section": {
"option1": "othello",
"option2": "0"
}
}\
'''
after_config = '''\
{
"this_section": {
"option1": "something_new",
"option2": "10"
}
}\
'''
class TestJSONConfigStorage(ConfigStorageTest, unittest.TestCase):
"""Tests that JSONConfigStorage works with a generic Config subclass."""
CONFIG_STORAGE_CLASS = JSONConfigStorage
INITIAL_LOAD_CONFIG = initial_config
AFTER_STORE_CONFIG = after_config
| gpl-3.0 | -2,208,351,403,935,526,400 | 29.283019 | 76 | 0.71215 | false | 3.681193 | true | false | false |
RedhawkSDR/framework-codegen | redhawk/codegen/jinja/cpp/service/generator.py | 1 | 2253 | #
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of REDHAWK core.
#
# REDHAWK core is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
import jinja2
from redhawk.codegen.jinja.loader import CodegenLoader
from redhawk.codegen.jinja.common import ShellTemplate, AutomakeTemplate, AutoconfTemplate
from redhawk.codegen.jinja.cpp import CppCodeGenerator, CppTemplate
from mapping import ServiceMapper
if not '__package__' in locals():
# Python 2.4 compatibility
__package__ = __name__.rsplit('.', 1)[0]
loader = CodegenLoader(__package__,
{'common': 'redhawk.codegen.jinja.common'})
class ServiceGenerator(CppCodeGenerator):
def loader(self, component):
return loader
def componentMapper(self):
return ServiceMapper()
def propertyMapper(self):
return None
def portMapper(self):
return None
def templates(self, component):
templates = [
CppTemplate('main.cpp'),
CppTemplate('service.cpp', component['userclass']['file'], userfile=True),
CppTemplate('service.h', component['userclass']['header'], userfile=True),
CppTemplate('service_base.cpp', component['baseclass']['file']),
CppTemplate('service_base.h', component['baseclass']['header']),
AutomakeTemplate('Makefile.am'),
AutomakeTemplate('Makefile.am.ide', userfile=True),
AutoconfTemplate('configure.ac'),
ShellTemplate('build.sh'),
ShellTemplate('common/reconf')
]
return templates
| lgpl-3.0 | -3,264,611,436,901,658,000 | 34.761905 | 90 | 0.689303 | false | 4.203358 | false | false | false |
imarin/Odoo-Mexico-localization | l10n_mx_company_multi_address/invoice.py | 1 | 4507 | # -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2012 Vauxoo - http://www.vauxoo.com/
# All Rights Reserved.
# info Vauxoo (info@vauxoo.com)
############################################################################
# Coded by: Fernando Irene Garcia (fernando@vauxoo.com)
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import pooler, tools
class account_invoice(osv.Model):
_inherit = 'account.invoice'
def _get_address_issued_invoice(self, cr, uid, ids, name, args, context=None):
if context is None:
context = {}
res = {}
journal_obj = self.pool.get('account.journal')
for id_ in ids:
data = self.browse(cr, uid, id_, context=context)
journal_id = data.journal_id.id
data_journal = journal_obj.browse(
cr, uid, journal_id, context=context)
a = data_journal.address_invoice_company_id and \
data_journal.address_invoice_company_id.id or False
b = data_journal.company2_id and \
data_journal.company2_id.address_invoice_parent_company_id and \
data_journal.company2_id.address_invoice_parent_company_id.id or False
c = data.company_id and \
data.company_id.address_invoice_parent_company_id and \
data.company_id.address_invoice_parent_company_id.id or False
address_invoice = a or b or c or False
res[data.id] = address_invoice
return res
def _get_company_emitter_invoice(self, cr, uid, ids, name, args, context=None):
if context is None:
context = {}
res = {}
journal_obj = self.pool.get('account.journal')
for id_ in ids:
data = self.browse(cr, uid, id_, context=context)
journal_id = data.journal_id.id
data_journal = journal_obj.browse(
cr, uid, journal_id, context=context)
company_invoice = data_journal.company2_id and \
data_journal.company2_id.id or data.company_id and \
data.company_id.id or False
res[data.id] = company_invoice
return res
_columns = {
'address_issued_id': fields.function(_get_address_issued_invoice,
type="many2one", relation='res.partner', string='Address Issued \
Invoice', help='This address will be used as address that issued \
for electronic invoice'),
'company_emitter_id': fields.function(_get_company_emitter_invoice,
type="many2one", relation='res.company', string='Company Emitter \
Invoice', help='This company will be used as emitter company in \
the electronic invoice')
}
def onchange_journal_id(self, cr, uid, ids, journal_id=False, context=None):
if context is None:
context = {}
result = super(account_invoice, self).onchange_journal_id(
cr, uid, ids, journal_id, context=context)
address_id = journal_id and self.pool.get('account.journal').browse(
cr, uid, journal_id, context=context) or False
if address_id and address_id.address_invoice_company_id:
result['value'].update({'address_invoice_company_id':
address_id.address_invoice_company_id.id})
if address_id and address_id.company2_id:
result['value'].update({'company2_id': address_id.company2_id.id})
return result
| agpl-3.0 | 7,326,353,656,462,540,000 | 46.442105 | 83 | 0.584868 | false | 4.086129 | false | false | false |
SekoiaLab/Fastir_Collector | factory/factory.py | 1 | 3844 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from settings import FASTIR_ROOT
import os
import sys
import inspect
import importlib
import pkgutil
def _list_packages():
directories = []
lib_dir = FASTIR_ROOT
if lib_dir.endswith('.zip'):
lib_dir = lib_dir[0:-4]
for root, dirnames, filenames in os.walk(lib_dir):
directories = dirnames
break
return directories
def _iter_modules(packages):
for p in packages:
imports = []
try:
for path_import in __import__(p).__path__:
imports.append(path_import.replace('.zip', ''))
except ImportError:
pass
# Workaround to detect imports when used as a binary.
# The issue comes from FrozenImporter in pyinstaller.
# Snippet from https://github.com/webcomics/dosage/blob/master/dosagelib/loader.py
if getattr(sys, 'frozen', False):
modules = []
importers = map(pkgutil.get_importer, imports)
toc = set()
for i in importers:
if hasattr(i, 'toc'):
toc |= i.toc
for elm in toc:
modules.append(elm)
for module in modules:
if 'psutil' not in module and not module.endswith('ext') and module.startswith(p):
yield importlib.import_module(module)
# Normal behavior.
else:
for importer, modname, ispkg in pkgutil.iter_modules(imports):
# quick fix for winXP
if 'psutil' not in p and not modname.endswith('ext'):
yield importlib.import_module(p + '.' + modname)
def load_classes(module, os_name, release):
for name, class_to_load in inspect.getmembers(module, inspect.isclass):
if name.find(os_name + 'All') != -1:
yield class_to_load
elif name.find(os_name + release) != -1:
yield class_to_load
def load_modules(filters, output_dir):
directories = _list_packages()
__filter_packages(filters, directories, output_dir)
return _iter_modules(directories)
def list_packages(filters, os_name, release):
"""List available and activated packages"""
result = {}
packages = _list_packages()
copy = packages[:]
for p in copy:
if p.find('.') == 0:
packages.remove(p)
activated_packages = list(packages)
activated_packages = __filter_packages(filters, activated_packages, '')
for module in _iter_modules(activated_packages):
classes = load_classes(module, os_name, release)
for cl in classes:
activated = False
if module.__package__ in activated_packages:
activated = True
result[module.__package__] = activated
break
return result
def __filter_packages(modules, directories, output_dir):
# Remove 'dump' and 'filecatcher' if they are not explicitely specified
for m in ['dump', 'filecatcher']:
if m in directories and m not in modules:
directories.remove(m)
# Remove everything that is not a valid CE package
copy = directories[:]
for d in copy:
if d.find('.') == 0 or d.startswith('_') or d == output_dir:
directories.remove(d)
# Remove everything not specified in module, unless module contains 'all'
if 'fast' not in modules:
copy = directories[:]
for d in copy:
if d not in modules:
directories.remove(d)
# If dump is specified, put it in first position
if 'dump' in directories:
directories.remove('dump')
directories.insert(0, 'dump')
return directories
| gpl-3.0 | 4,597,551,051,461,007,400 | 31.137931 | 98 | 0.574922 | false | 4.32396 | false | false | false |
giruenf/GRIPy | classes/ui/base/toplevel.py | 1 | 21399 | from collections import OrderedDict
import wx
from pubsub import pub
from classes.ui import UIManager
from classes.ui import UIControllerObject
from classes.ui import UIViewObject
from app.pubsub import AUTO_TOPIC
from app.app_utils import GripyIcon
"""
Add(self, item, int proportion=0, int flag=0, int border=0,
PyObject userData=None) -> wx.SizerItem
Appends a child item to the sizer.
"""
item_sizer_keys = ['proportion', 'flag', 'border', 'userData']
#
"""
__init__(self, Window parent, int id=-1, String label=EmptyString,
Point pos=DefaultPosition, Size size=DefaultSize,
long style=0, String name=StaticBoxNameStr) -> StaticBox
"""
static_box_keys = ['id', 'label', 'pos', 'size', 'style', 'name']
#
"""
__init__(self, Window parent, int id=-1, Point pos=DefaultPosition,
Size size=DefaultSize, long style=wxTAB_TRAVERSAL|wxNO_BORDER,
String name=PanelNameStr) -> Panel
"""
panel_keys = ['id', 'pos', 'size', 'style', 'name']
#
staticboxsizer_keys = ['orient']
boxsizer_keys = ['orient']
gridsizer_keys = ['rows', 'cols', 'vgap', 'hgap']
flexgridsizer_keys = ['rows', 'cols', 'vgap', 'hgap']
gridbagsizer_keys = ['vgap', 'hgap']
#
wx_statictext_keys = ['label']
wx_spinctrl_keys = ['id', 'value', 'pos', 'size', 'style', 'min', 'max',
'initial', 'name']
wx_textctrl_keys = ['id', 'value', 'pos', 'size', 'style', 'validator', 'name']
wx_choice_keys = ['id', 'value', 'pos', 'size', 'choices', 'style',
'validator', 'name']
wx_listbox_keys = ['id', 'value', 'pos', 'size', 'choices', 'style',
'validator', 'name']
wx_filepickerctrl_keys = ['id', 'path', 'message', 'wildcard', 'pos', 'size',
'style', 'validator', 'name']
wx_checkbox_keys = ['id', 'label', 'value', 'pos', 'size', 'style', 'validator', 'name']
wx_radiobutton_keys = ['id', 'label', 'pos', 'size', 'style', 'validator', 'name']
registered_widgets = {
wx.StaticText: wx_statictext_keys,
wx.SpinCtrl: wx_spinctrl_keys,
wx.TextCtrl: wx_textctrl_keys,
wx.Choice: wx_choice_keys,
wx.ListBox: wx_listbox_keys,
wx.FilePickerCtrl: wx_filepickerctrl_keys,
wx.CheckBox: wx_checkbox_keys,
wx.RadioButton: wx_radiobutton_keys
}
widget_special_keys = ['initial', 'widget_name', 'options', 'controller_uid']
def get_control_keys(control_class):
if control_class in registered_widgets:
return registered_widgets.get(control_class)
raise Exception('Unregistered class')
def pop_registers(keys, kwargs):
ret = {}
for key in keys:
if kwargs.get(key) is not None:
ret[key] = kwargs.pop(key)
# for key in special_keys:
# if kwargs.get(key) is not None:
# ret[key] = kwargs.pop(key)
return ret, kwargs
def pop_widget_registers(keys, kwargs):
# print 'pop_widget_registers:', keys, kwargs
ctrl_dict = {}
special_dict = {}
for key in keys:
if key in kwargs.keys():
ctrl_dict[key] = kwargs.pop(key)
for key in widget_special_keys:
if key in kwargs.keys():
special_dict[key] = kwargs.pop(key)
return ctrl_dict, special_dict, kwargs
# TODO: Its a GripyObject?
class EncapsulatedControl(object):
def __init__(self, *args, **kwargs):
self._trigger_func = None
self._trigger_kwargs_keys = None
parent = args[0]
if not self._control_class in registered_widgets.keys():
raise Exception('Unregistered class')
special_kw = args[1]
self.name = special_kw.get('widget_name')
initial = special_kw.get('initial')
options = special_kw.get('options', {})
self._controller_uid = special_kw.get('controller_uid')
self.control = self._control_class(parent, **kwargs)
try:
if options:
self.set_options(options)
except Exception as e:
raise
if initial is not None:
self.set_value(initial)
self.old_value = None
def get_topic(self):
UIM = UIManager()
dialog = UIM.get(self._controller_uid)
return self.name + '_widget_changed@' + dialog.view.get_topic()
def set_trigger(self, func, *args):
if not callable(func):
raise Exception('A callable must be supplied.')
self._trigger_func = func
self._trigger_kwargs_keys = list(args)
pub.subscribe(self.check_change, self.get_topic())
def unset_trigger(self):
if not callable(self._trigger_func):
return None
pub.unsubscribe(self.check_change, self.get_topic())
func = self._trigger_func
self._trigger_func = None
keys = self._trigger_kwargs_keys
self._trigger_kwargs_keys = None
return func, keys
def check_change(self, name, old_value, new_value):
if not callable(self._trigger_func):
return
kwargs = {}
if self._trigger_kwargs_keys:
UIM = UIManager()
dialog = UIM.get(self._controller_uid)
for enc_ctrl_name in self._trigger_kwargs_keys:
enc_control = dialog.view.get_object(enc_ctrl_name)
try:
kwargs[enc_control.name] = enc_control.get_value()
except:
raise
self._trigger_func(name, old_value, new_value, **kwargs)
def on_change(self, event):
new_value = self.get_value()
pub.sendMessage(self.get_topic(), name=self.name,
old_value=self.old_value, new_value=new_value
)
self.old_value = new_value
def set_options(self, options_dict=None):
raise NotImplementedError()
def set_value(self, value):
raise NotImplementedError()
def get_value(self):
raise NotImplementedError()
class EncapsulatedChoice(EncapsulatedControl):
_control_class = wx.Choice
def __init__(self, *args, **kwargs):
super(EncapsulatedChoice, self).__init__(*args, **kwargs)
self.control.Bind(wx.EVT_CHOICE, self.on_change)
def set_options(self, options_dict=None):
self.control.Clear()
self._map = options_dict
if self._map is not None:
if not isinstance(self._map, OrderedDict):
self._map = OrderedDict(self._map)
self.control.AppendItems(list(self._map.keys()))
def set_value(self, value, event=False):
if value is None:
return
if not isinstance(value, int):
if not value in self._map.keys():
raise Exception('')
value = self._map.keys().index(value)
self.control.SetSelection(value)
if event:
self.on_change(None)
def get_value(self):
if not self._map:
return None
if self.control.GetSelection() == -1:
return None
return self._map[self.control.GetString(self.control.GetSelection())]
def show(self):
return self.control.Show()
def hide(self):
return self.control.Hide()
def destroy(self):
return self.control.Destroy()
class EncapsulatedRadioButton(EncapsulatedControl):
_control_class = wx.RadioButton
def __init__(self, *args, **kwargs):
super(EncapsulatedRadioButton, self).__init__(*args, **kwargs)
self.control.Bind(wx.EVT_RADIOBUTTON, self.on_change)
def set_value(self, value):
self.control.SetValue(value)
def get_value(self):
return self.control.GetValue()
class EncapsulatedCheckBox(EncapsulatedControl):
_control_class = wx.CheckBox
def __init__(self, *args, **kwargs):
super(EncapsulatedCheckBox, self).__init__(*args, **kwargs)
self.control.Bind(wx.EVT_CHECKBOX, self.on_change)
def set_value(self, value):
self.control.SetValue(value)
def get_value(self):
return self.control.GetValue()
class EncapsulatedTextCtrl(EncapsulatedControl):
_control_class = wx.TextCtrl
def __init__(self, *args, **kwargs):
super(EncapsulatedTextCtrl, self).__init__(*args, **kwargs)
self.control.Bind(wx.EVT_TEXT, self.on_change)
def set_value(self, value):
if value is None:
self.control.SetValue(wx.EmptyString)
else:
self.control.SetValue(str(value))
def get_value(self):
return self.control.GetValue().strip()
def disable(self):
return self.control.Disable()
def enable(self):
return self.control.Enable()
def hide(self):
return self.control.Hide()
def show(self):
return self.control.Show()
def destroy(self):
return self.control.Destroy()
class EncapsulatedFilePickerCtrl(EncapsulatedControl):
_control_class = wx.FilePickerCtrl
def __init__(self, *args, **kwargs):
try:
super(EncapsulatedFilePickerCtrl, self).__init__(*args, **kwargs)
except Exception as e:
print(e)
def set_value(self, value):
self.control.SetPath(value)
def get_value(self):
return self.control.GetPath()
class EncapsulatedSpinCtrl(EncapsulatedControl):
_control_class = wx.SpinCtrl
def __init__(self, *args, **kwargs):
super(EncapsulatedSpinCtrl, self).__init__(*args, **kwargs)
self.control.Bind(wx.EVT_SPINCTRL, self.on_change)
def set_value(self, value):
if value is not None:
# print 'spin =', value, type(value)
self.control.SetValue(value)
def get_value(self):
# print 'spin:', self.control.GetValue()
return self.control.GetValue()
class EncapsulatedStaticText(EncapsulatedControl):
_control_class = wx.StaticText
def __init__(self, *args, **kwargs):
super(EncapsulatedStaticText, self).__init__(*args, **kwargs)
def set_value(self, value):
if value is not None:
self.control.SetLabel(str(value))
def get_value(self):
return self.control.GetLabel()
def hide(self):
return self.control.Hide()
def show(self):
return self.control.Show()
def destroy(self):
return self.control.Destroy()
class EncapsulatedListBox(EncapsulatedControl):
_control_class = wx.ListBox
def __init__(self, *args, **kwargs):
super(EncapsulatedListBox, self).__init__(*args, **kwargs)
self.control.Bind(wx.EVT_LISTBOX, self.on_change)
def set_value(self, value, event=True):
self.control.Clear()
if not value:
self._map = None
else:
self._map = value
self.control.AppendItems(self._map.keys())
# To force on_change
if event:
self.on_change(None)
def get_value(self):
if not self._map:
return None
if not self.control.GetSelections():
return None
return [self._map.get(self.control.GetString(sel)) for sel in self.control.GetSelections()]
class PanelContainer(wx.Panel):
def __init__(self, *args, **kwargs):
# print '\nPanelContainer:', args, kwargs
if not kwargs.get('sizer_class'):
raise Exception()
sizer_class = kwargs.pop('sizer_class')
panel_kw, sizer_kw = pop_registers(panel_keys, kwargs)
wx.Panel.__init__(self, args[0], **panel_kw)
try:
sizer = sizer_class(**sizer_kw)
self.SetSizer(sizer)
except:
raise
class BoxSizerContainer(PanelContainer):
def __init__(self, *args, **kwargs):
if not kwargs:
kwargs = {
'sizer_class': wx.BoxSizer,
'orient': wx.VERTICAL
}
else:
kwargs['sizer_class'] = wx.BoxSizer
if not kwargs.get('orient'):
kwargs['orient'] = wx.VERTICAL
elif kwargs.get('orient') not in [wx.HORIZONTAL, wx.VERTICAL]:
raise Exception()
super().__init__(*args, **kwargs)
class GridSizerContainer(PanelContainer):
def __init__(self, *args, **kwargs):
if not kwargs:
kwargs = {'sizer_class': wx.GridSizer}
else:
kwargs['sizer_class'] = wx.GridSizer
super().__init__(*args, **kwargs)
class GridBagSizerContainer(PanelContainer):
def __init__(self, *args, **kwargs):
if not kwargs:
kwargs = {'sizer_class': wx.GridBagSizer}
else:
kwargs['sizer_class'] = wx.GridBagSizer
super().__init__(*args, **kwargs)
class FlexGridSizerContainer(PanelContainer):
def __init__(self, *args, **kwargs):
if not kwargs:
kwargs = {'sizer_class': wx.FlexGridSizer}
else:
kwargs['sizer_class'] = wx.FlexGridSizer
super().__init__(*args, **kwargs)
class WarpSizerContainer(PanelContainer):
def __init__(self, *args, **kwargs):
if not kwargs:
kwargs = {
'sizer_class': wx.WarpSizer,
'orient': wx.VERTICAL
}
else:
kwargs['sizer_class'] = wx.BoxSizer
if not kwargs.get('orient'):
kwargs['orient'] = wx.VERTICAL
elif kwargs.get('orient') not in [wx.HORIZONTAL, wx.VERTICAL]:
raise Exception()
super().__init__(*args, **kwargs)
class StaticBoxContainer(wx.StaticBox):
def __init__(self, *args, **kwargs):
sbkw, kwargs = pop_registers(static_box_keys, kwargs)
wx.StaticBox.__init__(self, args[0], **sbkw)
if kwargs.get('orient') is None:
orient = wx.VERTICAL
else:
orient = kwargs.pop('orient')
self._sizer = wx.StaticBoxSizer(self, orient)
def GetSizer(self):
return self._sizer
###############################################################################
###############################################################################
class TopLevelController(UIControllerObject):
tid = 'toplevel_controller'
_ATTRIBUTES = OrderedDict()
_ATTRIBUTES['title'] = {
'default_value': wx.EmptyString,
'type': str
}
# TODO: Use icon from App parameters
_ATTRIBUTES['icon'] = {
'default_value': 'basic/icons/logo-transp.ico',
'type': str
}
_ATTRIBUTES['style'] = {
'default_value': wx.DEFAULT_FRAME_STYLE,
'type': int
}
_ATTRIBUTES['maximized'] = {
'default_value': False,
'type': bool
}
_ATTRIBUTES['size'] = {
'default_value': wx.Size(800, 600),
'type': wx.Size
}
_ATTRIBUTES['pos'] = {
'default_value': wx.Point(50, 50),
'type': wx.Point
}
def __init__(self, **state):
super().__init__(**state)
class TopLevel(UIViewObject):
tid = 'toplevel'
def __init__(self, controller_uid):
UIViewObject.__init__(self, controller_uid)
UIM = UIManager()
controller = UIM.get(self._controller_uid)
# MainWindow subscribing MainWindowController PubSub messages
controller.subscribe(self._set_maximized, 'change.maximized')
controller.subscribe(self._set_size, 'change.size')
controller.subscribe(self._set_position, 'change.pos')
controller.subscribe(self._set_title, 'change.title')
#
# TODO: try to remove _flag using new GripyObject style
# little hack - on_size
# self._flag = False
def on_maximize(self, event):
UIM = UIManager()
controller = UIM.get(self._controller_uid)
controller.set_value_from_event('maximized', self.IsMaximized())
def on_move(self, event):
UIM = UIManager()
controller = UIM.get(self._controller_uid)
controller.set_value_from_event('pos', self.GetPosition())
def on_size(self, event):
UIM = UIManager()
controller = UIM.get(self._controller_uid)
controller.set_value_from_event('size', event.GetSize())
controller.set_value_from_event('maximized', self.IsMaximized())
event.Skip()
def _set_maximized(self, new_value, old_value):
self.Unbind(wx.EVT_MAXIMIZE, handler=self.on_maximize)
self.Maximize(new_value)
self.Bind(wx.EVT_MAXIMIZE, self.on_maximize)
def _set_size(self, new_value, old_value):
self.Unbind(wx.EVT_SIZE, handler=self.on_size)
self.SetSize(new_value)
self.Bind(wx.EVT_SIZE, self.on_size)
def _set_position(self, new_value, old_value):
self.Unbind(wx.EVT_MOVE, handler=self.on_move)
self.SetPosition(new_value)
self.Bind(wx.EVT_MOVE, self.on_move)
def _set_title(self, new_value, old_value):
self.SetTitle(new_value)
# Containers
def AddCreateContainer(self, container_type_name, *args, **kwargs):
try:
item_sizer_kw, kwargs = pop_registers(item_sizer_keys, kwargs)
container = self.CreateContainer(container_type_name, *args, **kwargs)
self.AddContainer(container, *args, **item_sizer_kw)
return container
except:
raise
def CreateContainer(self, container_type_name, *args, **kwargs):
try:
if container_type_name == 'BoxSizer':
container_class = BoxSizerContainer
elif container_type_name == 'GridSizer':
container_class = GridSizerContainer
elif container_type_name == 'FlexGridSizer':
container_class = FlexGridSizerContainer
elif container_type_name == 'GridBagSizer':
container_class = GridBagSizerContainer
elif container_type_name == 'StaticBox':
container_class = StaticBoxContainer
elif container_type_name == 'WarpSizer':
container_class = WarpSizerContainer
else:
raise Exception('Unregistered container.')
if not args:
parent = self.mainpanel
else:
parent = args[0]
container = container_class(parent, **kwargs)
return container
except:
raise
def AddContainer(self, container, *args, **kwargs):
for key in kwargs.keys():
if key not in item_sizer_keys:
msg = 'Invalid container key. [key=\"{}\"]'.format(key)
raise Exception(msg)
if not args:
parent = self.mainpanel
else:
parent = args[0]
# container.Show()
if container.__class__ == StaticBoxContainer:
parent.GetSizer().Add(container.GetSizer(), **kwargs)
else:
parent.GetSizer().Add(container, **kwargs)
parent.GetSizer().Layout()
def DetachContainer(self, container):
ctn_sizer = container.GetSizer()
parent = container.GetParent()
if container.__class__ == StaticBoxContainer:
result = parent.GetSizer().Detach(ctn_sizer)
else:
result = parent.GetSizer().Detach(container)
container.Show(False)
return result
# Controllers
def _get_button(self, button_id):
UIM = UIManager()
controller = UIM.get(self._controller_uid)
if button_id & controller.flags:
return self.FindWindow(button_id)
return None
def enable_button(self, button_id, enable=True):
btn = self._get_button(button_id)
btn.Enable(enable)
def register(self, enc_control):
if enc_control.name:
self._objects[enc_control.name] = enc_control
def CreateControl(self, enc_class, container, **kwargs):
# Create and Add a new control.
try:
keys = get_control_keys(enc_class._control_class)
controlkw, specialkw, kwargs = pop_widget_registers(keys, kwargs)
specialkw['controller_uid'] = self._controller_uid
enc_control = enc_class(container, specialkw, **controlkw)
self.register(enc_control)
container.GetSizer().Add(enc_control.control, **kwargs)
container.GetSizer().Layout()
except:
raise
def AddChoice(self, *args, **kwargs):
self.CreateControl(EncapsulatedChoice, args[0], **kwargs)
def AddRadioButton(self, *args, **kwargs):
self.CreateControl(EncapsulatedRadioButton, args[0], **kwargs)
def AddCheckBox(self, *args, **kwargs):
self.CreateControl(EncapsulatedCheckBox, args[0], **kwargs)
def AddTextCtrl(self, *args, **kwargs):
self.CreateControl(EncapsulatedTextCtrl, args[0], **kwargs)
def AddFilePickerCtrl(self, *args, **kwargs):
self.CreateControl(EncapsulatedFilePickerCtrl, args[0], **kwargs)
def AddSpinCtrl(self, *args, **kwargs):
self.CreateControl(EncapsulatedSpinCtrl, args[0], **kwargs)
def AddStaticText(self, *args, **kwargs):
self.CreateControl(EncapsulatedStaticText, args[0], **kwargs)
def AddListBox(self, *args, **kwargs):
self.CreateControl(EncapsulatedListBox, args[0], **kwargs)
def get_results(self):
ret = {}
for name, widget in self._objects.items():
ret[name] = widget.get_value()
return ret
def get_object(self, name):
return self._objects.get(name)
| apache-2.0 | 7,788,121,389,774,640,000 | 30.891207 | 99 | 0.588579 | false | 3.806974 | false | false | false |
edusegzy/pychemqt | lib/mEoS/mXylene.py | 1 | 2664 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from lib.meos import MEoS
from lib import unidades
class mXylene(MEoS):
"""Multiparameter equation of state for m-xylene"""
name = "m-xylene"
CASNumber = "108-38-3"
formula = "C8H10"
synonym = "1,3-dimethylbenzene"
rhoc = unidades.Density(282.929725)
Tc = unidades.Temperature(616.89)
Pc = unidades.Pressure(3534.6, "kPa")
M = 106.165 # g/mol
Tt = unidades.Temperature(225.3)
Tb = unidades.Temperature(412.214)
f_acent = 0.326
momentoDipolar = unidades.DipoleMoment(0.3, "Debye")
id = 43
Fi1 = {"ao_log": [1, 1.169909],
"pow": [0, 1],
"ao_pow": [12.652887, -0.45975624],
"ao_exp": [4.44312, 2.862794, 24.83298, 16.26077],
"titao": [160/Tc, 190/Tc, 1333/Tc, 3496/Tc]}
helmholtz1 = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for m-xylene of Zhou et al. (2012).",
"__doi__": {"autor": "Zhou, Y., Lemmon, E.W., and Wu, J.",
"title": "Thermodynamic Properties of o-Xylene, m-Xylene, p-Xylene, and Ethylbenzene",
"ref": "J. Phys. Chem. Ref. Data 41, 023103 (2012).",
"doi": "10.1063/1.3703506"},
"R": 8.314472,
"cp": Fi1,
"ref": "OTO",
"Tmin": Tt, "Tmax": 700.0, "Pmax": 200000.0, "rhomax": 8.677,
"Pmin": 0.003123, "rhomin": 8.677,
"nr1": [0.000012791017, 0.041063111, 1.505996, -2.3095875, -0.46969,
0.171031],
"d1": [8, 4, 1, 1, 2, 3],
"t1": [1.0, 0.91, 0.231, 0.772, 1.205, 0.323],
"nr2": [-1.001728, -0.3945766, 0.6970578, -0.3002876, -0.024311],
"d2": [1, 3, 2, 2, 7],
"t2": [2.7, 3.11, 0.768, 4.1, 0.818],
"c2": [2, 2, 1, 2, 1],
"gamma2": [1]*5,
"nr3": [0.815488, -0.330647, -0.123393, -0.54661],
"d3": [1, 1, 3, 3],
"t3": [2.0, 2.9, 3.83, 0.5],
"alfa3": [1.0244, 1.3788, 0.9806, 6.3563],
"beta3": [1.66, 1.9354, 1.0323, 78],
"gamma3": [1.1013, 0.6515, 0.4975, 1.26],
"epsilon3": [0.713, 0.9169, 0.6897, 0.7245]}
eq = helmholtz1,
_surface = {"sigma": [0.0661], "exp": [1.29]}
_vapor_Pressure = {
"eq": 5,
"ao": [-7.5635, 1.2857, -3.2346, -1.9018],
"exp": [1.0, 1.5, 3.1, 5.6]}
_liquid_Density = {
"eq": 1,
"ao": [0.43346, 3.8716, -3.0144, 1.619],
"exp": [0.16, 0.6, 1.0, 1.5]}
_vapor_Density = {
"eq": 3,
"ao": [-1.1597, -6.0358, -16.712, -45.482, -98.418],
"exp": [0.26, 0.78, 2.6, 5.7, 11.7]}
| gpl-3.0 | -1,377,790,976,608,276,000 | 33.153846 | 107 | 0.477477 | false | 2.298533 | false | false | false |
Natim/sentry | tests/sentry/api/endpoints/test_group_details.py | 10 | 5479 | from __future__ import absolute_import, print_function
from django.core.urlresolvers import reverse
from sentry.models import (
Activity, Group, GroupAssignee, GroupBookmark, GroupSeen, GroupStatus,
GroupTagValue, Release
)
from sentry.testutils import APITestCase
class GroupDetailsTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
group = self.create_group()
url = reverse('sentry-api-0-group-details', kwargs={
'group_id': group.id,
})
response = self.client.get(url, format='json')
assert response.status_code == 200, response.content
assert response.data['id'] == str(group.id)
assert response.data['firstRelease'] is None
def test_with_first_release(self):
self.login_as(user=self.user)
group = self.create_group()
release = Release.objects.create(
project=group.project,
version='1.0',
)
GroupTagValue.objects.create(
group=group,
project=group.project,
key='sentry:release',
value=release.version,
)
url = reverse('sentry-api-0-group-details', kwargs={
'group_id': group.id,
})
response = self.client.get(url, format='json')
assert response.status_code == 200, response.content
assert response.data['id'] == str(group.id)
assert response.data['firstRelease']['version'] == release.version
class GroupUpdateTest(APITestCase):
def test_resolve(self):
self.login_as(user=self.user)
group = self.create_group()
url = reverse('sentry-api-0-group-details', kwargs={
'group_id': group.id,
})
response = self.client.put(url, data={
'status': 'resolved',
}, format='json')
assert response.status_code == 200, response.content
group = Group.objects.get(
id=group.id,
project=group.project.id,
)
assert group.status == GroupStatus.RESOLVED
def test_bookmark(self):
self.login_as(user=self.user)
group = self.create_group()
url = reverse('sentry-api-0-group-details', kwargs={
'group_id': group.id
})
response = self.client.put(url, data={
'isBookmarked': '1',
}, format='json')
assert response.status_code == 200, response.content
# ensure we've created the bookmark
assert GroupBookmark.objects.filter(
group=group, user=self.user).exists()
def test_assign(self):
self.login_as(user=self.user)
group = self.create_group()
url = reverse('sentry-api-0-group-details', kwargs={
'group_id': group.id
})
response = self.client.put(url, data={
'assignedTo': self.user.username,
}, format='json')
assert response.status_code == 200, response.content
assert GroupAssignee.objects.filter(
group=group, user=self.user
).exists()
assert Activity.objects.filter(
group=group, user=self.user, type=Activity.ASSIGNED,
).count() == 1
response = self.client.put(url, format='json')
assert response.status_code == 200, response.content
assert GroupAssignee.objects.filter(
group=group, user=self.user
).exists()
response = self.client.put(url, data={
'assignedTo': '',
}, format='json')
assert response.status_code == 200, response.content
assert not GroupAssignee.objects.filter(
group=group, user=self.user
).exists()
def test_mark_seen(self):
self.login_as(user=self.user)
group = self.create_group()
url = reverse('sentry-api-0-group-details', kwargs={
'group_id': group.id
})
response = self.client.put(url, data={
'hasSeen': '1',
}, format='json')
assert response.status_code == 200, response.content
assert GroupSeen.objects.filter(
group=group, user=self.user).exists()
response = self.client.put(url, data={
'hasSeen': '0',
}, format='json')
assert response.status_code == 200, response.content
assert not GroupSeen.objects.filter(
group=group, user=self.user).exists()
def test_mark_seen_as_non_member(self):
user = self.create_user('foo@example.com', is_superuser=True)
self.login_as(user=user)
group = self.create_group()
url = reverse('sentry-api-0-group-details', kwargs={
'group_id': group.id
})
response = self.client.put(url, data={
'hasSeen': '1',
}, format='json')
assert response.status_code == 200, response.content
assert not GroupSeen.objects.filter(
group=group, user=self.user).exists()
class GroupDeleteTest(APITestCase):
def test_delete(self):
self.login_as(user=self.user)
group = self.create_group()
url = reverse('sentry-api-0-group-details', kwargs={
'group_id': group.id
})
with self.tasks():
response = self.client.delete(url, format='json')
assert response.status_code == 202, response.content
group = Group.objects.filter(id=group.id).exists()
assert not group
| bsd-3-clause | -8,151,098,734,682,799,000 | 27.836842 | 74 | 0.583318 | false | 3.941727 | true | false | false |
0todd0000/rft1d | rft1d/examples/val_max_7_cca_0d.py | 2 | 1937 |
from math import sqrt,log
import numpy as np
from scipy import stats
from matplotlib import pyplot
def here_cca(y, x):
N = y.shape[0]
X,Y = np.matrix(x.T).T, np.matrix(y)
Z = np.matrix(np.ones(N)).T
Rz = np.eye(N) - Z*np.linalg.inv(Z.T*Z)*Z.T
XStar = Rz * X
YStar = Rz * Y
p,r = 1.0, 1.0 #nContrasts, nNuisanceFactors
m = N - p - r
H = YStar.T * XStar * np.linalg.inv( XStar.T * XStar ) * XStar.T * YStar / p
W = YStar.T * (np.eye(nResponses) - XStar*np.linalg.inv(XStar.T*XStar)*XStar.T) * YStar / m
#estimate maximum canonical correlation:
F = np.linalg.inv(W)*H
ff = np.linalg.eigvals( F )
fmax = float( np.real(ff.max()) )
r2max = fmax * p / (m + fmax*p)
rmax = sqrt(r2max)
### compute test statistic:
p,m = float(N), float(y.shape[1])
x2 = -(p-1-0.5*(m+2)) * log( (1-rmax**2) )
return x2
#(0) Set parameters:
np.random.seed(0)
nResponses = 20
nComponents = 3
nIterations = 1000
W0 = np.eye(nComponents)
### derived parameters:
df = nComponents
x = np.linspace(0, 1, nResponses) #independent variable
#(1) Generate Gaussian data and compute test statistic:
X2 = []
for i in range(nIterations):
y = np.random.multivariate_normal(np.zeros(nComponents), W0, nResponses)
chi2 = here_cca(y, x)
X2.append( chi2 )
X2 = np.asarray(X2)
#(2) Survival functions:
heights = np.linspace(3, 12, 21)
sf = np.array( [ (X2>h).mean() for h in heights] )
sfE = stats.chi2.sf(heights, df)
#(3) Plot results:
pyplot.close('all')
ax = pyplot.axes()
ax.plot(heights, sf, 'o', label='Simulated')
ax.plot(heights, sfE, '-', label='Theoretical')
ax.set_xlabel('$u$', size=20)
ax.set_ylabel('$P (\chi^2 > u)$', size=20)
ax.legend()
ax.set_title("CCA validation (0D)", size=20)
pyplot.show()
| gpl-3.0 | -7,678,448,358,227,951,000 | 26.671429 | 105 | 0.571502 | false | 2.496134 | false | false | false |
Laurawly/tvm-1 | tests/python/relay/test_pass_convert_op_layout.py | 1 | 54890 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test alter op layout pass"""
import tvm
from tvm import te
from tvm import relay
from tvm.relay.op import register_alter_op_layout
from tvm.relay import transform, analysis
def run_opt_pass(expr, passes):
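    """Apply one or more Relay passes to ``expr`` at opt_level=3; returns the
    transformed function when ``expr`` is a relay.Function, else its body."""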
passes = passes if isinstance(passes, list) else [passes]
mod = tvm.IRModule.from_expr(expr)
seq = tvm.transform.Sequential(passes)
with tvm.transform.PassContext(opt_level=3):
mod = seq(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def test_no_convert_layout():
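    """ConvertLayout is a no-op when the conv2d already uses the requested NCHW layout."""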
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
def expected():
return before()
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_qnn_binary_no_convert_layout():
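    """A standalone qnn.add has no layout to convert, so ConvertLayout leaves it unchanged."""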
def before():
x = relay.var("x", shape=(2, 2))
y = relay.var("y", shape=(1, 2))
return relay.Function(
[x, y],
relay.qnn.op.add(
x,
y,
lhs_scale=relay.const(0.0156863, "float32"),
lhs_zero_point=relay.const(127, "int32"),
rhs_scale=relay.const(0.0117647, "float32"),
rhs_zero_point=relay.const(85, "int32"),
output_scale=relay.const(0.0235294, "float32"),
output_zero_point=relay.const(128, "int32"),
),
)
def expected():
return before()
a = before()
a = run_opt_pass(a, transform.ConvertLayout({}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_convert_layout():
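    """An NHWC/HWIO conv2d is rewritten to NCHW/OIHW with layout_transforms
    inserted on the data, weight, and output."""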
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
x = relay.layout_transform(x, "NHWC", "NCHW")
weight = relay.layout_transform(weight, "HWIO", "OIHW")
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.relu(y)
y = relay.layout_transform(y, "NCHW", "NHWC")
y = relay.Function(relay.analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_nhwc_convert_layout():
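    """An NCHW/OIHW conv2d is rewritten to NHWC/HWIO when NHWC is requested."""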
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
x = relay.layout_transform(x, "NCHW", "NHWC")
weight = relay.layout_transform(weight, "OIHW", "HWIO")
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y = relay.layout_transform(y, "NHWC", "NCHW")
y = relay.Function(relay.analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NHWC", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_transpose_convert_layout():
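    """conv2d_transpose in NHWC/HWIO is converted to NCHW/OIHW like conv2d."""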
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
y = relay.nn.conv2d_transpose(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
x = relay.layout_transform(x, "NHWC", "NCHW")
weight = relay.layout_transform(weight, "HWIO", "OIHW")
y = relay.nn.conv2d_transpose(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.relu(y)
y = relay.layout_transform(y, "NCHW", "NHWC")
y = relay.Function(relay.analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d_transpose": ["NCHW", "OIHW"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_bias_pool_convert_layout():
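    """bias_add, max_pool2d, and cast after the conv all stay in NCHW; the graph
    transforms back to NHWC only before batch_flatten."""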
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
bias = relay.var("bias", shape=(64,))
weight = relay.var("weight", shape=(3, 3, 64, 64))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.bias_add(y, bias, axis=3)
# a useless tuple, which will be eliminated
y = relay.Tuple([y])[0]
y = relay.nn.relu(y)
y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout="NHWC")
y = relay.cast(y, "int32")
y = relay.nn.batch_flatten(y)
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
bias = relay.var("bias", shape=(64,))
weight = relay.var("weight", shape=(3, 3, 64, 64))
x = relay.layout_transform(x, "NHWC", "NCHW")
weight = relay.layout_transform(weight, "HWIO", "OIHW")
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
bias = relay.expand_dims(bias, axis=0, num_newaxis=3)
bias = relay.layout_transform(bias, "NHWC", "NCHW")
y = relay.add(y, bias)
# a useless tuple, which will be eliminated
y = relay.Tuple([y])[0]
y = relay.nn.relu(y)
y = relay.nn.max_pool2d(y, pool_size=(2, 2))
y = relay.cast(y, "int32")
y = relay.layout_transform(y, "NCHW", "NHWC")
y = relay.nn.batch_flatten(y)
y = relay.Function(analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_concat_convert_layout():
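    """concatenate's axis is remapped from 3 (NHWC) to 1 (NCHW) when both conv
    producers are converted, keeping a single transform at the output."""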
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 64))
weight2 = relay.var("weight2", shape=(3, 3, 64, 64))
y = relay.nn.conv2d(
x,
weight1,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y1 = relay.nn.conv2d(
y,
weight2,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
ret = relay.concatenate([y, y1], axis=3)
y = relay.Function(analysis.free_vars(ret), ret)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 64))
weight2 = relay.var("weight2", shape=(3, 3, 64, 64))
weight1 = relay.layout_transform(weight1, "HWIO", "OIHW")
weight2 = relay.layout_transform(weight2, "HWIO", "OIHW")
y = relay.layout_transform(x, "NHWC", "NCHW")
y = relay.nn.conv2d(y, weight1, channels=64, kernel_size=(3, 3), padding=(1, 1))
y1 = relay.nn.conv2d(y, weight2, channels=64, kernel_size=(3, 3), padding=(1, 1))
ret = relay.concatenate([y, y1], axis=1)
ret = relay.layout_transform(ret, "NCHW", "NHWC")
y = relay.Function(analysis.free_vars(ret), ret)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_deformable_conv_bias_pool_convert_layout():
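    """deformable_conv2d converts in both directions (NHWC<->NCHW), transforming
    the data, offset, and weight operands alongside bias_add and max_pool2d."""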
def before(N, CI, H, W, CO, KH, KW, layout):
if layout == "NCHW":
data_shape = (N, CI, H, W)
weight_shape = (CO, CI, KH, KW)
kernel_layout = "OIHW"
else:
data_shape = (N, H, W, CI)
weight_shape = (KH, KW, CI, CO)
kernel_layout = "HWIO"
bias_shape = (CO,)
data = relay.var("data", shape=data_shape, dtype="float32")
offset = relay.var("offset")
weight = relay.var("weight", shape=weight_shape, dtype="float32")
bias = relay.var("bias", shape=bias_shape, dtype="float32")
y = relay.nn.deformable_conv2d(
data,
offset,
weight,
kernel_size=(KH, KW),
channels=CO,
data_layout=layout,
kernel_layout=kernel_layout,
)
y = relay.nn.bias_add(y, bias, axis=-1 if layout == "NHWC" else 1)
y = relay.nn.relu(y)
y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout=layout)
y = relay.cast(y, "int32")
y = relay.nn.batch_flatten(y)
y = relay.Function(analysis.free_vars(y), y)
return y
def expected(N, CI, H, W, CO, KH, KW, OH, OW, src_layout, dst_layout):
layout_map = {"src": {}, "dst": {}}
if src_layout == "NCHW":
nchw = layout_map["src"]
nhwc = layout_map["dst"]
else:
nchw = layout_map["dst"]
nhwc = layout_map["src"]
nchw["data_layout"] = "NCHW"
nchw["data_shape"] = (N, CI, H, W)
nchw["offset_shape"] = (N, KH * KW * 2, OH, OW)
nchw["weight_shape"] = (CO, CI, KH, KW)
nchw["kernel_layout"] = "OIHW"
nhwc["data_layout"] = "NHWC"
nhwc["data_shape"] = (N, H, W, CI)
nhwc["offset_shape"] = (N, OH, OW, KH * KW * 2)
nhwc["weight_shape"] = (KH, KW, CI, CO)
nhwc["kernel_layout"] = "HWIO"
bias_shape = (CO,)
data = relay.var("data", shape=layout_map["src"]["data_shape"], dtype="float32")
offset = relay.var("offset", shape=layout_map["src"]["offset_shape"], dtype="float32")
weight = relay.var("weight", shape=layout_map["src"]["weight_shape"], dtype="float32")
bias = relay.var("bias", shape=bias_shape, dtype="float32")
data = relay.layout_transform(
data, layout_map["src"]["data_layout"], layout_map["dst"]["data_layout"]
)
offset = relay.layout_transform(
offset, layout_map["src"]["data_layout"], layout_map["dst"]["data_layout"]
)
weight = relay.layout_transform(
weight, layout_map["src"]["kernel_layout"], layout_map["dst"]["kernel_layout"]
)
y = relay.nn.deformable_conv2d(
data,
offset,
weight,
kernel_size=(KH, KW),
channels=CO,
data_layout=layout_map["dst"]["data_layout"],
kernel_layout=layout_map["dst"]["kernel_layout"],
)
if layout_map["src"]["data_layout"] == "NHWC":
bias = relay.expand_dims(bias, axis=0, num_newaxis=3)
else:
bias = relay.expand_dims(bias, axis=1, num_newaxis=2)
bias = relay.expand_dims(bias, axis=0)
bias = relay.layout_transform(
bias, layout_map["src"]["data_layout"], layout_map["dst"]["data_layout"]
)
y = relay.add(y, bias)
y = relay.nn.relu(y)
y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout=layout_map["dst"]["data_layout"])
y = relay.cast(y, "int32")
y = relay.layout_transform(
y, layout_map["dst"]["data_layout"], layout_map["src"]["data_layout"]
)
y = relay.nn.batch_flatten(y)
y = relay.Function(analysis.free_vars(y), y)
return y
# NHWC -> NCHW
a = before(1, 3, 224, 224, 32, 3, 3, "NHWC")
a = run_opt_pass(a, transform.ConvertLayout({"nn.deformable_conv2d": ["NCHW", "default"]}))
b = run_opt_pass(
expected(1, 3, 224, 224, 32, 3, 3, 222, 222, "NHWC", "NCHW"), transform.InferType()
)
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
# NCHW -> NHWC
a = before(1, 3, 224, 224, 32, 3, 3, "NCHW")
a = run_opt_pass(a, transform.ConvertLayout({"nn.deformable_conv2d": ["NHWC", "default"]}))
b = run_opt_pass(
expected(1, 3, 224, 224, 32, 3, 3, 222, 222, "NCHW", "NHWC"), transform.InferType()
)
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_dual_path_convert_layout():
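    """ Check a conv output feeding two branches: a second conv and a layout-agnostic batch_flatten. """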
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
weight2 = relay.var("weight2", shape=(3, 3, 32, 32))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y1 = relay.nn.conv2d(
y,
weight2,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y1 = relay.nn.relu(y1)
y2 = relay.nn.batch_flatten(y)
ret = relay.Tuple([y1, y2])
y = relay.Function(analysis.free_vars(ret), ret)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
weight2 = relay.var("weight2", shape=(3, 3, 32, 32))
weight1 = relay.layout_transform(weight1, "HWIO", "OIHW")
weight2 = relay.layout_transform(weight2, "HWIO", "OIHW")
y = relay.layout_transform(x, "NHWC", "NCHW")
y = relay.nn.conv2d(y, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.relu(y)
y1 = relay.nn.conv2d(y, weight2, channels=32, kernel_size=(3, 3), padding=(1, 1))
y1 = relay.nn.relu(y1)
y1 = relay.layout_transform(y1, "NCHW", "NHWC")
y2 = relay.layout_transform(y, "NCHW", "NHWC")
y2 = relay.nn.batch_flatten(y2)
ret = relay.Tuple([y1, y2])
y = relay.Function(analysis.free_vars(ret), ret)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_bn_convert_layout():
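    """ Check that batch_norm adapts to the converted layout, leaving a single NCHW to NHWC transform. """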
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
gamma = relay.var("gamma")
beta = relay.var("beta")
mean = relay.var("mean")
variance = relay.var("variance")
y, _, _ = relay.nn.batch_norm(y, gamma, beta, mean, variance, axis=3)
return relay.Function(analysis.free_vars(y), y)
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
    # Check that there is only 1 NCHW to NHWC transform.
has_lt = list()
find_op = lambda x: has_lt.append(
isinstance(x, tvm.relay.expr.Call)
and x.op.name == "layout_transform"
and x.attrs.src_layout == "NCHW"
and x.attrs.dst_layout == "NHWC"
)
relay.analysis.post_order_visit(a, find_op)
has_lt = list(filter(lambda x: x, has_lt))
assert len(has_lt) == 1
def test_slice_like_convert_layout():
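    """ Check that slice_like axes are remapped to match the converted layout. """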
def verify_slice_like(after, expected_axes):
        # Verify that the slice_like op after ConvertLayout has the expected axes.
has_expected = list()
checker = lambda x: has_expected.append(
isinstance(x, tvm.relay.expr.Call)
and x.op.name == "slice_like"
and str(x.attrs.axes) == str(expected_axes)
)
relay.analysis.post_order_visit(after, checker)
assert any(has_expected)
def func_nhwc():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
out = relay.slice_like(y, y, axes=[1, 2])
return relay.Function(analysis.free_vars(out), out)
after = run_opt_pass(func_nhwc(), transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
verify_slice_like(after, [2, 3])
def func_nchw():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1", shape=(32, 64, 3, 3))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
out = relay.slice_like(y, y, axes=[2, 3])
return relay.Function(analysis.free_vars(out), out)
after = run_opt_pass(func_nchw(), transform.ConvertLayout({"nn.conv2d": ["NHWC", "default"]}))
verify_slice_like(after, [1, 2])
def func_vars():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
# z has no layout information so convert layout won't happen.
z = relay.var("y", shape=(1, 56, 56, 32))
out = relay.slice_like(y, z, axes=[1, 2])
return relay.Function(analysis.free_vars(out), out)
after = run_opt_pass(func_vars(), transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
verify_slice_like(after, [1, 2])
def test_transpose_convert_layout():
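    """ Check that transpose axes (explicit, negative, and default) are rewritten for the converted layout. """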
def verify_transpose(after, expected_axes, expected_transform_cnt):
        # Verify that the transpose op after ConvertLayout has the expected axes.
has_expected = list()
checker = lambda x: has_expected.append(
isinstance(x, tvm.relay.expr.Call)
and x.op.name == "transpose"
and str(x.attrs.axes) == str(expected_axes)
)
relay.analysis.post_order_visit(after, checker)
assert any(has_expected), after
is_transform = list()
checker = lambda x: is_transform.append(
1 if isinstance(x, tvm.relay.expr.Call) and x.op.name == "layout_transform" else 0
)
relay.analysis.post_order_visit(after, checker)
assert (
sum(is_transform) == expected_transform_cnt
), "Expected %s layout_transform, but get\n%s" % (expected_transform_cnt, after)
def nhwc_to_nchw():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
z = relay.var("z", shape=(56, 56, 32))
out = relay.add(y, z)
out = relay.transpose(out, axes=[0, 3, 1, 2])
out = relay.nn.batch_flatten(out)
func = relay.Function(analysis.free_vars(out), out)
return run_opt_pass(func, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
verify_transpose(nhwc_to_nchw(), [0, 1, 2, 3], 3)
def nchw_to_nhwc():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1", shape=(32, 64, 3, 3))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
z = relay.var("z", shape=(32, 56, 56))
out = relay.add(y, z)
out = relay.transpose(out, axes=[0, 2, -1, 1]) # Also test a negative axis.
out = relay.nn.batch_flatten(out)
func = relay.Function(analysis.free_vars(out), out)
return run_opt_pass(func, transform.ConvertLayout({"nn.conv2d": ["NHWC", "default"]}))
verify_transpose(nchw_to_nhwc(), [0, 1, 2, 3], 3)
def default_axes():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1", shape=(32, 64, 3, 3))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
z = relay.var("z", shape=(32, 56, 56))
out = relay.add(y, z)
out = relay.transpose(out) # No axes provided, will use the reversed axes.
func = relay.Function(analysis.free_vars(out), out)
return run_opt_pass(func, transform.ConvertLayout({"nn.conv2d": ["NHWC", "default"]}))
verify_transpose(default_axes(), [2, 1, 3, 0], 3)
def test_resnet_convert_layout():
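    """ Check a residual-style block: two convs sharing an input, an add, and a global max pool. """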
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
weight2 = relay.var("weight2", shape=(1, 1, 64, 32))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y2 = relay.nn.conv2d(
x, weight2, channels=32, kernel_size=(1, 1), data_layout="NHWC", kernel_layout="HWIO"
)
y2 = relay.nn.relu(y2)
y = y + y2
y = relay.nn.global_max_pool2d(y, layout="NHWC")
return relay.Function(analysis.free_vars(y), y)
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
weight2 = relay.var("weight2", shape=(1, 1, 64, 32))
weight1 = relay.layout_transform(weight1, "HWIO", "OIHW")
weight2 = relay.layout_transform(weight2, "HWIO", "OIHW")
x = relay.layout_transform(x, "NHWC", "NCHW")
y = relay.nn.conv2d(x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.relu(y)
y2 = relay.nn.conv2d(x, weight2, channels=32, kernel_size=(1, 1))
y2 = relay.nn.relu(y2)
y = y + y2
y = relay.nn.global_max_pool2d(y)
y = relay.layout_transform(y, "NCHW", "NHWC")
return relay.Function(analysis.free_vars(y), y)
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_scalar_convert_layout():
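    """ Check that adding a scalar constant introduces no extra layout transform. """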
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.add(y, relay.const(1, "float32"))
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
w = relay.var("weight", shape=(3, 3, 64, 64))
x = relay.layout_transform(x, "NHWC", "NCHW")
w = relay.layout_transform(w, "HWIO", "OIHW")
y = relay.nn.conv2d(x, w, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.add(y, relay.const(1.0, "float32"))
y = relay.layout_transform(y, "NCHW", "NHWC")
y = relay.Function(analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_bn_convert_layout():
""" Check that layout transforms are propagated through bn. """
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
dtype = "float32"
beta = relay.var("beta", relay.TensorType((64,), dtype))
gamma = relay.var("gamma", relay.TensorType((64,), dtype))
moving_mean = relay.var("moving_mean", relay.TensorType((64,), dtype))
moving_var = relay.var("moving_var", relay.TensorType((64,), dtype))
y = relay.nn.batch_norm(y, gamma, beta, moving_mean, moving_var, axis=3)
y = relay.nn.relu(y[0])
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
w = relay.var("weight", shape=(3, 3, 64, 64))
x = relay.layout_transform(x, "NHWC", "NCHW")
w = relay.layout_transform(w, "HWIO", "OIHW")
y = relay.nn.conv2d(x, w, channels=64, kernel_size=(3, 3), padding=(1, 1))
dtype = "float32"
beta = relay.var("beta", relay.TensorType((64,), dtype))
gamma = relay.var("gamma", relay.TensorType((64,), dtype))
moving_mean = relay.var("moving_mean", relay.TensorType((64,), dtype))
moving_var = relay.var("moving_var", relay.TensorType((64,), dtype))
y = relay.nn.batch_norm(y, gamma, beta, moving_mean, moving_var, axis=1)
y = relay.nn.relu(y[0])
y = relay.layout_transform(y, "NCHW", "NHWC")
y = relay.Function(analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_qnn_conv_requantize_convert_layout():
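    """ Check that requantize's channel axis becomes 1 when qnn.conv2d is converted to NCHW. """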
def before():
x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
weight = relay.var("weight", shape=(3, 3, 64, 64), dtype="int8")
y = relay.qnn.op.conv2d(
x,
weight,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.qnn.op.requantize(
y,
relay.const(1, "float32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "int32"),
out_dtype="int32",
)
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
weight = relay.var("weight", shape=(3, 3, 64, 64), dtype="int8")
x = relay.layout_transform(x, "NHWC", "NCHW")
weight = relay.layout_transform(weight, "HWIO", "OIHW")
y = relay.qnn.op.conv2d(
x,
weight,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
)
y = relay.qnn.op.requantize(
y,
relay.const(1, "float32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "int32"),
axis=1,
out_dtype="int32",
)
y = relay.nn.relu(y)
y = relay.layout_transform(y, "NCHW", "NHWC")
y = relay.Function(relay.analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"qnn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_qnn_conv_concat_convert_layout():
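    """ Check that qnn.concatenate's axis is remapped from 3 to 1 under NHWC to NCHW conversion. """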
def before():
x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
weight1 = relay.var("weight1", shape=(3, 3, 64, 64), dtype="int8")
weight2 = relay.var("weight2", shape=(3, 3, 64, 64), dtype="int8")
y = relay.qnn.op.conv2d(
x,
weight1,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y1 = relay.qnn.op.conv2d(
y,
weight2,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.cast(y, "int8")
y1 = relay.cast(y, "int8")
ret = relay.qnn.op.concatenate(
[y, y1],
[relay.const(1, "float32"), relay.const(1, "float32")],
[relay.const(1, "int32"), relay.const(1, "int32")],
relay.const(1, "float32"),
relay.const(1, "int32"),
axis=3,
)
y = relay.Function(analysis.free_vars(ret), ret)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
weight1 = relay.var("weight1", shape=(3, 3, 64, 64), dtype="int8")
weight2 = relay.var("weight2", shape=(3, 3, 64, 64), dtype="int8")
weight1 = relay.layout_transform(weight1, "HWIO", "OIHW")
weight2 = relay.layout_transform(weight2, "HWIO", "OIHW")
y = relay.layout_transform(x, "NHWC", "NCHW")
y = relay.qnn.op.conv2d(
y,
weight1,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
)
y1 = relay.qnn.op.conv2d(
y,
weight2,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
)
y = relay.cast(y, "int8")
y1 = relay.cast(y, "int8")
ret = relay.qnn.op.concatenate(
[y, y1],
[relay.const(1, "float32"), relay.const(1, "float32")],
[relay.const(1, "int32"), relay.const(1, "int32")],
relay.const(1, "float32"),
relay.const(1, "int32"),
axis=1,
)
ret = relay.layout_transform(ret, "NCHW", "NHWC")
y = relay.Function(analysis.free_vars(ret), ret)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"qnn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_qnn_conv_add_convert_layout():
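    """ Check that qnn.add follows the converted NCHW layout, with one transform back to NHWC at the output. """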
def before():
x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
weight1 = relay.var("weight1", shape=(3, 3, 64, 64), dtype="int8")
weight2 = relay.var("weight2", shape=(3, 3, 64, 64), dtype="int8")
y = relay.qnn.op.conv2d(
x,
weight1,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y1 = relay.qnn.op.conv2d(
y,
weight2,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.cast(y, "int8")
y1 = relay.cast(y, "int8")
ret = relay.qnn.op.add(
y,
y1,
relay.const(1, "float32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "int32"),
)
y = relay.Function(analysis.free_vars(ret), ret)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
weight1 = relay.var("weight1", shape=(3, 3, 64, 64), dtype="int8")
weight2 = relay.var("weight2", shape=(3, 3, 64, 64), dtype="int8")
weight1 = relay.layout_transform(weight1, "HWIO", "OIHW")
weight2 = relay.layout_transform(weight2, "HWIO", "OIHW")
y = relay.layout_transform(x, "NHWC", "NCHW")
y = relay.qnn.op.conv2d(
y,
weight1,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
)
y1 = relay.qnn.op.conv2d(
y,
weight2,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
)
y = relay.cast(y, "int8")
y1 = relay.cast(y, "int8")
ret = relay.qnn.op.add(
y,
y1,
relay.const(1, "float32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "int32"),
)
ret = relay.layout_transform(ret, "NCHW", "NHWC")
y = relay.Function(analysis.free_vars(ret), ret)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"qnn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_qnn_conv_nhwc_convert_layout():
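    """ Check the reverse direction: qnn.conv2d converted from NCHW to NHWC. """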
def before():
x = relay.var("x", shape=(1, 64, 56, 56), dtype="int8")
weight = relay.var("weight", shape=(64, 64, 3, 3), dtype="int8")
y = relay.qnn.op.conv2d(
x,
weight,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
def expected():
x = relay.var("x", shape=(1, 64, 56, 56), dtype="int8")
weight = relay.var("weight", shape=(64, 64, 3, 3), dtype="int8")
x = relay.layout_transform(x, "NCHW", "NHWC")
weight = relay.layout_transform(weight, "OIHW", "HWIO")
y = relay.qnn.op.conv2d(
x,
weight,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y = relay.layout_transform(y, "NHWC", "NCHW")
y = relay.Function(relay.analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"qnn.conv2d": ["NHWC", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_convert_kernel_layout():
""" Check that convolution kernel layout is correctly transformed. """
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
w = relay.var("weight", shape=(3, 3, 64, 64))
w = relay.layout_transform(w, "HWIO", "OHWI")
y = relay.nn.conv2d(
x,
w,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="OHWI",
)
y = relay.Function(analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NHWC", "OHWI"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_roi_align_convert_layout():
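    """ Check that vision.roi_align converts its data layout while rois remain layout-free. """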
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(
x,
weight1,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
rois = relay.var("rois", shape=(32, 5))
y = relay.vision.roi_align(
y, rois, pooled_size=(14, 14), spatial_scale=0.0625, sample_ratio=2, layout="NCHW"
)
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1", shape=(64, 64, 3, 3))
x = relay.layout_transform(x, "NCHW", "NHWC")
weight1 = relay.layout_transform(weight1, "OIHW", "HWIO")
y = relay.nn.conv2d(
x,
weight1,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
rois = relay.var("rois", shape=(32, 5))
y = relay.vision.roi_align(
y, rois, pooled_size=(14, 14), spatial_scale=0.0625, sample_ratio=2, layout="NHWC"
)
ret = relay.layout_transform(y, "NHWC", "NCHW")
y = relay.Function(analysis.free_vars(ret), ret)
return y
a = before()
desired_layouts = {
"nn.conv2d": ["NHWC", "HWIO"],
"vision.roi_align": ["NHWC", "default"],
}
a = run_opt_pass(a, transform.ConvertLayout(desired_layouts))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_strided_slice_convert_layout():
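    """ Check that strided_slice begin/end/strides are expanded and permuted for the new layout. """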
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
y = relay.nn.relu(y)
y = relay.strided_slice(y, begin=[0, 1], end=[1, -1, 10], strides=[1, 1, 2, 1])
y = relay.Function([x, weight], y)
return y
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
x = relay.layout_transform(x, "NCHW", "NHWC")
weight = relay.layout_transform(weight, "OIHW", "HWIO")
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y = relay.strided_slice(y, begin=[0, 0, 0, 1], end=[1, 10, 56, -1], strides=[1, 2, 1, 1])
y = relay.layout_transform(y, "NHWC", "NCHW")
y = relay.Function(relay.analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NHWC", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_roi_pool_convert_layout():
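    """ Check that vision.roi_pool converts its data layout while rois remain layout-free. """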
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(
x,
weight1,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
rois = relay.var("rois", shape=(32, 5))
y = relay.vision.roi_pool(
y, rois, pooled_size=(14, 14), spatial_scale=0.0625, layout="NCHW"
)
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1", shape=(64, 64, 3, 3))
x = relay.layout_transform(x, "NCHW", "NHWC")
weight1 = relay.layout_transform(weight1, "OIHW", "HWIO")
y = relay.nn.conv2d(
x,
weight1,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
rois = relay.var("rois", shape=(32, 5))
y = relay.vision.roi_pool(
y, rois, pooled_size=(14, 14), spatial_scale=0.0625, layout="NHWC"
)
ret = relay.layout_transform(y, "NHWC", "NCHW")
y = relay.Function(analysis.free_vars(ret), ret)
return y
a = before()
desired_layouts = {
"nn.conv2d": ["NHWC", "HWIO"],
"vision.roi_pool": ["NHWC", "default"],
}
a = run_opt_pass(a, transform.ConvertLayout(desired_layouts))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_default_keyword():
""" Check that the default keyword selects correct TVM default layout. """
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 3, 3, 64))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OHWI",
)
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
w = relay.var("weight", shape=(64, 3, 3, 64))
w = relay.layout_transform(w, "OHWI", "OIHW")
y = relay.nn.conv2d(
x,
w,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
y = relay.Function(analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_different_ops_convert_layout():
"""Check convert layout correctly supports converting the layout of
different ops in the same graph.
"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1", shape=(64, 3, 3, 64))
weight2 = relay.var("weight2", shape=(64, 3, 3, 64), dtype="int8")
weight3 = relay.var("weight3", shape=(64, 3, 3, 64))
out = relay.nn.conv2d(
x,
weight1,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OHWI",
)
out = relay.cast(out, "int8")
out = relay.qnn.op.conv2d(
out,
weight2,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OHWI",
)
out = relay.cast(out, "float32")
out = relay.nn.conv2d_transpose(
out,
weight3,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OHWI",
)
out = relay.Function(analysis.free_vars(out), out)
return out
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1", shape=(64, 3, 3, 64))
weight2 = relay.var("weight2", shape=(64, 3, 3, 64), dtype="int8")
weight3 = relay.var("weight3", shape=(64, 3, 3, 64))
x = relay.layout_transform(x, "NCHW", "NHWC")
weight1 = relay.layout_transform(weight1, "OHWI", "HWIO")
out = relay.nn.conv2d(
x,
weight1,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
out = relay.cast(out, "int8")
out = relay.layout_transform(out, "NHWC", "NCHW")
weight2 = relay.layout_transform(weight2, "OHWI", "OIHW")
out = relay.qnn.op.conv2d(
out,
weight2,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
out = relay.cast(out, "float32")
out = relay.layout_transform(out, "NCHW", "NHWC")
weight3 = relay.layout_transform(weight3, "OHWI", "HWIO")
out = relay.nn.conv2d_transpose(
out,
weight3,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
out = relay.layout_transform(out, "NHWC", "NCHW")
out = relay.Function(analysis.free_vars(out), out)
return out
a = before()
desired_layouts = {
"nn.conv2d": ["NHWC", "HWIO"],
"qnn.conv2d": ["NCHW", "OIHW"],
"nn.conv2d_transpose": ["NHWC", "HWIO"],
}
a = run_opt_pass(a, transform.ConvertLayout(desired_layouts))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_no_desired_layout():
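    """ Check that an op with no desired layout (roi_align) keeps its original layout, forcing a transform before it. """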
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(
x,
weight1,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW",
kernel_layout="OIHW",
)
rois = relay.var("rois", shape=(32, 5))
y = relay.vision.roi_align(
y, rois, pooled_size=(14, 14), spatial_scale=0.0625, sample_ratio=2, layout="NCHW"
)
y = relay.Function(analysis.free_vars(y), y)
return y
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var("weight1", shape=(64, 64, 3, 3))
x = relay.layout_transform(x, "NCHW", "NHWC")
weight1 = relay.layout_transform(weight1, "OIHW", "HWIO")
y = relay.nn.conv2d(
x,
weight1,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.layout_transform(y, "NHWC", "NCHW")
rois = relay.var("rois", shape=(32, 5))
y = relay.vision.roi_align(
y, rois, pooled_size=(14, 14), spatial_scale=0.0625, sample_ratio=2, layout="NCHW"
)
y = relay.Function(analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NHWC", "HWIO"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_convert_with_config():
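    """ Check LayoutConfig: skip_layers=[0] leaves the first conv in NHWC while the second converts to HWNC. """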
def before():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
weight2 = relay.var("weight2", shape=(3, 3, 64, 64))
y2 = relay.nn.conv2d(
y,
weight2,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y2 = relay.nn.relu(y2)
out = relay.Function([x, weight, weight2], y2)
return out
def expected():
x = relay.var("x", shape=(1, 56, 56, 64))
weight = relay.var("weight", shape=(3, 3, 64, 64))
weight2 = relay.var("weight2", shape=(3, 3, 64, 64))
weight2 = relay.layout_transform(weight2, "HWIO", "HWOI")
y = relay.nn.conv2d(
x,
weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
y = relay.nn.relu(y)
y = relay.layout_transform(y, "NHWC", "HWNC")
y2 = relay.nn.conv2d(
y,
weight2,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="HWNC",
kernel_layout="HWOI",
)
y2 = relay.nn.relu(y2)
y2 = relay.layout_transform(y2, "HWNC", "NHWC")
output = relay.Function(relay.analysis.free_vars(y2), y2)
return output
a = before()
layout_config = relay.transform.LayoutConfig(skip_layers=[0])
with layout_config:
a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["HWNC", "default"]}))
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
if __name__ == "__main__":
test_qnn_binary_no_convert_layout()
test_no_convert_layout()
test_conv_convert_layout()
test_conv_nhwc_convert_layout()
test_conv_bias_pool_convert_layout()
test_conv_concat_convert_layout()
test_dual_path_convert_layout()
test_bn_convert_layout()
test_slice_like_convert_layout()
test_transpose_convert_layout()
test_resnet_convert_layout()
test_scalar_convert_layout()
test_conv_bn_convert_layout()
test_qnn_conv_requantize_convert_layout()
test_qnn_conv_concat_convert_layout()
test_qnn_conv_add_convert_layout()
test_qnn_conv_nhwc_convert_layout()
test_conv_convert_kernel_layout()
test_conv_transpose_convert_layout()
test_conv_roi_align_convert_layout()
test_conv_roi_pool_convert_layout()
test_conv_strided_slice_convert_layout()
test_deformable_conv_bias_pool_convert_layout()
test_default_keyword()
test_different_ops_convert_layout()
test_no_desired_layout()
test_convert_with_config()
| apache-2.0 | 7,701,969,506,247,526,000 | 33.609079 | 98 | 0.515613 | false | 3.256987 | true | false | false |
mufaddalq/cloudstack-datera-driver | test/integration/smoke/test_scale_vm.py | 1 | 7802 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for Scaling up Vm
"""
#Import Local Modules
import marvin
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.integration.lib.utils import *
from marvin.integration.lib.base import *
from marvin.integration.lib.common import *
from nose.plugins.attrib import attr
_multiprocess_shared_ = True
class Services:
"""Test VM Life Cycle Services
"""
def __init__(self):
self.services = {
"account": {
"email": "test@test.com",
"firstname": "Test",
"lastname": "User",
"username": "test",
# Random characters are appended in create account to
# ensure unique username generated each time
"password": "password",
},
"small":
# Create a small virtual machine instance with disk offering
{
"displayname": "testserver",
"username": "root", # VM creds for SSH
"password": "password",
"ssh_port": 22,
"hypervisor": 'XenServer',
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"service_offerings":
{
"small":
{
# Small service offering ID to for change VM
# service offering from medium to small
"name": "SmallInstance",
"displaytext": "SmallInstance",
"cpunumber": 1,
"cpuspeed": 100,
"memory": 256,
},
"big":
{
# Big service offering ID to for change VM
"name": "BigInstance",
"displaytext": "BigInstance",
"cpunumber": 1,
"cpuspeed": 100,
"memory": 512,
}
},
#Change this
"template": {
"displaytext": "xs",
"name": "xs",
"passwordenabled": False,
},
"sleep": 60,
"timeout": 10,
#Migrate VM to hostid
"ostype": 'CentOS 5.3 (64-bit)',
# CentOS 5.3 (64-bit)
}
class TestScaleVm(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(TestScaleVm, cls).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
domain = get_domain(cls.api_client, cls.services)
zone = get_zone(cls.api_client, cls.services)
cls.services['mode'] = zone.networktype
template = get_template(
cls.api_client,
zone.id,
cls.services["ostype"]
)
        # Set zone and template for the VM deployment
cls.services["small"]["zoneid"] = zone.id
cls.services["small"]["template"] = template.id
# Create account, service offerings, vm.
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=domain.id
)
cls.small_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offerings"]["small"]
)
cls.big_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offerings"]["big"]
)
#create a virtual machine
cls.virtual_machine = VirtualMachine.create(
cls.api_client,
cls.services["small"],
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.small_offering.id,
mode=cls.services["mode"]
)
cls._cleanup = [
cls.small_offering,
cls.account
]
@classmethod
def tearDownClass(cls):
cls.api_client = super(TestScaleVm, cls).getClsTestClient().getApiClient()
cleanup_resources(cls.api_client, cls._cleanup)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
def tearDown(self):
#Clean up, terminate the created ISOs
cleanup_resources(self.apiclient, self.cleanup)
return
@attr(hypervisor="xenserver")
@attr(tags=["advanced", "basic"])
def test_01_scale_vm(self):
"""Test scale virtual machine
"""
# Validate the following
# Scale up the vm and see if it scales to the new svc offering and is finally in running state
self.debug("Scaling VM-ID: %s to service offering: %s and state %s" % (
self.virtual_machine.id,
self.big_offering.id,
self.virtual_machine.state
))
cmd = scaleVirtualMachine.scaleVirtualMachineCmd()
cmd.serviceofferingid = self.big_offering.id
cmd.id = self.virtual_machine.id
self.apiclient.scaleVirtualMachine(cmd)
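        # List the VM again to confirm the new service offering took effect and the VM is running.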
list_vm_response = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
list_vm_response,
None,
"Check virtual machine is listVirtualMachines"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.id,
self.virtual_machine.id,
"Check virtual machine ID of scaled VM"
)
# VirtualMachine should be updated to tell cloudstack it has PV tools
# available and successfully scaled. We will only mock that behaviour
# here but it is not expected in production since the VM scaling is not
# guaranteed until tools are installed, vm rebooted
self.virtual_machine.update(self.apiclient, isdynamicallyscalable='true')
self.debug("Scaling VM-ID: %s from service offering: %s to new service offering %s and the response says %s" % (
self.virtual_machine.id,
self.virtual_machine.serviceofferingid,
self.big_offering.id,
vm_response.serviceofferingid
))
self.assertEqual(
vm_response.serviceofferingid,
self.big_offering.id,
"Check service offering of the VM"
)
self.assertEqual(
vm_response.state,
'Running',
"Check the state of VM"
)
return
| apache-2.0 | 5,259,124,030,613,860,000 | 33.522124 | 120 | 0.54345 | false | 4.589412 | true | false | false |
beingmeta/mongo-c-driver | build/generate-opts.py | 1 | 15865 | #!/usr/bin/env python
#
# Copyright 2017-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IDL for functions that take flexible options as a bson_t.
Defines the options accepted by functions that receive a const bson_t *opts,
for example mongoc_collection_find_with_opts, mongoc_collection_insert_one,
and many others.
Generates struct types, options parsing code, and RST documentation.
Written for Python 2.6+, requires Jinja 2 for templating.
"""
from collections import OrderedDict
from os.path import basename, dirname, join as joinpath, normpath
import re
from jinja2 import Environment, FileSystemLoader # Please "pip install jinja2".
this_dir = dirname(__file__)
template_dir = joinpath(this_dir, 'opts_templates')
src_dir = normpath(joinpath(this_dir, '../src/libmongoc/src/mongoc'))
doc_includes = normpath(joinpath(this_dir, '../src/libmongoc/doc/includes'))
def flatten(items):
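    """Recursively flatten nested lists into a flat sequence of items."""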
for item in items:
if isinstance(item, list):
# "yield from".
for subitem in flatten(item):
yield subitem
else:
yield item
class Struct(OrderedDict):
def __init__(self, items, opts_name='opts', generate_rst=True,
generate_code=True, allow_extra=True, **defaults):
"""Define an options struct.
- items: List of pairs: (optionName, info)
- opts_name: Name of the const bson_t *opts parameter
- allow_extra: Whether to allow unrecognized options
- defaults: Initial values for options
"""
OrderedDict.__init__(self, list(flatten(items)))
self.is_shared = False
self.opts_name = opts_name
self.generate_rst = generate_rst
self.generate_code = generate_code
self.allow_extra = allow_extra
self.defaults = defaults
def default(self, item, fallback):
return self.defaults.get(item, fallback)
class Shared(Struct):
def __init__(self, items, **defaults):
"""Define a struct that is shared by others."""
super(Shared, self).__init__(items, **defaults)
self.is_shared = True
self.generate_rst = False
read_concern_help = 'Construct a :symbol:`mongoc_read_concern_t` and use :symbol:`mongoc_read_concern_append` to add the read concern to ``opts``. See the example code for :symbol:`mongoc_client_read_command_with_opts`. Read concern requires MongoDB 3.2 or later, otherwise an error is returned.'
read_concern_document_option = ('readConcern', {
'type': 'document',
'help': read_concern_help
})
read_concern_option = ('readConcern', {
'type': 'mongoc_read_concern_t *',
'help': read_concern_help,
'convert': '_mongoc_convert_read_concern'
})
write_concern_option = [
('writeConcern', {
'type': 'mongoc_write_concern_t *',
'convert': '_mongoc_convert_write_concern',
'help': 'Construct a :symbol:`mongoc_write_concern_t` and use :symbol:`mongoc_write_concern_append` to add the write concern to ``opts``. See the example code for :symbol:`mongoc_client_write_command_with_opts`.'
}),
('write_concern_owned', {
'type': 'bool',
'internal': True,
})
]
session_option = ('sessionId', {
'type': 'mongoc_client_session_t *',
'convert': '_mongoc_convert_session_id',
'field': 'client_session',
'help': 'First, construct a :symbol:`mongoc_client_session_t` with :symbol:`mongoc_client_start_session`. You can begin a transaction with :symbol:`mongoc_client_session_start_transaction`, optionally with a :symbol:`mongoc_transaction_opt_t` that overrides the options inherited from |opts-source|, and use :symbol:`mongoc_client_session_append` to add the session to ``opts``. See the example code for :symbol:`mongoc_client_session_t`.'
})
ordered_option = ('ordered', {
'type': 'bool',
'help': 'set to ``false`` to attempt to insert all documents, continuing after errors.'
})
validate_option = ('validate', {
'type': 'bson_validate_flags_t',
'convert': '_mongoc_convert_validate_flags',
'help': 'Construct a bitwise-or of all desired :symbol:`bson_validate_flags_t <bson_validate_with_error>`. Set to ``false`` to skip client-side validation of the provided BSON documents.'
})
collation_option = ('collation', {
'type': 'document',
'help': 'Configure textual comparisons. See :ref:`Setting Collation Order <setting_collation_order>`, and `the MongoDB Manual entry on Collation <https://docs.mongodb.com/manual/reference/collation/>`_. Collation requires MongoDB 3.2 or later, otherwise an error is returned.'
})
array_filters_option = ('arrayFilters', {
'type': 'array',
'help': 'An array of filters specifying to which array elements an update should apply.',
})
upsert_option = ('upsert', {
'type': 'bool',
'help': 'When true, creates a new document if no document matches the query.'
})
bypass_option = ('bypassDocumentValidation', {
'type': 'bool',
'field': 'bypass',
'help': 'Set to ``true`` to skip server-side schema validation of the provided BSON documents.'
})
server_option = ('serverId', {
'type': 'uint32_t',
'convert': '_mongoc_convert_server_id',
'help': 'To target a specific server, include an int32 "serverId" field. Obtain the id by calling :symbol:`mongoc_client_select_server`, then :symbol:`mongoc_server_description_id` on its return value.'
})
hint_option = ('hint', {
'type': 'bson_value_t',
'convert': '_mongoc_convert_hint',
'help': 'A document or string that specifies the index to use to support the query predicate.'
})
opts_structs = OrderedDict([
('mongoc_crud_opts_t', Shared([
write_concern_option,
session_option,
validate_option,
])),
('mongoc_update_opts_t', Shared([
('crud', {'type': 'mongoc_crud_opts_t'}),
bypass_option,
collation_option,
hint_option,
upsert_option,
])),
('mongoc_insert_one_opts_t', Struct([
('crud', {'type': 'mongoc_crud_opts_t'}),
bypass_option
], validate='_mongoc_default_insert_vflags')),
('mongoc_insert_many_opts_t', Struct([
('crud', {'type': 'mongoc_crud_opts_t'}),
ordered_option,
bypass_option,
], validate='_mongoc_default_insert_vflags', ordered='true')),
('mongoc_delete_one_opts_t', Struct([
('crud', {'type': 'mongoc_crud_opts_t'}),
collation_option,
])),
('mongoc_delete_many_opts_t', Struct([
('crud', {'type': 'mongoc_crud_opts_t'}),
collation_option,
])),
('mongoc_update_one_opts_t', Struct([
('update', {'type': 'mongoc_update_opts_t'}),
array_filters_option,
], validate='_mongoc_default_update_vflags')),
('mongoc_update_many_opts_t', Struct([
('update', {'type': 'mongoc_update_opts_t'}),
array_filters_option,
], validate='_mongoc_default_update_vflags')),
('mongoc_replace_one_opts_t', Struct([
('update', {'type': 'mongoc_update_opts_t'}),
], validate='_mongoc_default_replace_vflags')),
('mongoc_bulk_opts_t', Struct([
write_concern_option,
ordered_option,
session_option,
], allow_extra=False, ordered='true')),
('mongoc_bulk_insert_opts_t', Struct([
validate_option,
], validate='_mongoc_default_insert_vflags', allow_extra=False)),
('mongoc_bulk_update_opts_t', Shared([
validate_option,
collation_option,
hint_option,
('upsert', {
'type': 'bool',
'help': 'If true, insert a document if none match ``selector``.'
}),
('multi', {'type': 'bool', 'hidden': True})
])),
('mongoc_bulk_update_one_opts_t', Struct(
[
('update', {'type': 'mongoc_bulk_update_opts_t'}),
array_filters_option,
],
multi='false',
validate='_mongoc_default_update_vflags',
allow_extra=False)),
('mongoc_bulk_update_many_opts_t', Struct(
[
('update', {'type': 'mongoc_bulk_update_opts_t'}),
array_filters_option,
],
multi='true',
validate='_mongoc_default_update_vflags',
allow_extra=False)),
('mongoc_bulk_replace_one_opts_t', Struct(
[('update', {'type': 'mongoc_bulk_update_opts_t'})],
multi='false',
validate='_mongoc_default_replace_vflags',
allow_extra=False)),
('mongoc_bulk_remove_opts_t', Shared([
collation_option,
('limit', {'type': 'int32_t', 'hidden': True})
])),
('mongoc_bulk_remove_one_opts_t', Struct([
('remove', {'type': 'mongoc_bulk_remove_opts_t'}),
], limit=1, allow_extra=False)),
('mongoc_bulk_remove_many_opts_t', Struct([
('remove', {'type': 'mongoc_bulk_remove_opts_t'}),
], limit=0, allow_extra=False)),
('mongoc_change_stream_opts_t', Struct([
('batchSize', {'type': 'int32_t', 'help': 'An ``int32`` representing number of documents requested to be returned on each call to :symbol:`mongoc_change_stream_next`'}),
('resumeAfter', {'type': 'document', 'help': 'A ``Document`` representing the logical starting point of the change stream. The ``_id`` field of any change received from a change stream can be used here. This option is mutually exclusive with ``startAfter`` and ``startAtOperationTime``.'}),
('startAfter', {'type': 'document', 'help': 'A ``Document`` representing the logical starting point of the change stream. Unlike ``resumeAfter``, this can resume notifications after an "invalidate" event. The ``_id`` field of any change received from a change stream can be used here. This option is mutually exclusive with ``resumeAfter`` and ``startAtOperationTime``.'}),
('startAtOperationTime', {'type': 'timestamp', 'help': 'A ``Timestamp``. The change stream only provides changes that occurred at or after the specified timestamp. Any command run against the server will return an operation time that can be used here. This option is mutually exclusive with ``resumeAfter`` and ``startAfter``.'}),
('maxAwaitTimeMS', {'type': 'int64_t', 'convert': '_mongoc_convert_int64_positive', 'help': 'An ``int64`` representing the maximum amount of time a call to :symbol:`mongoc_change_stream_next` will block waiting for data'}),
('fullDocument', {'type': 'utf8', 'help': 'A UTF-8 string. Set this option to "updateLookup" to direct the change stream cursor to lookup the most current majority-committed version of the document associated to an update change stream event.'}),
], fullDocument="default")),
('mongoc_create_index_opts_t', Struct([
write_concern_option,
session_option,
], opts_name='command_opts')),
('mongoc_read_write_opts_t', Struct([
read_concern_document_option,
write_concern_option,
session_option,
collation_option,
server_option,
])),
# Only for documentation - we use mongoc_read_write_opts_t for real parsing.
('mongoc_read_opts_t', Struct([
read_concern_document_option,
session_option,
collation_option,
server_option,
], generate_code=False)),
('mongoc_write_opts_t', Struct([
write_concern_option,
session_option,
collation_option,
server_option,
], generate_code=False)),
('mongoc_gridfs_bucket_opts_t', Struct([
('bucketName', {'type': 'utf8', 'help': 'A UTF-8 string used as the prefix to the GridFS "chunks" and "files" collections. Defaults to "fs". The bucket name, together with the database and suffix collections must not exceed 120 characters. See the manual for `the max namespace length <https://docs.mongodb.com/manual/reference/limits/#Namespace-Length>`_.'}),
('chunkSizeBytes', {'type': 'int32_t', 'convert': '_mongoc_convert_int32_positive', 'help': 'An ``int32`` representing the chunk size. Defaults to 255KB.'}),
write_concern_option,
read_concern_option
], bucketName="fs", chunkSizeBytes=(255 * 1024))),
('mongoc_gridfs_bucket_upload_opts_t', Struct([
('chunkSizeBytes', {'type': 'int32_t', 'convert': '_mongoc_convert_int32_positive', 'help': 'An ``int32`` chunk size to use for this file. Overrides the ``chunkSizeBytes`` set on ``bucket``.'}),
('metadata', {'type': 'document', 'help': 'A :symbol:`bson_t` representing metadata to include with the file.'})
])),
('mongoc_aggregate_opts_t', Struct([
read_concern_option,
write_concern_option,
session_option,
bypass_option,
collation_option,
server_option,
('batchSize', {'type': 'int32_t', 'help': 'An ``int32`` representing number of documents requested to be returned on each call to :symbol:`mongoc_cursor_next`', 'check_set': True})
]))
])
header_comment = """/**************************************************
*
* Generated by build/%s.
*
* DO NOT EDIT THIS FILE.
*
*************************************************/
/* clang-format off */""" % basename(__file__)
def paths(struct):
"""Sequence of path, option name, option info."""
for option_name, info in struct.items():
the_type = info['type']
the_field = info.get('field', option_name)
if the_type in opts_structs:
# E.g., the type is mongoc_crud_opts_t. Recurse.
sub_struct = opts_structs[the_type]
for path, sub_option_name, sub_info in paths(sub_struct):
yield ('%s.%s' % (the_field, path),
sub_option_name,
sub_info)
else:
yield the_field, option_name, info
def path_to(the_type, the_field):
"""Like "mongoc_update_one_opts->update.crud.write_concern_owned"."""
for path, name, info in paths(opts_structs[the_type]):
if name == the_field:
return path
raise ValueError(
"No field '%s' in '%s'" % (the_field, the_type))
env = Environment(loader=FileSystemLoader(template_dir),
trim_blocks=True,
extensions=['jinja2.ext.loopcontrols'])
files = ["mongoc-opts-private.h", "mongoc-opts.c"]
for file_name in files:
print(file_name)
with open(joinpath(src_dir, file_name), 'w+') as f:
t = env.get_template(file_name + ".template")
f.write(t.render(globals()))
f.write('\n')
def document_opts(struct, f):
for option_name, info in struct.items():
if info.get('internal') or info.get('hidden'):
continue
the_type = info['type']
if the_type in opts_structs:
# E.g., the type is mongoc_crud_opts_t. Recurse.
document_opts(opts_structs[the_type], f)
continue
assert 'help' in info, "No 'help' for '%s'" % option_name
f.write("* ``{option_name}``: {info[help]}\n".format(**locals()))
for struct_name, struct in opts_structs.items():
if not struct.generate_rst:
continue
name = re.sub(r'mongoc_(\w+)_t', r'\1', struct_name).replace('_', '-')
file_name = name + '.txt'
print(file_name)
f = open(joinpath(doc_includes, file_name), 'w')
f.write(
"``%s`` may be NULL or a BSON document with additional"
" command options:\n\n" % struct.opts_name)
document_opts(struct, f)
f.close()
| apache-2.0 | 2,346,167,994,309,864,400 | 38.6625 | 443 | 0.625969 | false | 3.640431 | false | false | false |
wadevries/froggle | froggle.py | 1 | 5491 | #!/usr/bin/env python
import argparse
from collections import defaultdict
import datetime
import json
import os
from freckle_client.client import FreckleClientV2
from api_client import TogglClientApi
FRECKLE_PROJECTS = None
def get_freckle_projects():
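    """Fetch the Freckle project (id, name) pairs once and cache them in FRECKLE_PROJECTS."""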
global FRECKLE_PROJECTS
if FRECKLE_PROJECTS is None:
FRECKLE_PROJECTS = [(p['id'], p['name']) for p in freckle.fetch_json('projects')]
return FRECKLE_PROJECTS
def prompt_project_mapping(project_id, project_name):
# Fetch all Freckle projects
freckle_projects = get_freckle_projects()
print 'Select Project in Freckle which corresponds to \'{} ({})\' from Toggl'.format(project_name, project_id)
print
for i, (id_, name) in enumerate(freckle_projects, 1):
print "{:2} {}".format(i, name)
print
print ' 0: - Skip this project -'
print
selected = raw_input('>> ')
if selected == '0':
return None
print "Selected '{}'".format(freckle_projects[int(selected)-1][1])
return freckle_projects[int(selected)-1][0]
def create_freckle_entry(date, project_id, description, minutes):
data = {
'date': date,
'project_id': project_id,
'description': u'#toggl {}'.format(description),
'minutes': minutes,
}
return freckle.fetch_json('entries', 'POST', post_args=data)
def run(start_date, end_date):
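    """Copy Toggl entries between start_date and end_date to Freckle, summed per (date, project, description)."""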
collected_entries = defaultdict(int)
# 1. Fetch all time entries from Toggl
time_entries = toggl.query('/time_entries', {'start_date': start_date.isoformat()+'+00:00',
'end_date': end_date.isoformat()+'+00:00'})
if time_entries.status_code != 200:
print time_entries.content
print time_entries.url
return
for entry in time_entries.json():
# Projectless entries are skipped
if 'pid' not in entry:
continue
# Determine target project
if str(entry['pid']) not in PROJECT_MAP:
# Fetch project info
project_info = toggl.query('/projects/{}'.format(entry['pid'])).json()['data']
project_id, project_name = project_info['id'], project_info['name']
freckle_project_id = prompt_project_mapping(project_id, project_name)
PROJECT_MAP[str(project_id)] = freckle_project_id
if PROJECT_MAP[str(entry['pid'])] is None:
continue
# Construct request to send to Freckle:
collected_entries[(
entry['start'].split('T')[0],
PROJECT_MAP[str(entry['pid'])],
entry.get('description', ''))
] += entry['duration']
# Create the "toggl" tag
print "Creating toggl tag: {}".format(freckle.fetch_json('tags', 'POST', post_args={'names': ['toggl']}))
# 5. Create time entries in Freckle
print "Creating Freckle entries:"
for ((date, project_id, description), seconds) in sorted(collected_entries.items()):
minutes = seconds / 60
response = create_freckle_entry(date, project_id, description, minutes)
print u"{date} {project[name]:30} {minutes:-3} {description}".format(**response)
def valid_date(s):
try:
return datetime.datetime.strptime(s, "%Y-%m-%d")
except ValueError:
msg = "Not a valid date: '{0}'.".format(s)
raise argparse.ArgumentTypeError(msg)
def load_config():
filename = os.path.expanduser('~/.froggle')
if os.path.exists(filename):
print "Loading tokens from config"
with open(filename, 'r') as f:
return json.load(f)
return {}
def save_config(config):
filename = os.path.expanduser('~/.froggle')
with open(filename, 'w') as f:
return json.dump(config, f, indent=4)
def start_of_today():
return datetime.datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
if __name__ == '__main__':
parser = argparse.ArgumentParser('Copy time entries from Toggl to Freckle')
parser.add_argument('--start-date', type=valid_date, default=start_of_today() - datetime.timedelta(days=1, microseconds=1))
a = parser.add_argument('--end-date', type=valid_date, default=start_of_today() - datetime.timedelta(microseconds=1),
required=False)
freckle_token_arg = parser.add_argument('--freckle-token')
toggl_token_arg = parser.add_argument('--toggl-token')
options = parser.parse_args()
config = load_config() if not options.freckle_token or not options.toggl_token else {}
if (not config or not config.get('freckle_token')) and not options.freckle_token:
raise argparse.ArgumentError(freckle_token_arg, "No Freckle token provided")
if options.freckle_token:
config['freckle_token'] = options.freckle_token
if (not config or not config.get('toggl_token')) and not options.toggl_token:
raise argparse.ArgumentError(toggl_token_arg, "No Toggl token provided")
if options.toggl_token:
config['toggl_token'] = options.toggl_token
global freckle, toggl, PROJECT_MAP
toggl = TogglClientApi({'token': config['toggl_token'], 'user-agent': 'Froggle'})
freckle = FreckleClientV2(config['freckle_token'])
PROJECT_MAP = config.get('project_map', {})
if options.end_date < options.start_date:
raise argparse.ArgumentError(a, "Start date should not come after end date")
run(options.start_date, options.end_date)
config['project_map'] = PROJECT_MAP
save_config(config)
| mit | 8,678,679,941,755,807,000 | 33.10559 | 127 | 0.637771 | false | 3.53119 | true | false | false |
bgossele/geminicassandra | geminicassandra/sql_utils.py | 1 | 2703 | """
these are utilities to parse and transform SQL statements
"""
import re
import sys
def get_select_cols_and_rest(query):
"""
    Separate the list of selected columns from
the rest of the query
Returns:
1. a list of the selected columns
2. a string of the rest of the query after the SELECT
"""
from_loc = query.lower().find("from")
raw_select_clause = query[0:from_loc].rstrip()
rest_of_query = query[from_loc:len(query)]
# remove the SELECT keyword from the query
select_pattern = re.compile("select", re.IGNORECASE)
raw_select_clause = select_pattern.sub('', raw_select_clause)
    # now create a list of the SELECT'ed columns, stripped of whitespace
selected_columns = raw_select_clause.split(',')
selected_columns = [c.strip() for c in selected_columns]
return selected_columns, rest_of_query
def get_query_parts(query):
"""
    Split a CQL query into its selected columns, source table, where clause, and trailing clauses.
"""
select_loc = query.lower().find('select')
from_loc = query.lower().find('from')
if from_loc == -1:
sys.exit("ERROR: query must contain FROM <table>")
from_end = len(query)
where_loc = query.lower().find("where")
if where_loc > -1:
from_end = where_loc
where_end = len(query)
for keyword in ["order by", "limit", "allow_filtering"]:
        # match trailing keywords case-insensitively, like SELECT/FROM above
        stop = query.lower().find(keyword)
if stop > -1:
from_end = min(stop, from_end)
where_end = min(stop, where_end)
where_clause = ""
rest = ""
from_table = query[from_loc + 4: from_end].strip()
select_clause = query[select_loc:from_loc]
# remove the SELECT keyword from the query
select_pattern = re.compile("select", re.IGNORECASE)
select_clause = select_pattern.sub('', select_clause)
    # now create and iterate through a list of the SELECT'ed columns
selected_columns = select_clause.split(',')
selected_columns = [c.strip() for c in selected_columns]
if where_loc > -1:
where_clause = query[where_loc + 5: where_end].strip()
if where_end < len(query):
rest = query[where_end:].strip()
return selected_columns, from_table, where_clause, rest
def ensure_columns(query, cols):
"""
if a query is missing any of these list of columns, add them
and return the new query string
"""
sel_cols, rest = get_select_cols_and_rest(query)
sel_cols = [x.lower() for x in sel_cols]
for c in cols:
c = c.lower()
if c not in sel_cols:
sel_cols += [c]
sel_string = ", ".join(sel_cols)
return "select {sel_string} {rest}".format(**locals())
| mit | 36,348,952,126,758,340 | 28.380435 | 71 | 0.613393 | false | 3.682561 | false | false | false |
springmeyer/djmapnik | djmapnik/utils.py | 1 | 2379 | # std lib
import os
import tempfile
import platform
from subprocess import Popen, PIPE
# mapnik
import mapnik
def call(cmd,fail=False):
try:
response = Popen(cmd.split(' '),stdin=PIPE, stdout=PIPE, stderr=PIPE)
cm = response.communicate()
return cm[0]
except Exception, e:
if fail:
raise SystemExit(e)
else:
return None
def open_image(filename, app=None):
if os.name == 'nt':
if app:
raise SystemExit('Overriding default image viewer not supported on Win32')
call('start %s' % filename.replace('/','\\'))
elif platform.uname()[0] == 'Linux':
if app:
call('%s %s' % (app, filename))
else:
try:
                cmd = 'xdg-open %s' % filename
                Popen(cmd.split(' '))
            except OSError:
                try:
                    cmd = 'gthumb %s' % filename
                    Popen(cmd.split(' '))
                except OSError:
                    cmd = 'display %s' % filename
Popen(cmd.split(' '))
elif platform.uname()[0] == 'Darwin':
if app:
call('open %s -a %s' % (filename, app))
else:
call('open %s' % filename)
def get_default_style(geometry_type):
""" Ultra simple default style for quick setup or debugging.
"""
style, rule = mapnik.Style(), mapnik.Rule()
gtype = geometry_type.lower()
if 'poly' in gtype:
rule.symbols.append(mapnik.PolygonSymbolizer(mapnik.Color('steelblue')))
rule.symbols.append(mapnik.LineSymbolizer(mapnik.Color('steelblue'),.5))
elif 'line' in gtype:
rule.symbols.append(mapnik.LineSymbolizer(mapnik.Color('steelblue'),1.5))
else:
point = mapnik.PointSymbolizer()
point.allow_overlap = True
rule.symbols.append(point)
style.rules.append(rule)
return style
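# Quick usage sketch (hypothetical layer `lyr`; not from the original source):
#   sty = get_default_style('polygon')  # steelblue fill plus outline
#   show(lyr, sty)                      # renders to a temporary PNG and opens it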
def show(lyr,sty,width=400,height=300,filename=None,app=None):
m = mapnik.Map(width,height,lyr.srs)
m.background = mapnik.Color('transparent')
lyr.styles.append('style')
m.append_style('style',sty)
m.layers.append(lyr)
m.zoom_all()
if not filename:
(handle, filename) = tempfile.mkstemp('.png', 'django-map-')
os.close(handle)
mapnik.render_to_file(m,str(filename))
open_image(str(filename))
return m | bsd-3-clause | -4,765,666,503,474,054,000 | 30.733333 | 86 | 0.570828 | false | 3.626524 | false | false | false |
christianurich/VIBe2UrbanSim | 3rdparty/opus/src/opus_visum/models/visum_functions.py | 2 | 1174 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
import win32com.client as com
def load_version_file(visum_dir, version_filename, visum_version_number=10):
#Start up Visum COM server - requires win32com library
try:
Visum = com.Dispatch("visum.visum." + str(visum_version_number)) #latest version of VISUM registered as COM server
except Exception:
error_msg = "Starting Visum COM Server Failed"
raise StandardError(error_msg)
#Set directories
try:
Visum.SetPath(2, visum_dir) #version file
Visum.SetPath(3, visum_dir) #od matrix file
Visum.SetPath(4, visum_dir) #skim matrix file
Visum.SetPath(12,visum_dir) #procedure file
except Exception:
error_msg = "Setting Visum Directories failed"
raise StandardError(error_msg)
#Load version file
try:
Visum.LoadVersion(version_filename)
except Exception:
error_msg = "Loading Visum version file failed"
raise StandardError(error_msg)
#Return Visum object
return Visum
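# Hedged usage sketch (directory, file name and version number are assumptions):
#   Visum = load_version_file('C:\\visum\\project', 'model.ver', visum_version_number=10)
# After loading, the version, matrix and procedure paths all point at visum_dir.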
| gpl-2.0 | -5,944,514,526,650,683,000 | 32.529412 | 122 | 0.660988 | false | 3.612308 | false | false | false |
opennode/opennode-knot | opennode/knot/model/console.py | 2 | 6957 | from __future__ import absolute_import
import subprocess
import time
from grokcore.component import context, baseclass
from twisted.internet import defer
from zope import schema
from zope.component import provideSubscriptionAdapter
from zope.interface import Interface, implements
from opennode.oms.model.model.actions import ActionsContainerExtension, Action, action
from opennode.oms.model.model.base import Container, ReadonlyContainer
from opennode.oms.endpoint.ssh.terminal import RESET_COLOR
from opennode.oms.endpoint.webterm.ssh import ssh_connect_interactive_shell
from opennode.oms.security.directives import permissions
from opennode.oms.zodb import db
class IConsole(Interface):
"""Console node."""
class ITextualConsole(Interface):
"""Textual console."""
class IGraphicalConsole(Interface):
"""Graphical console."""
class ITtyConsole(IConsole):
pty = schema.TextLine(title=u"pty")
class ISshConsole(IConsole):
user = schema.TextLine(title=u"user")
hostname = schema.TextLine(title=u"hostname")
port = schema.Int(title=u"port")
class IOpenVzConsole(IConsole):
cid = schema.Int(title=u"cid")
class IVncConsole(IConsole):
hostname = schema.TextLine(title=u"hostname")
port = schema.Int(title=u"port")
ws_url = schema.Int(title=u"ws_url", required=False, readonly=True)
class TtyConsole(ReadonlyContainer):
implements(ITtyConsole, ITextualConsole)
permissions(dict(pty=('read', 'modify')))
def __init__(self, name, pty):
self.inherit_permissions = True
self.__name__ = name
self.pty = pty
class SshConsole(ReadonlyContainer):
implements(ISshConsole, ITextualConsole)
permissions(dict(user=('read', 'modify'),
hostname=('read', 'modify'),
port=('read', 'modify'),
))
def __init__(self, name, user, hostname, port):
self.inherit_permissions = True
self.__name__ = name
self.user = user
self.hostname = hostname
self.port = port
class OpenVzConsole(ReadonlyContainer):
implements(IOpenVzConsole, ITextualConsole)
permissions(dict(cid=('read', 'modify')))
def __init__(self, name, cid):
self.inherit_permissions = True
self.__name__ = name
self.cid = cid
class VncConsole(ReadonlyContainer):
implements(IVncConsole, IGraphicalConsole)
permissions(dict(hostname=('read', 'modify'),
port=('read', 'modify'),
))
proxy_processes = {}
def __init__(self, hostname, port):
self.inherit_permissions = True
self.__name__ = 'vnc'
self.hostname = hostname
self.port = port
self._ensure_proxy()
def _ensure_proxy(self):
if self.hostname in self.proxy_processes:
            # check that the existing proxy process uses the matching vnc port,
            # otherwise kill it
if self.proxy_processes[self.hostname].port != self.port:
self.proxy_processes[self.hostname].kill()
del self.proxy_processes[self.hostname]
if self.hostname not in self.proxy_processes:
self.proxy_processes[self.hostname] = VncProxyProcess(self.hostname, self.port)
@property
def ws_url(self):
self._ensure_proxy()
proxy_port = self.proxy_processes[self.hostname].proxy_port
return 'ws://%s:%s/' % (self.hostname, proxy_port)
class VncProxyProcess(object):
def __init__(self, hostname, port):
self.port = port
self.proxy_port = port + 1000
self.process = subprocess.Popen(['bin/wsproxy', str(self.proxy_port),
'%s:%s' % (hostname, self.port)])
def kill(self):
self.process.terminate()
time.sleep(0.5)
self.process.kill()
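# Note (illustrative): the websocket proxy listens on vnc_port + 1000, so a VNC
# server on port 5900 is exposed as ws://<hostname>:6900/ via VncConsole.ws_url.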
class Consoles(Container):
__name__ = 'consoles'
__contains__ = IConsole
inherit_permissions = True
class AttachAction(Action):
"""Attach to textual console"""
baseclass()
action('attach')
@defer.inlineCallbacks
def execute(self, cmd, args):
self.closed = False
self.protocol = cmd.protocol
self.transport = self
size = (cmd.protocol.width, cmd.protocol.height)
yield self._do_connection(size)
self.deferred = defer.Deferred()
yield self.deferred
def write(self, data):
if not self.closed:
self.protocol.terminal.write(data)
def loseConnection(self):
self.closed = True
self.protocol.terminal.resetPrivateModes('1')
self.protocol.terminal.write(RESET_COLOR)
self.deferred.callback(None)
def _set_channel(self, channel):
loseConnection = self.loseConnection
class SshSubProtocol(object):
def __init__(self, parent):
self.parent = parent
self.buffer = []
def dataReceived(self, data):
for ch in data:
if ch == '\x1d':
# TODO: really close the ssh connection
loseConnection()
channel.write(data)
self.protocol.sub_protocol = SshSubProtocol(self.protocol)
class SshAttachAction(AttachAction):
context(ISshConsole)
@db.ro_transact
def _do_connection(self, size):
self.write("Attaching to %s@%s. Use ^] to force exit.\n" % (self.context.user.encode('utf-8'),
self.context.hostname.encode('utf-8')))
ssh_connect_interactive_shell(self.context.user, self.context.hostname, self.context.port,
self.transport, self._set_channel, size)
class HypervisorSshAttachAction(AttachAction):
"""For consoles that are attached by running a command on the hypervisor host."""
baseclass()
@db.ro_transact
def _do_connection(self, size):
self.write("Attaching to %s. Use ^] to force exit.\n" % self.name)
phy = self.context.__parent__.__parent__.__parent__.__parent__
ssh_connect_interactive_shell('root', phy.hostname, 22, self.transport, self._set_channel,
size, self.command)
class TtyAttachAction(HypervisorSshAttachAction):
context(ITtyConsole)
@property
def command(self):
return 'screen -xRR %s %s' % (self.context.pty.replace('/', ''), self.context.pty)
@property
def name(self):
return self.context.pty.encode('utf-8')
class OpenvzAttachAction(HypervisorSshAttachAction):
context(IOpenVzConsole)
@property
def command(self):
return 'vzctl enter %s' % (self.context.cid)
@property
def name(self):
return self.context.cid
provideSubscriptionAdapter(ActionsContainerExtension, adapts=(IConsole, ))
| gpl-3.0 | -2,150,379,436,596,722,200 | 27.9875 | 107 | 0.628001 | false | 4.000575 | false | false | false |
bartoszj/Mallet | mallet/UIKit/UIScreen.py | 1 | 3798 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2013 Bartosz Janda
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from .. import helpers
from ..common import SummaryBase
from ..Foundation import NSObject
from ..CoreGraphics import CGRect
class UIScreenSyntheticProvider(NSObject.NSObjectSyntheticProvider):
"""
Class representing UIScreen.
"""
def __init__(self, value_obj, internal_dict):
super(UIScreenSyntheticProvider, self).__init__(value_obj, internal_dict)
self.type_name = "UIScreen"
self.register_child_value("bounds", ivar_name="_bounds",
provider_class=CGRect.CGRectSyntheticProvider,
summary_function=self.get_bounds_summary)
self.register_child_value("scale", ivar_name="_scale",
primitive_value_function=SummaryBase.get_float_value,
summary_function=self.get_scale_summary)
self.register_child_value("horizontal_scale", ivar_name="_horizontalScale",
primitive_value_function=SummaryBase.get_float_value,
summary_function=self.get_horizontal_scale_summary)
self.register_child_value("interface_idiom", ivar_name="_userInterfaceIdiom",
primitive_value_function=self.get_interface_idiom_value,
summary_function=self.get_interface_idiom_summary)
@staticmethod
def get_bounds_summary(provider):
return "size=({}, {})".format(SummaryBase.formatted_float(provider.size_provider.width_value),
SummaryBase.formatted_float(provider.size_provider.height_value))
@staticmethod
def get_scale_summary(value):
return "scale={}".format(SummaryBase.formatted_float(value))
@staticmethod
def get_horizontal_scale_summary(value):
return "hScale={:.0f}".format(SummaryBase.formatted_float(value))
@staticmethod
def get_interface_idiom_value(value):
interface_idiom_value = value.GetValueAsSigned()
interface_idiom_name = "Unknown"
if interface_idiom_value == 0:
interface_idiom_name = "Phone"
elif interface_idiom_value == 1:
interface_idiom_name = "Pad"
return interface_idiom_name
@staticmethod
def get_interface_idiom_summary(value):
return "idiom={}".format(value)
def summaries_parts(self):
return [self.bounds_summary, self.scale_summary, self.interface_idiom_summary]
def summary_provider(value_obj, internal_dict):
return helpers.generic_summary_provider(value_obj, internal_dict, UIScreenSyntheticProvider)
| mit | -3,023,414,776,274,798,600 | 44.214286 | 103 | 0.675355 | false | 4.234114 | false | false | false |
madfist/aoc2016 | aoc2016/day21/main.py | 1 | 4813 | import sys
import re
def rotate(i,d,s):
if d == 'right':
i = -i
return s[i:] + s[:i]
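# Illustrative examples (not from the original source):
#   >>> rotate(1, 'left', 'abcde')
#   'bcdea'
#   >>> rotate(1, 'right', 'abcde')
#   'eabcd'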
def rotate_pos_table(n):
table = [-1]*n
for i in range(n):
j = i + 1
if i >= 4:
j += 1
if j >= n:
j -= n
if i + j >= n:
table[i+j-n] = j
else:
table[i + j] = j
return table
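# For the puzzle's 8-character passwords this works out to (illustrative;
# values computed from the function above):
#   >>> rotate_pos_table(8)
#   [1, 1, 6, 2, 7, 3, 0, 4]
# table[final_index] is the right-rotation that "rotate based on position"
# applied, so unscramble() can undo it with a left rotation.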
def scramble(start, data):
steps = data.split('\n')
for s in steps:
swap_pos_cmd = re.match(r'swap\ position\ (\d+)\ with\ position\ (\d+)', s)
swap_let_cmd = re.match(r'swap\ letter\ (\S)\ with\ letter\ (\S)', s)
reverse_cmd = re.match(r'reverse\ positions\ (\d+)\ through\ (\d+)', s)
rotate_cmd = re.match(r'rotate\ (.+?)\ (\d+)\ step[s]*', s)
rotate_p_cmd = re.match(r'rotate\ based\ on\ position\ of\ letter\ (\S)', s)
move_pos_cmd = re.match(r'move\ position\ (\d+)\ to\ position\ (\d+)', s)
if swap_pos_cmd is not None:
x = start[int(swap_pos_cmd.group(1))]
y = start[int(swap_pos_cmd.group(2))]
start = re.sub('#', y, re.sub(y, x, re.sub(x, '#', start)))
elif swap_let_cmd is not None:
x = swap_let_cmd.group(1)
y = swap_let_cmd.group(2)
start = re.sub('#', y, re.sub(y, x, re.sub(x, '#', start)))
elif reverse_cmd is not None:
x = int(reverse_cmd.group(1))
y = int(reverse_cmd.group(2))
middle = start[y:x-1:-1]
if x == 0:
middle = start[y:x:-1] + start[x]
start = start[:x] + middle + start[y+1:]
elif rotate_cmd is not None:
x = int(rotate_cmd.group(2))
start = rotate(x, rotate_cmd.group(1), start)
elif rotate_p_cmd is not None:
c = rotate_p_cmd.group(1)
x = start.find(c) + 1
if x >= 5:
x += 1
x = x % len(start)
start = rotate(x, 'right', start)
elif move_pos_cmd is not None:
x = int(move_pos_cmd.group(1))
y = int(move_pos_cmd.group(2))
if x < y:
start = start[:x] + start[x+1:y+1] + start[x] + start[y+1:]
elif x > y:
start = start[:y] + start[x] + start[y:x] + start[x+1:]
else:
print("Invalid step:", s)
return start
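# Single-step example from the Advent of Code 2016 day 21 puzzle text:
#   >>> scramble('abcde', 'swap position 4 with position 0')
#   'ebcda'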
def unscramble(start, data):
steps = data.split('\n')
for s in steps[::-1]:
swap_pos_cmd = re.match(r'swap\ position\ (\d+)\ with\ position\ (\d+)', s)
swap_let_cmd = re.match(r'swap\ letter\ (\S)\ with\ letter\ (\S)', s)
reverse_cmd = re.match(r'reverse\ positions\ (\d+)\ through\ (\d+)', s)
rotate_cmd = re.match(r'rotate\ (.+?)\ (\d+)\ step[s]*', s)
rotate_p_cmd = re.match(r'rotate\ based\ on\ position\ of\ letter\ (\S)', s)
move_pos_cmd = re.match(r'move\ position\ (\d+)\ to\ position\ (\d+)', s)
if swap_pos_cmd is not None:
x = start[int(swap_pos_cmd.group(1))]
y = start[int(swap_pos_cmd.group(2))]
start = re.sub('#', y, re.sub(y, x, re.sub(x, '#', start)))
elif swap_let_cmd is not None:
x = swap_let_cmd.group(1)
y = swap_let_cmd.group(2)
start = re.sub('#', y, re.sub(y, x, re.sub(x, '#', start)))
elif reverse_cmd is not None:
x = int(reverse_cmd.group(1))
y = int(reverse_cmd.group(2))
middle = start[y:x-1:-1]
if x == 0:
middle = start[y:x:-1] + start[x]
start = start[:x] + middle + start[y+1:]
elif rotate_cmd is not None:
x = int(rotate_cmd.group(2))
if rotate_cmd.group(1) == 'left':
x = -x
start = start[x:] + start[:x]
elif rotate_p_cmd is not None:
c = rotate_p_cmd.group(1)
x = start.find(c)
r = rotate_pos_table(len(start))[x]
start = rotate(r, 'left', start)
elif move_pos_cmd is not None:
x = int(move_pos_cmd.group(2))
y = int(move_pos_cmd.group(1))
if x < y:
start = start[:x] + start[x+1:y+1] + start[x] + start[y+1:]
elif x > y:
start = start[:y] + start[x] + start[y:x] + start[x+1:]
else:
print("Invalid step:", s)
return start
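# Note (illustrative): unscramble() replays the steps in reverse order; for
# "rotate based on position" it leans on rotate_pos_table(), whose inverse is
# only unambiguous for length-8 passwords like the real puzzle input.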
def main():
if (len(sys.argv) < 4):
print("Usage python3", sys.argv[0], "<input> enc|dec <start>")
exit(1)
with open(sys.argv[1], 'r') as input:
data = input.read()
start = sys.argv[3]
if sys.argv[2] == 'enc':
print("Result:", scramble(start, data))
elif sys.argv[2] == 'dec':
print("Result:", unscramble(start, data))
if __name__ == '__main__':
main() | mit | 6,083,369,895,224,008,000 | 37.206349 | 84 | 0.468523 | false | 3.061705 | false | false | false |
mennanov/django-blueprint | project_name/apps/staticpages/models.py | 1 | 3932 | # -*- coding: utf-8 -*-
from urlparse import urlparse
import os.path
from django.core.urlresolvers import resolve, Resolver404, reverse
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.validators import RegexValidator
from mptt.models import MPTTModel, TreeForeignKey, TreeManager
class PageManager(TreeManager):
"""
Page custom manager
"""
def get_active_page(self, uri):
"""
Get current active page from request
"""
try:
page = self.get(url=unicode(uri), published=True)
except Page.DoesNotExist:
parsed = urlparse(uri)
try:
page = self.get(url=unicode(parsed.path), published=True)
except Page.DoesNotExist:
try:
page = self.get(url__startswith=unicode(parsed.path), published=True)
except Page.DoesNotExist:
# try to find related page
try:
view_func = resolve(parsed.path).func
if hasattr(view_func, 'related_page_url'):
page = self.get_active_page(getattr(view_func, 'related_page_url'))
else:
raise
except Resolver404:
raise
return page
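# Illustrative lookup (hypothetical URL; not from the original source):
#   Page.objects.get_active_page('/news/')
# falls back to path-only, prefix and related-page matches before letting
# Page.DoesNotExist propagate.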
class File(models.Model):
"""
File attached to a page
"""
name = models.CharField(_(u'name'), max_length=140, null=True, blank=True)
file = models.FileField(_(u'file'), upload_to='staticpages')
page = models.ForeignKey('Page', related_name='files')
def __unicode__(self):
return self.name
@property
def extension(self):
return os.path.splitext(self.file.path)[1]
class PageImage(models.Model):
"""
Image in page
"""
title = models.CharField(_(u'title'), max_length=140, null=True, blank=True)
image = models.FileField(_(u'image'), upload_to='staticpages')
page = models.ForeignKey('Page', related_name='images')
def __unicode__(self):
return self.title
class Meta:
verbose_name = _(u'image')
verbose_name_plural = _(u'images')
class Page(MPTTModel):
"""Page"""
name = models.CharField(_(u'name in the menu'), max_length=70)
parent = TreeForeignKey('self', verbose_name=_(u'parent page'), null=True, blank=True, related_name='children')
title = models.CharField(_(u'title tag'), max_length=255)
url = models.CharField(_(u'url'), max_length=255,
help_text=_(u'Page full path: /news/ or /'), unique=True,
validators=[
RegexValidator('^/([a-z0-9-_A-Z]+/)*', message=_(u'Must start and end with slash'))])
text = models.TextField(_(u'text'), blank=True, null=True, help_text=_(u'HTML content of the page if it is static'))
template = models.CharField(_(u'template'), choices=[('default', _(u'Default')), ('wholesale', _(u'Wholesale'))],
default='default', max_length=10)
published = models.BooleanField(_(u'published'),
help_text=_(u'The page will not be shown at the site until it is published'),
db_index=True, default=False)
meta_keywords = models.TextField(_(u'meta keywords'), blank=True, null=True)
meta_description = models.TextField(_(u'meta description'), blank=True, null=True)
position = models.PositiveSmallIntegerField("Position", null=False, default=0)
objects = PageManager()
class MPTTMeta:
order_insertion_by = ['position']
def get_absolute_url(self):
return self.url
def __unicode__(self):
return self.name
class Meta:
verbose_name = _(u'page')
verbose_name_plural = _(u'pages')
ordering = ['position']
| gpl-2.0 | 2,181,705,280,944,567,800 | 35.407407 | 120 | 0.577823 | false | 4.205348 | false | false | false |
UnnikrishnanBhargavakurup/git-report | util.py | 2 | 3002 | import locale
import sys
import os
import os.path
import subprocess
if sys.platform == 'win32':
import ctypes
def safe_unicode(s):
    '''Creates a unicode object from string s.
    It tries to decode the string as UTF-8, falling back to the current
    locale encoding and then ISO-8859-1 if earlier attempts fail.'''
if type(s) == unicode:
return s
elif isinstance(s, Exception):
s = str(s)
try:
return s.decode('UTF-8')
except UnicodeDecodeError:
pass
try:
lang,encoding = locale.getdefaultlocale()
except ValueError:
lang,encoding = 'C','UTF-8'
if encoding != 'UTF-8':
try:
return s.decode(encoding)
except UnicodeDecodeError:
pass
return s.decode('ISO-8859-1')
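# Illustrative example (Python 2; not from the original source):
#   >>> safe_unicode('caf\xc3\xa9')  # UTF-8 encoded bytes
#   u'caf\xe9'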
def utf8_str(s):
s = safe_unicode(s)
return s.encode('UTF-8')
def invert_hash(h):
ih = {}
for key,value in h.iteritems():
if value not in ih:
ih[value] = []
ih[value].append(key)
return ih
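# Illustrative example (not from the original source; list order may vary):
#   >>> invert_hash({'a': 1, 'b': 1, 'c': 2})
#   {1: ['a', 'b'], 2: ['c']}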
def find_binary(locations):
searchpath_sep = ';' if sys.platform == 'win32' else ':'
searchpaths = os.environ['PATH'].split(searchpath_sep)
for location in locations:
if '{PATH}' in location:
for searchpath in searchpaths:
s = location.replace('{PATH}', searchpath)
if os.path.isfile(s) and os.access(s, os.X_OK):
yield s
elif os.path.isfile(location) and os.access(location, os.X_OK):
yield location
def is_binary_file(file):
    # Returns True if the file cannot be decoded as UTF-8
    # and more than 20% of its bytes are binary characters
# Read file
try:
f = open(file)
buf = f.read()
f.close()
except OSError:
return False
# Decode as UTF-8
try:
ubuf = unicode(buf, 'utf-8')
return False
except UnicodeDecodeError:
pass
# Check number of binary characters
    threshold = len(buf) / 5
    binary_chars = 0
    for c in buf:
        oc = ord(c)
        if oc > 0x7f or (oc < 0x1f and c != '\r' and c != '\n'):
            binary_chars += 1
    if binary_chars > threshold:
return True
return False
PROCESS_TERMINATE = 1
def kill_subprocess(process):
if sys.platform == 'win32':
handle = ctypes.windll.kernel32.OpenProcess(PROCESS_TERMINATE, False, process.pid)
ctypes.windll.kernel32.TerminateProcess(handle, -1)
ctypes.windll.kernel32.CloseHandle(handle)
else:
os.kill(process.pid, 9)
CREATE_NO_WINDOW = 0x08000000
def Popen(cmd, **args):
# Create a subprocess that does not open a new console window
if sys.platform == 'win32':
process = subprocess.Popen(cmd, creationflags = CREATE_NO_WINDOW, **args)
else:
process = subprocess.Popen(cmd, **args)
# Emulate kill() for Python 2.5
if 'kill' not in dir(process):
process.kill = lambda: kill_subprocess(process)
return process
| gpl-2.0 | 3,501,534,165,444,827,600 | 24.65812 | 90 | 0.592938 | false | 3.729193 | false | false | false |
cortesi/mitmproxy | mitmproxy/version.py | 3 | 1296 | import os
import subprocess
import sys
VERSION = "5.0.0.dev"
PATHOD = "pathod " + VERSION
MITMPROXY = "mitmproxy " + VERSION
# Serialization format version. This is displayed nowhere, it just needs to be incremented by one
# for each change in the file format.
FLOW_FORMAT_VERSION = 7
def get_dev_version() -> str:
"""
Return a detailed version string, sourced either from VERSION or obtained dynamically using git.
"""
mitmproxy_version = VERSION
here = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
try:
git_describe = subprocess.check_output(
['git', 'describe', '--long'],
stderr=subprocess.STDOUT,
cwd=here,
)
last_tag, tag_dist, commit = git_describe.decode().strip().rsplit("-", 2)
commit = commit.lstrip("g")[:7]
tag_dist = int(tag_dist)
except Exception:
pass
else:
# Add commit info for non-tagged releases
if tag_dist > 0:
mitmproxy_version += f" (+{tag_dist}, commit {commit})"
# PyInstaller build indicator, if using precompiled binary
if getattr(sys, 'frozen', False):
mitmproxy_version += " binary"
return mitmproxy_version
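# Illustrative output (tag distance and commit hash are made up):
#   >>> get_dev_version()
#   '5.0.0.dev (+42, commit 1a2b3c4)'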
if __name__ == "__main__": # pragma: no cover
print(VERSION)
| mit | -1,990,845,984,088,747,300 | 27.173913 | 100 | 0.617284 | false | 3.681818 | false | false | false |
d3banjan/polyamide | polyamide/polyamide/settings.py | 1 | 2713 | """
Django settings for polyamide project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+md7#^%e3(!o03w1l(hyqk-@_s*p9+xxq038m%t_*$=w(^_qnz'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
    'pagedown',  # adds the markdown editor from SO
'listofjobs',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'polyamide.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'polyamide.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| bsd-2-clause | 2,909,604,421,848,923,000 | 25.086538 | 71 | 0.689274 | false | 3.451654 | false | false | false |