| column | dtype | values / range |
|---|---|---|
| repo_name | string | lengths 6-61 |
| path | string | lengths 4-230 |
| copies | string | lengths 1-3 |
| size | string | lengths 4-6 |
| text | string | lengths 1.01k-850k |
| license | stringclasses | 15 values |
| hash | int64 | -9,220,477,234,079,998,000 to 9,219,060,020B |
| line_mean | float64 | 11.6 to 96.6 |
| line_max | int64 | 32 to 939 |
| alpha_frac | float64 | 0.26 to 0.9 |
| autogenerated | bool | 1 class |
| ratio | float64 | 1.62 to 6.1 |
| config_test | bool | 2 classes |
| has_no_keywords | bool | 2 classes |
| few_assignments | bool | 1 class |
woest85/PokemonGo-Map-1 | pogom/models.py | 1 | 39432 |

#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
import itertools
import calendar
import sys
import gc
import time
import geopy
from peewee import SqliteDatabase, InsertQuery, \
IntegerField, CharField, DoubleField, BooleanField, \
DateTimeField, fn, DeleteQuery, CompositeKey, FloatField, SQL, TextField
from playhouse.flask_utils import FlaskDB
from playhouse.pool import PooledMySQLDatabase
from playhouse.shortcuts import RetryOperationalError
from playhouse.migrate import migrate, MySQLMigrator, SqliteMigrator
from datetime import datetime, timedelta
from base64 import b64encode
from cachetools import TTLCache
from cachetools import cached
from . import config
from .utils import get_pokemon_name, get_pokemon_rarity, get_pokemon_types, get_args
from .transform import transform_from_wgs_to_gcj, get_new_coords
from .customLog import printPokemon
log = logging.getLogger(__name__)
args = get_args()
flaskDb = FlaskDB()
cache = TTLCache(maxsize=100, ttl=60 * 5)
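# Results cached with @cached(cache) below (e.g. Pokemon.get_seen) are reused for up to
# 5 minutes (ttl=60 * 5) for at most 100 distinct keys.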
db_schema_version = 7
class MyRetryDB(RetryOperationalError, PooledMySQLDatabase):
pass
def init_database(app):
if args.db_type == 'mysql':
log.info('Connecting to MySQL database on %s:%i', args.db_host, args.db_port)
connections = args.db_max_connections
if hasattr(args, 'accounts'):
connections *= len(args.accounts)
db = MyRetryDB(
args.db_name,
user=args.db_user,
password=args.db_pass,
host=args.db_host,
port=args.db_port,
max_connections=connections,
stale_timeout=300)
else:
log.info('Connecting to local SQLite database')
db = SqliteDatabase(args.db)
app.config['DATABASE'] = db
flaskDb.init_app(app)
return db
class BaseModel(flaskDb.Model):
@classmethod
def get_all(cls):
results = [m for m in cls.select().dicts()]
if args.china:
for result in results:
result['latitude'], result['longitude'] = \
transform_from_wgs_to_gcj(
result['latitude'], result['longitude'])
return results
class Pokemon(BaseModel):
# We are base64 encoding the ids delivered by the api
# because they are too big for sqlite to handle
encounter_id = CharField(primary_key=True, max_length=50)
spawnpoint_id = CharField(index=True)
pokemon_id = IntegerField(index=True)
latitude = DoubleField()
longitude = DoubleField()
disappear_time = DateTimeField(index=True)
class Meta:
indexes = ((('latitude', 'longitude'), False),)
@staticmethod
def get_active(swLat, swLng, neLat, neLng):
if swLat is None or swLng is None or neLat is None or neLng is None:
query = (Pokemon
.select()
.where(Pokemon.disappear_time > datetime.utcnow())
.dicts())
else:
query = (Pokemon
.select()
.where((Pokemon.disappear_time > datetime.utcnow()) &
(((Pokemon.latitude >= swLat) &
(Pokemon.longitude >= swLng) &
(Pokemon.latitude <= neLat) &
(Pokemon.longitude <= neLng))))
.dicts())
# Performance: Disable the garbage collector prior to building a (potentially) large list with append()
gc.disable()
pokemons = []
for p in query:
p['pokemon_name'] = get_pokemon_name(p['pokemon_id'])
p['pokemon_rarity'] = get_pokemon_rarity(p['pokemon_id'])
p['pokemon_types'] = get_pokemon_types(p['pokemon_id'])
if args.china:
p['latitude'], p['longitude'] = \
transform_from_wgs_to_gcj(p['latitude'], p['longitude'])
pokemons.append(p)
# Re-enable the GC.
gc.enable()
return pokemons
@staticmethod
def get_active_by_id(ids, swLat, swLng, neLat, neLng):
if swLat is None or swLng is None or neLat is None or neLng is None:
query = (Pokemon
.select()
.where((Pokemon.pokemon_id << ids) &
(Pokemon.disappear_time > datetime.utcnow()))
.dicts())
else:
query = (Pokemon
.select()
.where((Pokemon.pokemon_id << ids) &
(Pokemon.disappear_time > datetime.utcnow()) &
(Pokemon.latitude >= swLat) &
(Pokemon.longitude >= swLng) &
(Pokemon.latitude <= neLat) &
(Pokemon.longitude <= neLng))
.dicts())
# Performance: Disable the garbage collector prior to building a (potentially) large list with append()
gc.disable()
pokemons = []
for p in query:
p['pokemon_name'] = get_pokemon_name(p['pokemon_id'])
p['pokemon_rarity'] = get_pokemon_rarity(p['pokemon_id'])
p['pokemon_types'] = get_pokemon_types(p['pokemon_id'])
if args.china:
p['latitude'], p['longitude'] = \
transform_from_wgs_to_gcj(p['latitude'], p['longitude'])
pokemons.append(p)
# Re-enable the GC.
gc.enable()
return pokemons
@classmethod
@cached(cache)
def get_seen(cls, timediff):
if timediff:
timediff = datetime.utcnow() - timediff
pokemon_count_query = (Pokemon
.select(Pokemon.pokemon_id,
fn.COUNT(Pokemon.pokemon_id).alias('count'),
fn.MAX(Pokemon.disappear_time).alias('lastappeared')
)
.where(Pokemon.disappear_time > timediff)
.group_by(Pokemon.pokemon_id)
.alias('counttable')
)
query = (Pokemon
.select(Pokemon.pokemon_id,
Pokemon.disappear_time,
Pokemon.latitude,
Pokemon.longitude,
pokemon_count_query.c.count)
.join(pokemon_count_query, on=(Pokemon.pokemon_id == pokemon_count_query.c.pokemon_id))
.distinct()
.where(Pokemon.disappear_time == pokemon_count_query.c.lastappeared)
.dicts()
)
# Performance: Disable the garbage collector prior to building a (potentially) large list with append()
gc.disable()
pokemons = []
total = 0
for p in query:
p['pokemon_name'] = get_pokemon_name(p['pokemon_id'])
pokemons.append(p)
total += p['count']
# Re-enable the GC.
gc.enable()
return {'pokemon': pokemons, 'total': total}
@classmethod
def get_appearances(cls, pokemon_id, timediff):
'''
:param pokemon_id: id of pokemon that we need appearances for
:param timediff: limiting period of the selection
:return: list of pokemon appearances over a selected period
'''
if timediff:
timediff = datetime.utcnow() - timediff
query = (Pokemon
.select(Pokemon.latitude, Pokemon.longitude, Pokemon.pokemon_id, fn.Count(Pokemon.spawnpoint_id).alias('count'), Pokemon.spawnpoint_id)
.where((Pokemon.pokemon_id == pokemon_id) &
(Pokemon.disappear_time > timediff)
)
.group_by(Pokemon.latitude, Pokemon.longitude, Pokemon.pokemon_id, Pokemon.spawnpoint_id)
.dicts()
)
return list(query)
@classmethod
def get_appearances_times_by_spawnpoint(cls, pokemon_id, spawnpoint_id, timediff):
'''
:param pokemon_id: id of pokemon that we need appearance times for
:param spawnpoint_id: spawnpoint id we need appearance times for
:param timediff: limiting period of the selection
:return: list of time appearances over a selected period
'''
if timediff:
timediff = datetime.utcnow() - timediff
query = (Pokemon
.select(Pokemon.disappear_time)
.where((Pokemon.pokemon_id == pokemon_id) &
(Pokemon.spawnpoint_id == spawnpoint_id) &
(Pokemon.disappear_time > timediff)
)
.order_by(Pokemon.disappear_time.asc())
.tuples()
)
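# itertools.chain(*query) flattens the one-column result tuples, e.g. [(t1,), (t2,)] -> [t1, t2].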
return list(itertools.chain(*query))
@classmethod
def get_spawn_time(cls, disappear_time):
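# Example (illustrative): a disappearance 1800 s past the hour (the 30th minute) maps to
# (1800 + 2700) % 3600 = 900, i.e. an appearance at the 15th minute.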
return (disappear_time + 2700) % 3600
@classmethod
def get_spawnpoints(cls, southBoundary, westBoundary, northBoundary, eastBoundary):
query = Pokemon.select(Pokemon.latitude, Pokemon.longitude, Pokemon.spawnpoint_id, ((Pokemon.disappear_time.minute * 60) + Pokemon.disappear_time.second).alias('time'), fn.Count(Pokemon.spawnpoint_id).alias('count'))
if None not in (northBoundary, southBoundary, westBoundary, eastBoundary):
query = (query
.where((Pokemon.latitude <= northBoundary) &
(Pokemon.latitude >= southBoundary) &
(Pokemon.longitude >= westBoundary) &
(Pokemon.longitude <= eastBoundary)
))
query = query.group_by(Pokemon.latitude, Pokemon.longitude, Pokemon.spawnpoint_id, SQL('time'))
queryDict = query.dicts()
spawnpoints = {}
for sp in queryDict:
key = sp['spawnpoint_id']
disappear_time = cls.get_spawn_time(sp.pop('time'))
count = int(sp['count'])
if key not in spawnpoints:
spawnpoints[key] = sp
else:
spawnpoints[key]['special'] = True
if 'time' not in spawnpoints[key] or count >= spawnpoints[key]['count']:
spawnpoints[key]['time'] = disappear_time
spawnpoints[key]['count'] = count
for sp in spawnpoints.values():
del sp['count']
return list(spawnpoints.values())
@classmethod
def get_spawnpoints_in_hex(cls, center, steps):
log.info('Finding spawn points {} steps away'.format(steps))
n, e, s, w = hex_bounds(center, steps)
query = (Pokemon
.select(Pokemon.latitude.alias('lat'),
Pokemon.longitude.alias('lng'),
((Pokemon.disappear_time.minute * 60) + Pokemon.disappear_time.second).alias('time'),
Pokemon.spawnpoint_id
))
query = (query.where((Pokemon.latitude <= n) &
(Pokemon.latitude >= s) &
(Pokemon.longitude >= w) &
(Pokemon.longitude <= e)
))
# Sqlite doesn't support distinct on columns
if args.db_type == 'mysql':
query = query.distinct(Pokemon.spawnpoint_id)
else:
query = query.group_by(Pokemon.spawnpoint_id)
s = list(query.dicts())
# The distance between scan circles of radius 70 m in a hex is 121.2436 m
# steps - 1 to account for the center circle, then add 70 m for the edge
step_distance = ((steps - 1) * 121.2436) + 70
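# Example (illustrative): with steps = 5, step_distance = ((5 - 1) * 121.2436) + 70 ~= 555 m.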
# Compare the spawnpoint list to a circle of radius step_distance around the center.
# Uses the direct geopy distance between the center and the spawnpoint.
filtered = []
for idx, sp in enumerate(s):
if geopy.distance.distance(center, (sp['lat'], sp['lng'])).meters <= step_distance:
filtered.append(s[idx])
# at this point, 'time' is DISAPPEARANCE time, we're going to morph it to APPEARANCE time
for location in filtered:
# examples: time shifted
# 0 ( 0 + 2700) = 2700 % 3600 = 2700 (0th minute to 45th minute, 15 minutes prior to appearance as time wraps around the hour)
# 1800 (1800 + 2700) = 4500 % 3600 = 900 (30th minute, moved to arrive at 15th minute)
# todo: this DOES NOT ACCOUNT for pokemons that appear sooner and live longer, but you'll _always_ have at least 15 minutes, so it works well enough
location['time'] = cls.get_spawn_time(location['time'])
return filtered
class Pokestop(BaseModel):
pokestop_id = CharField(primary_key=True, max_length=50)
enabled = BooleanField()
latitude = DoubleField()
longitude = DoubleField()
last_modified = DateTimeField(index=True)
lure_expiration = DateTimeField(null=True, index=True)
active_fort_modifier = CharField(max_length=50, null=True)
class Meta:
indexes = ((('latitude', 'longitude'), False),)
@staticmethod
def get_stops(swLat, swLng, neLat, neLng):
if swLat is None or swLng is None or neLat is None or neLng is None:
query = (Pokestop
.select()
.dicts())
else:
query = (Pokestop
.select()
.where((Pokestop.latitude >= swLat) &
(Pokestop.longitude >= swLng) &
(Pokestop.latitude <= neLat) &
(Pokestop.longitude <= neLng))
.dicts())
# Performance: Disable the garbage collector prior to building a (potentially) large list with append()
gc.disable()
pokestops = []
for p in query:
if args.china:
p['latitude'], p['longitude'] = \
transform_from_wgs_to_gcj(p['latitude'], p['longitude'])
pokestops.append(p)
# Re-enable the GC.
gc.enable()
return pokestops
class Gym(BaseModel):
UNCONTESTED = 0
TEAM_MYSTIC = 1
TEAM_VALOR = 2
TEAM_INSTINCT = 3
gym_id = CharField(primary_key=True, max_length=50)
team_id = IntegerField()
guard_pokemon_id = IntegerField()
gym_points = IntegerField()
enabled = BooleanField()
latitude = DoubleField()
longitude = DoubleField()
last_modified = DateTimeField(index=True)
last_scanned = DateTimeField(default=datetime.utcnow)
class Meta:
indexes = ((('latitude', 'longitude'), False),)
@staticmethod
def get_gyms(swLat, swLng, neLat, neLng):
if swLat is None or swLng is None or neLat is None or neLng is None:
results = (Gym
.select()
.dicts())
else:
results = (Gym
.select()
.where((Gym.latitude >= swLat) &
(Gym.longitude >= swLng) &
(Gym.latitude <= neLat) &
(Gym.longitude <= neLng))
.dicts())
# Performance: Disable the garbage collector prior to building a (potentially) large list with append()
gc.disable()
gyms = {}
gym_ids = []
for g in results:
g['name'] = None
g['pokemon'] = []
gyms[g['gym_id']] = g
gym_ids.append(g['gym_id'])
if len(gym_ids) > 0:
pokemon = (GymMember
.select(
GymMember.gym_id,
GymPokemon.cp.alias('pokemon_cp'),
GymPokemon.pokemon_id,
Trainer.name.alias('trainer_name'),
Trainer.level.alias('trainer_level'))
.join(Gym, on=(GymMember.gym_id == Gym.gym_id))
.join(GymPokemon, on=(GymMember.pokemon_uid == GymPokemon.pokemon_uid))
.join(Trainer, on=(GymPokemon.trainer_name == Trainer.name))
.where(GymMember.gym_id << gym_ids)
.where(GymMember.last_scanned > Gym.last_modified)
.order_by(GymMember.gym_id, GymPokemon.cp)
.dicts())
for p in pokemon:
p['pokemon_name'] = get_pokemon_name(p['pokemon_id'])
gyms[p['gym_id']]['pokemon'].append(p)
details = (GymDetails
.select(
GymDetails.gym_id,
GymDetails.name)
.where(GymDetails.gym_id << gym_ids)
.dicts())
for d in details:
gyms[d['gym_id']]['name'] = d['name']
# Re-enable the GC.
gc.enable()
return gyms
class ScannedLocation(BaseModel):
latitude = DoubleField()
longitude = DoubleField()
last_modified = DateTimeField(index=True)
class Meta:
primary_key = CompositeKey('latitude', 'longitude')
@staticmethod
def get_recent(swLat, swLng, neLat, neLng):
query = (ScannedLocation
.select()
.where((ScannedLocation.last_modified >=
(datetime.utcnow() - timedelta(minutes=15))) &
(ScannedLocation.latitude >= swLat) &
(ScannedLocation.longitude >= swLng) &
(ScannedLocation.latitude <= neLat) &
(ScannedLocation.longitude <= neLng))
.order_by(ScannedLocation.last_modified.asc())
.dicts())
return list(query)
class MainWorker(BaseModel):
worker_name = CharField(primary_key=True, max_length=50)
message = CharField()
method = CharField(max_length=50)
last_modified = DateTimeField(index=True)
class WorkerStatus(BaseModel):
username = CharField(primary_key=True, max_length=50)
worker_name = CharField()
success = IntegerField()
fail = IntegerField()
no_items = IntegerField()
skip = IntegerField()
last_modified = DateTimeField(index=True)
message = CharField(max_length=255)
@staticmethod
def get_recent():
query = (WorkerStatus
.select()
.where((WorkerStatus.last_modified >=
(datetime.utcnow() - timedelta(minutes=5))))
.order_by(WorkerStatus.username)
.dicts())
status = []
for s in query:
status.append(s)
return status
class Versions(flaskDb.Model):
key = CharField()
val = IntegerField()
class Meta:
primary_key = False
class GymMember(BaseModel):
gym_id = CharField(index=True)
pokemon_uid = CharField()
last_scanned = DateTimeField(default=datetime.utcnow)
class Meta:
primary_key = False
class GymPokemon(BaseModel):
pokemon_uid = CharField(primary_key=True, max_length=50)
pokemon_id = IntegerField()
cp = IntegerField()
trainer_name = CharField()
num_upgrades = IntegerField(null=True)
move_1 = IntegerField(null=True)
move_2 = IntegerField(null=True)
height = FloatField(null=True)
weight = FloatField(null=True)
stamina = IntegerField(null=True)
stamina_max = IntegerField(null=True)
cp_multiplier = FloatField(null=True)
additional_cp_multiplier = FloatField(null=True)
iv_defense = IntegerField(null=True)
iv_stamina = IntegerField(null=True)
iv_attack = IntegerField(null=True)
last_seen = DateTimeField(default=datetime.utcnow)
class Trainer(BaseModel):
name = CharField(primary_key=True, max_length=50)
team = IntegerField()
level = IntegerField()
last_seen = DateTimeField(default=datetime.utcnow)
class GymDetails(BaseModel):
gym_id = CharField(primary_key=True, max_length=50)
name = CharField()
description = TextField(null=True, default="")
url = CharField()
last_scanned = DateTimeField(default=datetime.utcnow)
def hex_bounds(center, steps):
# Make a box that is (70m * step_limit * 2) + 70m away from the center point
# Rationale is that you need to travel
sp_dist = 0.07 * 2 * steps
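# Example (illustrative): with steps = 5, sp_dist = 0.07 * 2 * 5 = 0.7, i.e. 70 m per step in each
# direction (assuming get_new_coords interprets the distance in kilometres).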
n = get_new_coords(center, sp_dist, 0)[0]
e = get_new_coords(center, sp_dist, 90)[1]
s = get_new_coords(center, sp_dist, 180)[0]
w = get_new_coords(center, sp_dist, 270)[1]
return (n, e, s, w)
# todo: this probably shouldn't _really_ be in "models" anymore, but w/e
def parse_map(args, map_dict, step_location, db_update_queue, wh_update_queue):
pokemons = {}
pokestops = {}
gyms = {}
cells = map_dict['responses']['GET_MAP_OBJECTS']['map_cells']
for cell in cells:
if config['parse_pokemon']:
for p in cell.get('wild_pokemons', []):
# time_till_hidden_ms was overflowing causing a negative integer.
# It was also returning a value above 3.6M ms.
if 0 < p['time_till_hidden_ms'] < 3600000:
d_t = datetime.utcfromtimestamp(
(p['last_modified_timestamp_ms'] +
p['time_till_hidden_ms']) / 1000.0)
else:
# The actual despawn time is unknown but is at least 15 minutes away, so assume 15 minutes.
d_t = datetime.utcfromtimestamp((p['last_modified_timestamp_ms'] + 900000) / 1000.0)
printPokemon(p['pokemon_data']['pokemon_id'], p['latitude'],
p['longitude'], d_t)
pokemons[p['encounter_id']] = {
'encounter_id': b64encode(str(p['encounter_id'])),
'spawnpoint_id': p['spawn_point_id'],
'pokemon_id': p['pokemon_data']['pokemon_id'],
'latitude': p['latitude'],
'longitude': p['longitude'],
'disappear_time': d_t
}
if args.webhooks:
wh_update_queue.put(('pokemon', {
'encounter_id': b64encode(str(p['encounter_id'])),
'spawnpoint_id': p['spawn_point_id'],
'pokemon_id': p['pokemon_data']['pokemon_id'],
'latitude': p['latitude'],
'longitude': p['longitude'],
'disappear_time': calendar.timegm(d_t.timetuple()),
'last_modified_time': p['last_modified_timestamp_ms'],
'time_until_hidden_ms': p['time_till_hidden_ms']
}))
for f in cell.get('forts', []):
if config['parse_pokestops'] and f.get('type') == 1: # Pokestops
if 'active_fort_modifier' in f:
lure_expiration = datetime.utcfromtimestamp(
f['last_modified_timestamp_ms'] / 1000.0) + timedelta(minutes=30)
active_fort_modifier = f['active_fort_modifier']
if args.webhooks and args.webhook_updates_only:
wh_update_queue.put(('pokestop', {
'pokestop_id': b64encode(str(f['id'])),
'enabled': f['enabled'],
'latitude': f['latitude'],
'longitude': f['longitude'],
'last_modified_time': f['last_modified_timestamp_ms'],
'lure_expiration': calendar.timegm(lure_expiration.timetuple()),
'active_fort_modifier': active_fort_modifier
}))
else:
lure_expiration, active_fort_modifier = None, None
pokestops[f['id']] = {
'pokestop_id': f['id'],
'enabled': f['enabled'],
'latitude': f['latitude'],
'longitude': f['longitude'],
'last_modified': datetime.utcfromtimestamp(
f['last_modified_timestamp_ms'] / 1000.0),
'lure_expiration': lure_expiration,
'active_fort_modifier': active_fort_modifier
}
# Send all pokéstops to webhooks
if args.webhooks and not args.webhook_updates_only:
# Explicitly set 'webhook_data', in case we want to change the information pushed to webhooks,
# similar to above and previous commits.
l_e = None
if lure_expiration is not None:
l_e = calendar.timegm(lure_expiration.timetuple())
wh_update_queue.put(('pokestop', {
'pokestop_id': b64encode(str(f['id'])),
'enabled': f['enabled'],
'latitude': f['latitude'],
'longitude': f['longitude'],
'last_modified': calendar.timegm(pokestops[f['id']]['last_modified'].timetuple()),
'lure_expiration': l_e,
'active_fort_modifier': active_fort_modifier
}))
elif config['parse_gyms'] and f.get('type') is None: # Currently, there are only stops and gyms
gyms[f['id']] = {
'gym_id': f['id'],
'team_id': f.get('owned_by_team', 0),
'guard_pokemon_id': f.get('guard_pokemon_id', 0),
'gym_points': f.get('gym_points', 0),
'enabled': f['enabled'],
'latitude': f['latitude'],
'longitude': f['longitude'],
'last_modified': datetime.utcfromtimestamp(
f['last_modified_timestamp_ms'] / 1000.0),
}
# Send gyms to webhooks
if args.webhooks and not args.webhook_updates_only:
# Explicitly set 'webhook_data', in case we want to change the information pushed to webhooks,
# similar to above and previous commits.
wh_update_queue.put(('gym', {
'gym_id': b64encode(str(f['id'])),
'team_id': f.get('owned_by_team', 0),
'guard_pokemon_id': f.get('guard_pokemon_id', 0),
'gym_points': f.get('gym_points', 0),
'enabled': f['enabled'],
'latitude': f['latitude'],
'longitude': f['longitude'],
'last_modified': calendar.timegm(gyms[f['id']]['last_modified'].timetuple())
}))
if len(pokemons):
db_update_queue.put((Pokemon, pokemons))
if len(pokestops):
db_update_queue.put((Pokestop, pokestops))
if len(gyms):
db_update_queue.put((Gym, gyms))
log.info('Parsing found %d pokemons, %d pokestops, and %d gyms',
len(pokemons),
len(pokestops),
len(gyms))
db_update_queue.put((ScannedLocation, {0: {
'latitude': step_location[0],
'longitude': step_location[1],
'last_modified': datetime.utcnow()
}}))
return {
'count': len(pokemons) + len(pokestops) + len(gyms),
'gyms': gyms,
}
def parse_gyms(args, gym_responses, wh_update_queue):
gym_details = {}
gym_members = {}
gym_pokemon = {}
trainers = {}
i = 0
for g in gym_responses.values():
gym_state = g['gym_state']
gym_id = gym_state['fort_data']['id']
gym_details[gym_id] = {
'gym_id': gym_id,
'name': g['name'],
'description': g.get('description'),
'url': g['urls'][0],
}
if args.webhooks:
webhook_data = {
'id': gym_id,
'latitude': gym_state['fort_data']['latitude'],
'longitude': gym_state['fort_data']['longitude'],
'team': gym_state['fort_data'].get('owned_by_team', 0),
'name': g['name'],
'description': g.get('description'),
'url': g['urls'][0],
'pokemon': [],
}
for member in gym_state.get('memberships', []):
gym_members[i] = {
'gym_id': gym_id,
'pokemon_uid': member['pokemon_data']['id'],
}
gym_pokemon[i] = {
'pokemon_uid': member['pokemon_data']['id'],
'pokemon_id': member['pokemon_data']['pokemon_id'],
'cp': member['pokemon_data']['cp'],
'trainer_name': member['trainer_public_profile']['name'],
'num_upgrades': member['pokemon_data'].get('num_upgrades', 0),
'move_1': member['pokemon_data'].get('move_1'),
'move_2': member['pokemon_data'].get('move_2'),
'height': member['pokemon_data'].get('height_m'),
'weight': member['pokemon_data'].get('weight_kg'),
'stamina': member['pokemon_data'].get('stamina'),
'stamina_max': member['pokemon_data'].get('stamina_max'),
'cp_multiplier': member['pokemon_data'].get('cp_multiplier'),
'additional_cp_multiplier': member['pokemon_data'].get('additional_cp_multiplier', 0),
'iv_defense': member['pokemon_data'].get('individual_defense', 0),
'iv_stamina': member['pokemon_data'].get('individual_stamina', 0),
'iv_attack': member['pokemon_data'].get('individual_attack', 0),
'last_seen': datetime.utcnow(),
}
trainers[i] = {
'name': member['trainer_public_profile']['name'],
'team': gym_state['fort_data']['owned_by_team'],
'level': member['trainer_public_profile']['level'],
'last_seen': datetime.utcnow(),
}
if args.webhooks:
webhook_data['pokemon'].append({
'pokemon_uid': member['pokemon_data']['id'],
'pokemon_id': member['pokemon_data']['pokemon_id'],
'cp': member['pokemon_data']['cp'],
'num_upgrades': member['pokemon_data'].get('num_upgrades', 0),
'move_1': member['pokemon_data'].get('move_1'),
'move_2': member['pokemon_data'].get('move_2'),
'height': member['pokemon_data'].get('height_m'),
'weight': member['pokemon_data'].get('weight_kg'),
'stamina': member['pokemon_data'].get('stamina'),
'stamina_max': member['pokemon_data'].get('stamina_max'),
'cp_multiplier': member['pokemon_data'].get('cp_multiplier'),
'additional_cp_multiplier': member['pokemon_data'].get('additional_cp_multiplier', 0),
'iv_defense': member['pokemon_data'].get('individual_defense', 0),
'iv_stamina': member['pokemon_data'].get('individual_stamina', 0),
'iv_attack': member['pokemon_data'].get('individual_attack', 0),
'trainer_name': member['trainer_public_profile']['name'],
'trainer_level': member['trainer_public_profile']['level'],
})
i += 1
if args.webhooks:
wh_update_queue.put(('gym_details', webhook_data))
# All this database stuff is synchronous (not using the upsert queue) on purpose.
# Since the search workers load the GymDetails model from the database to determine if a gym
# needs to be rescanned, we need to be sure the GymDetails are fully committed to the database before moving on.
#
# We _could_ synchronously upsert GymDetails, then queue the other tables for
# upsert, but that would put that Gym's overall information in a weird non-atomic state.
# upsert all the models
if len(gym_details):
bulk_upsert(GymDetails, gym_details)
if len(gym_pokemon):
bulk_upsert(GymPokemon, gym_pokemon)
if len(trainers):
bulk_upsert(Trainer, trainers)
# This needs to be completed in a transaction, because we don't want any other thread or process
# to mess with the GymMembers for the gyms we're updating while we're updating the bridge table.
with flaskDb.database.transaction():
# get rid of all the gym members, we're going to insert new records
if len(gym_details):
DeleteQuery(GymMember).where(GymMember.gym_id << gym_details.keys()).execute()
# insert new gym members
if len(gym_members):
bulk_upsert(GymMember, gym_members)
log.info('Upserted %d gyms and %d gym members',
len(gym_details),
len(gym_members))
def db_updater(args, q):
# The forever loop
while True:
try:
while True:
try:
flaskDb.connect_db()
break
except Exception as e:
log.warning('%s... Retrying', e)
# Loop the queue
while True:
model, data = q.get()
bulk_upsert(model, data)
q.task_done()
log.debug('Upserted to %s, %d records (upsert queue remaining: %d)',
model.__name__,
len(data),
q.qsize())
if q.qsize() > 50:
log.warning("DB queue is > 50 (@%d); try increasing --db-threads", q.qsize())
except Exception as e:
log.exception('Exception in db_updater: %s', e)
def clean_db_loop(args):
while True:
try:
# Clean out old scanned locations
query = (ScannedLocation
.delete()
.where((ScannedLocation.last_modified <
(datetime.utcnow() - timedelta(minutes=30)))))
query.execute()
query = (MainWorker
.delete()
.where((MainWorker.last_modified <
(datetime.utcnow() - timedelta(minutes=30)))))
query.execute()
query = (WorkerStatus
.delete()
.where((WorkerStatus.last_modified <
(datetime.utcnow() - timedelta(minutes=30)))))
query.execute()
# Remove active modifier from expired lured pokestops
query = (Pokestop
.update(lure_expiration=None)
.where(Pokestop.lure_expiration < datetime.utcnow()))
query.execute()
# If desired, clear old pokemon spawns
if args.purge_data > 0:
query = (Pokemon
.delete()
.where((Pokemon.disappear_time <
(datetime.utcnow() - timedelta(hours=args.purge_data)))))
query.execute()
log.info('Regular database cleaning complete')
time.sleep(60)
except Exception as e:
log.exception('Exception in clean_db_loop: %s', e)
def bulk_upsert(cls, data):
num_rows = len(data.values())
i = 0
if args.db_type == 'mysql':
step = 120
else:
# SQLite has a default max number of parameters of 999,
# so we need to limit how many rows we insert for it.
step = 50
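# Example (illustrative): 50 rows of a model with ~18 columns binds roughly 50 * 18 = 900
# parameters, which stays under SQLite's default limit of 999.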
while i < num_rows:
log.debug('Inserting items %d to %d', i, min(i + step, num_rows))
try:
InsertQuery(cls, rows=data.values()[i:min(i + step, num_rows)]).upsert().execute()
except Exception as e:
log.warning('%s... Retrying', e)
continue
i += step
def create_tables(db):
db.connect()
verify_database_schema(db)
db.create_tables([Pokemon, Pokestop, Gym, ScannedLocation, GymDetails, GymMember, GymPokemon, Trainer, MainWorker, WorkerStatus], safe=True)
db.close()
def drop_tables(db):
db.connect()
db.drop_tables([Pokemon, Pokestop, Gym, ScannedLocation, Versions, GymDetails, GymMember, GymPokemon, Trainer, MainWorker, WorkerStatus, Versions], safe=True)
db.close()
def verify_database_schema(db):
if not Versions.table_exists():
db.create_tables([Versions])
if ScannedLocation.table_exists():
# Versions table didn't exist, but there were tables. This must mean the user
# is coming from a database that existed before we started tracking the schema
# version. Perform a full upgrade.
InsertQuery(Versions, {Versions.key: 'schema_version', Versions.val: 0}).execute()
database_migrate(db, 0)
else:
InsertQuery(Versions, {Versions.key: 'schema_version', Versions.val: db_schema_version}).execute()
else:
db_ver = Versions.get(Versions.key == 'schema_version').val
if db_ver < db_schema_version:
database_migrate(db, db_ver)
elif db_ver > db_schema_version:
log.error("Your database version (%i) appears to be newer than the code supports (%i).",
db_ver, db_schema_version)
log.error("Please upgrade your code base or drop all tables in your database.")
sys.exit(1)
def database_migrate(db, old_ver):
# Update database schema version
Versions.update(val=db_schema_version).where(Versions.key == 'schema_version').execute()
log.info("Detected database version %i, updating to %i", old_ver, db_schema_version)
# Perform migrations here
migrator = None
if args.db_type == 'mysql':
migrator = MySQLMigrator(db)
else:
migrator = SqliteMigrator(db)
# No longer necessary, we're doing this at schema 4 as well
# if old_ver < 1:
# db.drop_tables([ScannedLocation])
if old_ver < 2:
migrate(migrator.add_column('pokestop', 'encounter_id', CharField(max_length=50, null=True)))
if old_ver < 3:
migrate(
migrator.add_column('pokestop', 'active_fort_modifier', CharField(max_length=50, null=True)),
migrator.drop_column('pokestop', 'encounter_id'),
migrator.drop_column('pokestop', 'active_pokemon_id')
)
if old_ver < 4:
db.drop_tables([ScannedLocation])
if old_ver < 5:
# Some pokemon were added before the 595 bug was "fixed"
# Clean those up for a better UX
query = (Pokemon
.delete()
.where(Pokemon.disappear_time >
(datetime.utcnow() - timedelta(hours=24))))
query.execute()
if old_ver < 6:
migrate(
migrator.add_column('gym', 'last_scanned', DateTimeField(null=True)),
)
if old_ver < 7:
migrate(
migrator.drop_column('gymdetails', 'description'),
migrator.add_column('gymdetails', 'description', TextField(null=True, default=""))
)
| agpl-3.0 | -5,867,474,748,991,135,000 | 37.771878 | 224 | 0.531764 | false | 4.088656 | false | false | false |
flavour/iscram | controllers/default.py | 1 | 32108 |

# -*- coding: utf-8 -*-
"""
Default Controllers
"""
module = "default"
# -----------------------------------------------------------------------------
def call():
"Call an XMLRPC, JSONRPC or RSS service"
# If webservices don't use sessions, avoid cluttering up the storage
#session.forget()
return service()
# -----------------------------------------------------------------------------
def download():
""" Download a file """
# Load the Model
tablename = request.args[0].split(".", 1)[0]
s3mgr.load(tablename)
return response.download(request, db)
# =============================================================================
def register_validation(form):
""" Validate the fields in registration form """
# Mobile Phone
if "mobile" in form.vars and form.vars.mobile:
regex = re.compile(single_phone_number_pattern)
if not regex.match(form.vars.mobile):
form.errors.mobile = T("Invalid phone number")
elif deployment_settings.get_auth_registration_mobile_phone_mandatory():
form.errors.mobile = T("Phone number is required")
org = deployment_settings.get_auth_registration_organisation_id_default()
if org:
# Add to default organisation
form.vars.organisation_id = org
return
# -----------------------------------------------------------------------------
def register_onaccept(form):
""" Tasks to be performed after a new user registers """
# Add newly-registered users to Person Registry, add 'Authenticated' role
# If Organisation is provided, then: add HRM record & add to 'Org_X_Access' role
person_id = auth.s3_register(form)
if form.vars.organisation_id and not deployment_settings.get_hrm_show_staff():
# Convert HRM record to a volunteer
htable = s3db.hrm_human_resource
query = (htable.person_id == person_id)
db(query).update(type=2)
# Add to required roles:
roles = deployment_settings.get_auth_registration_roles()
if roles or deployment_settings.has_module("delphi"):
utable = auth.settings.table_user
ptable = s3db.pr_person
ltable = s3db.pr_person_user
query = (ptable.id == person_id) & \
(ptable.pe_id == ltable.pe_id) & \
(ltable.user_id == utable.id)
user = db(query).select(utable.id,
ltable.user_id,
limitby=(0, 1)).first()
if roles:
gtable = auth.settings.table_group
mtable = auth.settings.table_membership
query = (gtable.uuid.belongs(roles))
rows = db(query).select(gtable.id)
for role in rows:
mtable.insert(user_id=user[ltable._tablename].user_id,
group_id=role.id)
if deployment_settings.has_module("delphi"):
# Add user as a participant of the default problem group
table = s3db.delphi_group
query = (table.uuid == "DEFAULT")
group = db(query).select(table.id,
limitby=(0, 1)).first()
if group:
table = s3db.delphi_membership
table.insert(group_id=group.id,
user_id=user[utable._tablename].id,
status=3)
# -----------------------------------------------------------------------------
auth.settings.register_onvalidation = register_validation
auth.settings.register_onaccept = register_onaccept
_table_user = auth.settings.table_user
_table_user.first_name.label = T("First Name")
_table_user.first_name.comment = SPAN("*", _class="req")
_table_user.last_name.label = T("Last Name")
if deployment_settings.get_L10n_mandatory_lastname():
_table_user.last_name.comment = SPAN("*", _class="req")
_table_user.email.label = T("E-mail")
_table_user.email.comment = SPAN("*", _class="req")
_table_user.password.comment = SPAN("*", _class="req")
_table_user.language.label = T("Language")
_table_user.language.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Language"),
T("The language you wish the site to be displayed in.")))
_table_user.language.represent = lambda opt: s3_languages.get(opt, UNKNOWN_OPT)
# Organisation widget for use in Registration Screen
# NB User Profile is only editable by Admin - using User Management
organisation_represent = s3db.org_organisation_represent
org_widget = IS_ONE_OF(db, "org_organisation.id",
organisation_represent,
orderby="org_organisation.name",
sort=True)
if deployment_settings.get_auth_registration_organisation_mandatory():
_table_user.organisation_id.requires = org_widget
else:
_table_user.organisation_id.requires = IS_NULL_OR(org_widget)
# For the User Profile:
_table_user.utc_offset.comment = DIV(_class="tooltip",
_title="%s|%s" % (auth.messages.label_utc_offset,
auth.messages.help_utc_offset))
_table_user.organisation_id.represent = organisation_represent
_table_user.organisation_id.comment = DIV(_class="tooltip",
_title="%s|%s|%s" % (T("Organization"),
T("The default Organization for whom you are acting."),
T("This setting can only be controlled by the Administrator.")))
org_site_represent = s3db.org_site_represent
_table_user.site_id.represent = org_site_represent
_table_user.site_id.comment = DIV(_class="tooltip",
_title="%s|%s|%s" % (T("Facility"),
T("The default Facility for which you are acting."),
T("This setting can only be controlled by the Administrator.")))
# =============================================================================
def index():
""" Main Home Page """
title = deployment_settings.get_system_name()
response.title = title
item = ""
if deployment_settings.has_module("cms"):
table = s3db.cms_post
item = db(table.module == module).select(table.body,
limitby=(0, 1)).first()
if item:
item = DIV(XML(item.body))
else:
item = ""
if deployment_settings.has_module("cr"):
s3mgr.load("cr_shelter")
SHELTERS = s3.crud_strings["cr_shelter"].subtitle_list
else:
SHELTERS = ""
# Menu Boxes
menu_btns = [#div, label, app, function
["facility", SHELTERS, "cr", "shelter"],
["facility", T("Warehouses"), "inv", "warehouse"],
["facility", T("Hospitals"), "hms", "hospital"],
["facility", T("Offices"), "org", "office"],
["sit", T("Incidents"), "irs", "ireport"],
["sit", T("Assessments"), "survey", "series"],
["sit", T("Assets"), "asset", "asset"],
["sit", T("Inventory Items"), "inv", "inv_item"],
#["dec", T("Gap Map"), "project", "gap_map"],
#["dec", T("Gap Report"), "project", "gap_report"],
["dec", T("Requests"), "req", "req"],
["res", T("Projects"), "project", "project"],
["res", T("Activities"), "project", "activity"],
["res", T("Commitments"), "req", "commit"],
["res", T("Sent Shipments"), "inv", "send"],
["res", T("Received Shipments"), "inv", "recv"]
]
# Change to (Mitigation)/Preparedness/Response/Recovery?
menu_divs = {"facility": DIV( H3(T("Facilities")),
_id = "facility_box", _class = "menu_box"),
"sit": DIV( H3(T("Situation")),
_id = "menu_div_sit", _class = "menu_div"),
"dec": DIV( H3(T("Decision")),
_id = "menu_div_dec", _class = "menu_div"),
"res": DIV( H3(T("Response")),
_id = "menu_div_res", _class = "menu_div"),
}
for div, label, app, function in menu_btns:
if deployment_settings.has_module(app):
# @ToDo: Also check permissions (e.g. for anonymous users)
menu_divs[div].append(A( DIV(label,
_class = "menu-btn-r"),
_class = "menu-btn-l",
_href = URL(app,function)
)
)
div_arrow = DIV(IMG(_src = "/%s/static/img/arrow_blue_right.png" % \
request.application),
_class = "div_arrow")
sit_dec_res_box = DIV(menu_divs["sit"],
div_arrow,
menu_divs["dec"],
div_arrow,
menu_divs["res"],
_id = "sit_dec_res_box",
_class = "menu_box fleft swidth"
#div_additional,
)
facility_box = menu_divs["facility"]
facility_box.append( A( IMG(_src = "/%s/static/img/map_icon_128.png" % \
request.application),
_href = URL(c="gis", f="index"),
_title = T("Map")
)
)
datatable_ajax_source = ""
# Check logged in AND permissions
if AUTHENTICATED in session.s3.roles and \
auth.s3_has_permission("read", db.org_organisation):
org_items = organisation()
datatable_ajax_source = "/%s/default/organisation.aaData" % \
request.application
response.s3.actions = None
response.view = "default/index.html"
auth.permission.controller = "org"
auth.permission.function = "site"
permitted_facilities = auth.permission.permitted_facilities(redirect_on_error=False)
manage_facility_box = ""
if permitted_facilities:
facility_list = s3_represent_facilities(db, permitted_facilities,
link=False)
facility_opts = [OPTION(opt[1], _value = opt[0])
for opt in facility_list]
if facility_list:
manage_facility_box = DIV(H3(T("Manage Your Facilities")),
SELECT(_id = "manage_facility_select",
_style = "max-width:400px;",
*facility_opts
),
A(T("Go"),
_href = URL(c="default", f="site",
args=[facility_list[0][0]]),
#_disabled = "disabled",
_id = "manage_facility_btn",
_class = "action-btn"
),
_id = "manage_facility_box",
_class = "menu_box fleft")
response.s3.jquery_ready.append( """
$('#manage_facility_select').change(function() {
$('#manage_facility_btn').attr('href', S3.Ap.concat('/default/site/', $('#manage_facility_select').val()));
})""" )
else:
manage_facility_box = DIV()
org_box = DIV( H3(T("Organizations")),
A(T("Add Organization"),
_href = URL(c="org", f="organisation",
args=["create"]),
_id = "add-btn",
_class = "action-btn",
_style = "margin-right: 10px;"),
org_items["items"],
_id = "org_box",
_class = "menu_box fleft"
)
else:
manage_facility_box = ""
org_box = ""
# @ToDo: Replace this with an easily-customisable section on the homepage
#settings = db(db.s3_setting.id == 1).select(limitby=(0, 1)).first()
#if settings:
# admin_name = settings.admin_name
# admin_email = settings.admin_email
# admin_tel = settings.admin_tel
#else:
# # db empty and prepopulate is false
# admin_name = T("Sahana Administrator").xml(),
# admin_email = "support@Not Set",
# admin_tel = T("Not Set").xml(),
# Login/Registration forms
self_registration = deployment_settings.get_security_self_registration()
registered = False
login_form = None
login_div = None
register_form = None
register_div = None
if AUTHENTICATED not in session.s3.roles:
# This user isn't yet logged-in
if request.cookies.has_key("registered"):
# This browser has logged-in before
registered = True
if self_registration:
# Provide a Registration box on front page
request.args = ["register"]
if deployment_settings.get_terms_of_service():
auth.messages.submit_button = T("I accept. Create my account.")
else:
auth.messages.submit_button = T("Register")
register_form = auth()
register_div = DIV(H3(T("Register")),
P(XML(T("If you would like to help, then please %(sign_up_now)s") % \
dict(sign_up_now=B(T("sign-up now"))))))
# Add client-side validation
s3_register_validation()
if session.s3.debug:
response.s3.scripts.append( "%s/jquery.validate.js" % s3_script_dir )
else:
response.s3.scripts.append( "%s/jquery.validate.min.js" % s3_script_dir )
if request.env.request_method == "POST":
post_script = """// Unhide register form
$('#register_form').removeClass('hide');
// Hide login form
$('#login_form').addClass('hide');"""
else:
post_script = ""
register_script = """
// Change register/login links to avoid page reload, make back button work.
$('#register-btn').attr('href', '#register');
$('#login-btn').attr('href', '#login');
%s
// Redirect Register Button to unhide
$('#register-btn').click(function() {
// Unhide register form
$('#register_form').removeClass('hide');
// Hide login form
$('#login_form').addClass('hide');
});
// Redirect Login Button to unhide
$('#login-btn').click(function() {
// Hide register form
$('#register_form').addClass('hide');
// Unhide login form
$('#login_form').removeClass('hide');
});""" % post_script
response.s3.jquery_ready.append(register_script)
# Provide a login box on front page
request.args = ["login"]
auth.messages.submit_button = T("Login")
login_form = auth()
login_div = DIV(H3(T("Login")),
P(XML(T("Registered users can %(login)s to access the system" % \
dict(login=B(T("login")))))))
if deployment_settings.frontpage.rss:
response.s3.external_stylesheets.append( "http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.css" )
response.s3.scripts.append( "http://www.google.com/jsapi?key=notsupplied-wizard" )
response.s3.scripts.append( "http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.js" )
counter = 0
feeds = ""
for feed in deployment_settings.frontpage.rss:
counter += 1
feeds = "".join((feeds,
"{title: '%s',\n" % feed["title"],
"url: '%s'}" % feed["url"]))
# Don't add a trailing comma for old IEs
if counter != len(deployment_settings.frontpage.rss):
feeds += ",\n"
feed_control = "".join(("""
function LoadDynamicFeedControl() {
var feeds = [
""", feeds, """
];
var options = {
// milliseconds before feed is reloaded (5 minutes)
feedCycleTime : 300000,
numResults : 5,
stacked : true,
horizontal : false,
title : '""", str(T("News")), """'
};
new GFdynamicFeedControl(feeds, 'feed-control', options);
}
// Load the feeds API and set the onload callback.
google.load('feeds', '1');
google.setOnLoadCallback(LoadDynamicFeedControl);"""))
response.s3.js_global.append( feed_control )
return dict(title = title,
item = item,
sit_dec_res_box = sit_dec_res_box,
facility_box = facility_box,
manage_facility_box = manage_facility_box,
org_box = org_box,
r = None, # Required for dataTable to work
datatable_ajax_source = datatable_ajax_source,
#admin_name=admin_name,
#admin_email=admin_email,
#admin_tel=admin_tel,
self_registration=self_registration,
registered=registered,
login_form=login_form,
login_div=login_div,
register_form=register_form,
register_div=register_div
)
# -----------------------------------------------------------------------------
def organisation():
"""
Function to handle pagination for the org list on the homepage
"""
table = db.org_organisation
table.id.label = T("Organization")
table.id.represent = organisation_represent
response.s3.dataTable_sPaginationType = "two_button"
response.s3.dataTable_sDom = "rtip" #"frtip" - filter broken
response.s3.dataTable_iDisplayLength = 25
s3mgr.configure("org_organisation",
listadd = False,
addbtn = True,
super_entity = db.pr_pentity,
linkto = "/%s/org/organisation/%s" % (request.application,
"%s"),
list_fields = ["id",])
return s3_rest_controller("org", "organisation")
# -----------------------------------------------------------------------------
def site():
"""
@todo: Avoid redirect
"""
s3mgr.load("org_site")
if len(request.args):
site_id = request.args[0]
site_r = db.org_site[site_id]
tablename = site_r.instance_type
table = s3db.table(tablename)
if table:
query = (table.site_id == site_id)
id = db(query).select(db[tablename].id,
limitby = (0, 1)).first().id
cf = tablename.split("_", 1)
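# Example (illustrative): an instance_type of "org_office" splits into c="org", f="office",
# redirecting to /org/office/<id>.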
redirect(URL(c = cf[0],
f = cf[1],
args = [id]))
raise HTTP(404)
# -----------------------------------------------------------------------------
def message():
#if "verify_email_sent" in request.args:
title = T("Account Registered - Please Check Your Email")
message = T( "%(system_name)s has sent an email to %(email)s to verify your email address.\nPlease check your email to verify this address. If you do not receive this email, please check your junk email or spam filters." )\
% {"system_name": deployment_settings.get_system_name(),
"email": request.vars.email}
image = "email_icon.png"
return dict(title = title,
message = message,
image_src = "/%s/static/img/%s" % (request.application, image)
)
# -----------------------------------------------------------------------------
def rapid():
""" Set/remove rapid data entry flag """
val = request.vars.get("val", True)
if val == "0":
val = False
else:
val = True
session.s3.rapid_data_entry = val
response.view = "xml.html"
return dict(item=str(session.s3.rapid_data_entry))
# -----------------------------------------------------------------------------
def user_profile_onaccept(form):
""" Update the UI locale from user profile """
if form.vars.language:
session.s3.language = form.vars.language
return
# -----------------------------------------------------------------------------
def user():
""" Auth functions based on arg. See gluon/tools.py """
auth.settings.on_failed_authorization = URL(f="error")
_table_user = auth.settings.table_user
if request.args and request.args(0) == "profile":
#_table_user.organisation.writable = False
_table_user.utc_offset.readable = True
_table_user.utc_offset.writable = True
# If we have an opt_in and some post_vars then update the opt_in value
if deployment_settings.get_auth_opt_in_to_email() and request.post_vars:
opt_list = deployment_settings.get_auth_opt_in_team_list()
removed = []
selected = []
for opt_in in opt_list:
if opt_in in request.post_vars:
selected.append(opt_in)
else:
removed.append(opt_in)
ptable = s3db.pr_person
putable = s3db.pr_person_user
query = (putable.user_id == request.post_vars.id) & \
(putable.pe_id == ptable.pe_id)
person_id = db(query).select(ptable.id, limitby=(0, 1)).first().id
db(ptable.id == person_id).update(opt_in = selected)
g_table = s3db["pr_group"]
gm_table = s3db["pr_group_membership"]
# Remove them from any team they are a member of in the removed list
for team in removed:
query = (g_table.name == team) & \
(gm_table.group_id == g_table.id) & \
(gm_table.person_id == person_id)
gm_rec = db(query).select(g_table.id, limitby=(0, 1)).first()
if gm_rec:
db(gm_table.id == gm_rec.id).delete()
# Add them to the team (if they are not already a team member)
for team in selected:
query = (g_table.name == team) & \
(gm_table.group_id == g_table.id) & \
(gm_table.person_id == person_id)
gm_rec = db(query).select(g_table.id, limitby=(0, 1)).first()
if not gm_rec:
query = (g_table.name == team)
team_rec = db(query).select(g_table.id, limitby=(0, 1)).first()
# if the team doesn't exist then add it
if team_rec == None:
team_id = g_table.insert(name = team, group_type = 5)
else:
team_id = team_rec.id
gm_table.insert(group_id = team_id,
person_id = person_id)
auth.settings.profile_onaccept = user_profile_onaccept
self_registration = deployment_settings.get_security_self_registration()
login_form = register_form = None
if request.args and request.args(0) == "login":
auth.messages.submit_button = T("Login")
form = auth()
login_form = form
if s3.crud.submit_style:
form[0][-1][1][0]["_class"] = s3.crud.submit_style
elif request.args and request.args(0) == "register":
if not self_registration:
session.error = T("Registration not permitted")
redirect(URL(f="index"))
if deployment_settings.get_terms_of_service():
auth.messages.submit_button = T("I accept. Create my account.")
else:
auth.messages.submit_button = T("Register")
# Default the profile language to the one currently active
_table_user.language.default = T.accepted_language
form = auth()
register_form = form
# Add client-side validation
s3_register_validation()
elif request.args and request.args(0) == "change_password":
form = auth()
elif request.args and request.args(0) == "profile":
if deployment_settings.get_auth_openid():
form = DIV(form, openid_login_form.list_user_openids())
else:
form = auth()
# add an opt in clause to receive emails depending on the deployment settings
if deployment_settings.get_auth_opt_in_to_email():
ptable = s3db.pr_person
ltable = s3db.pr_person_user
opt_list = deployment_settings.get_auth_opt_in_team_list()
query = (ltable.user_id == form.record.id) & \
(ltable.pe_id == ptable.pe_id)
db_opt_in_list = db(query).select(ptable.opt_in, limitby=(0, 1)).first().opt_in
for opt_in in opt_list:
field_id = "%s_opt_in_%s" % (_table_user, opt_in)
if opt_in in db_opt_in_list:
checked = "selected"
else:
checked = None
form[0].insert(-1,
TR(TD(LABEL("Receive %s updates:" % opt_in,
_for="opt_in",
_id=field_id + SQLFORM.ID_LABEL_SUFFIX),
_class="w2p_fl"),
INPUT(_name=opt_in, _id=field_id, _type="checkbox", _checked=checked),
_id=field_id + SQLFORM.ID_ROW_SUFFIX))
else:
# Retrieve Password
form = auth()
# Use Custom Ext views
# Best to not use an Ext form for login: can't save username/password in browser & can't hit 'Enter' to submit!
#if request.args(0) == "login":
# response.title = T("Login")
# response.view = "auth/login.html"
return dict(form=form,
login_form=login_form,
register_form=register_form,
self_registration=self_registration)
# -----------------------------------------------------------------------------
def facebook():
""" Login using Facebook """
if not auth.settings.facebook:
redirect(URL(f="user", args=request.args, vars=request.vars))
auth.settings.login_form = s3base.FaceBookAccount()
form = auth()
return dict(form=form)
# -----------------------------------------------------------------------------
def google():
""" Login using Google """
if not auth.settings.google:
redirect(URL(f="user", args=request.args, vars=request.vars))
auth.settings.login_form = s3base.GooglePlusAccount()
form = auth()
return dict(form=form)
# -----------------------------------------------------------------------------
def source():
""" RESTful CRUD controller """
return s3_rest_controller("s3", "source")
# -----------------------------------------------------------------------------
# About Sahana
def apath(path=""):
""" Application path """
import os
from gluon.fileutils import up
opath = up(request.folder)
#TODO: This path manipulation is very OS specific.
while path[:3] == "../": opath, path=up(opath), path[3:]
return os.path.join(opath,path).replace("\\", "/")
def about():
"""
The About page provides details on the software dependencies and
versions available to this instance of Sahana Eden.
@ToDo: Avoid relying on Command Line tools which may not be in path
- pull back info from Python modules instead?
"""
import sys
import subprocess
import string
python_version = sys.version
web2py_version = open(apath("../VERSION"), "r").read()[8:]
sahana_version = open(os.path.join(request.folder, "VERSION"), "r").read()
# Database
sqlite_version = None
mysql_version = None
mysqldb_version = None
pgsql_version = None
psycopg_version = None
if db_string[0].find("sqlite") != -1:
try:
import sqlite3
#sqlite_version = (subprocess.Popen(["sqlite3", "-version"], stdout=subprocess.PIPE).communicate()[0]).rstrip()
sqlite_version = sqlite3.version
except:
sqlite_version = T("Unknown")
elif db_string[0].find("mysql") != -1:
try:
mysql_version = (subprocess.Popen(["mysql", "--version"], stdout=subprocess.PIPE).communicate()[0]).rstrip()[10:]
except:
mysql_version = T("Unknown")
try:
import MySQLdb
mysqldb_version = MySQLdb.__revision__
except:
mysqldb_version = T("Not installed or incorrectly configured.")
else:
# Postgres
try:
pgsql_reply = (subprocess.Popen(["psql", "--version"], stdout=subprocess.PIPE).communicate()[0])
pgsql_version = string.split(pgsql_reply)[2]
except:
pgsql_version = T("Unknown")
try:
import psycopg2
psycopg_version = psycopg2.__version__
except:
psycopg_version = T("Not installed or incorrectly configured.")
# Libraries
try:
import reportlab
reportlab_version = reportlab.Version
except:
reportlab_version = T("Not installed or incorrectly configured.")
try:
import xlwt
xlwt_version = xlwt.__VERSION__
except:
xlwt_version = T("Not installed or incorrectly configured.")
return dict(
python_version=python_version,
sahana_version=sahana_version,
web2py_version=web2py_version,
sqlite_version=sqlite_version,
mysql_version=mysql_version,
mysqldb_version=mysqldb_version,
pgsql_version=pgsql_version,
psycopg_version=psycopg_version,
reportlab_version=reportlab_version,
xlwt_version=xlwt_version
)
# -----------------------------------------------------------------------------
def help():
""" Custom View """
response.title = T("Help")
return dict()
# -----------------------------------------------------------------------------
def contact():
"""
Give the user options to contact the site admins.
Either:
An internal Support Requests database
or:
Custom View
"""
if auth.is_logged_in() and deployment_settings.has_module("support"):
# Provide an internal Support Requests ticketing system.
prefix = "support"
resourcename = "req"
tablename = "%s_%s" % (prefix, resourcename)
table = s3db[tablename]
# Pre-processor
def prep(r):
if r.interactive:
# Only Admins should be able to update ticket status
status = table.status
actions = table.actions
if not auth.s3_has_role(ADMIN):
status.writable = False
actions.writable = False
if r.method != "update":
status.readable = False
status.writable = False
actions.readable = False
actions.writable = False
return True
response.s3.prep = prep
output = s3_rest_controller(prefix, resourcename)
return output
else:
# Default: Simple Custom View
response.title = T("Contact us")
return dict()
# END =========================================================================
| mit | -8,412,861,163,105,064,000 | 40.216945 | 225 | 0.501557 | false | 4.23924 | false | false | false |
reiven/pungabot | util/ptime.py | 3 | 1715 |

#!/usr/bin/python
# -*- coding: iso8859-1 -*-
## (c)2004 Timo Reunanen <parker _et_ wolfenstein _dit_ org>
import time
import re
_exact=r'''
^
(?P<hour> \d{1,2}) ## hour
[:.]
(?P<min> \d{2}) ## minutes
(?:
[:.]
(?P<sec>\d{2} ) ## secods (optional)
)?
$
'''
_add=r'''
^
[+]
(?: ## hour
(?P<hour> \d+)+h ## syntax: 1234h
)? ## optional
\s*
(?: ## minutes
(?P<min> \d+)+m ## syntax: 1234m
)? ## optional
\s*
(?: ## seconds
(?P<sec> \d+)+s? ## syntax: 1234s or 1234
)? ## optional
$
'''
exactRe=re.compile(_exact, re.VERBOSE | re.MULTILINE | re.I)
addRe=re.compile(_add, re.VERBOSE | re.MULTILINE | re.I)
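# Examples (illustrative): convert('11.23') returns the next occurrence of 11:23 as an epoch time;
# convert('+1h 30m') and convert('+5400') both return a time 5400 seconds from now.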
class TimeException(Exception): pass
def convert(s):
s=s.strip()
m=exactRe.match(s)
if m:
tm=time.time()
year, mon, mday, hour, min, sec, wday, yday, isdst = time.localtime(tm)
hour=int(m.group('hour'))
min=int(m.group('min'))
sec=int(m.group('sec') or '00')
ret=time.mktime( (year, mon, mday, hour, min, sec, wday, yday, isdst) )
while ret < tm:
ret += 86400
return ret
m=addRe.match(s)
if m:
hour=int(m.group('hour') or '0')
min=int(m.group('min') or '0')
sec=int(m.group('sec') or '0')
addSecs=hour*3600 + min*60 + sec
return time.time()+addSecs
raise TimeException('Invalid syntax')
if __name__=='__main__':
year, mon, mday, hour, min, sec, wday, yday, isdst = time.localtime()
print (hour, min, sec)
print time.time()-time.mktime(time.localtime())
print convert('11.23')-time.time()
| gpl-3.0 | 5,110,685,968,079,071,000 | 20.708861 | 79 | 0.498542 | false | 2.877517 | false | false | false |
michellab/Sire | wrapper/Tools/AmberLoader.py | 2 | 31281 |

#!/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import re
from Sire.IO import *
from Sire.Mol import *
from Sire.CAS import *
from Sire.System import *
from Sire.Move import *
from Sire.MM import *
from Sire.FF import *
from Sire.Units import *
from Sire.Vol import *
from Sire.Maths import *
from Sire.Base import *
from Sire.Qt import *
from Sire.ID import *
from Sire.Config import *
import Sire.Stream
from Sire.Tools import Parameter, resolveParameters
from Sire.Tools.WaterChanger import convertTip3PtoTip4P
###################################
# Parameters used by this module #
###################################
dobonds = Parameter("move bonds", True, """Whether or not to move the ligands bonds""")
doangles = Parameter("move angles", True, """Whether or not to move the ligands angles""")
dodihedrals = Parameter("move dihedrals", True, """Whether or not to move the ligands dihedrals""")
water_model = Parameter("water model", None,
"""The water model to use. Note, by default the water model is read from
the protein and water crd/top files. If you want to force a change
in water model, then set it here, e.g. if you are loading a TIP3P box
but want to use TIP4P, then set this parameter to "tip4p".""")
BASE_DIHEDRALH_FLEX = Parameter("h dihedral flex", 30*degrees, "Base dihedral rotation for H")
BASE_DIHEDRAL_FLEX = Parameter("dihedral flex", 20*degrees, "Base dihedral rotation")
BASE_ANGLE_FLEX = Parameter("angle flex", 0.25*degrees, "Base angle rotation")
BASE_BOND_FLEX = Parameter("bond flex", 0.025*angstroms, "Base bond stretch amount")
BASE_TRANSLATION = Parameter("translation", 0.75*angstroms, "Base translation delta amount")
BASE_ROTATION = Parameter("rotation", 30*degrees, "Base rigid body rotation")
BASE_MAXVAR = Parameter("maxvar", 10, "Maximum number of degrees of freedom to move at once")
BASE_MAXVAR_B = Parameter("maxvar bonds", 2, "Maximum number of bonds to move at once")
BASE_MAXVAR_A = Parameter("maxvar angles", 4, "Maximum number of angles to move at once")
BASE_MAXVAR_D = Parameter("maxvar dihedrals", 4, "Maximum number of dihedrals to move at once")
###################################
def getResidueNames(molecule):
nres = molecule.nResidues()
resnams = []
for i in range(0, nres):
resnams.append( str( molecule.residue(ResIdx(i)).name().value()).upper() )
return resnams
class NamingScheme:
def __init__(self):
self._protein_names = ["GLH", "ILE", "GLN", "GLY", "GLU",
"CYS", "HIS", "HID", "SER", "LYS",
"LYN", "PRO", "CYX", "HIE", "ASH",
"ASN", "HIP", "VAL", "THR", "ASP",
"TRP", "PHE", "ALA", "MET", "LEU",
"ARG", "TYR", "NME", "ACE"]
self._water_names = [ "WAT", "T3P", "T4P", "HOH" ]
self._ion_names = [ "NA+", "Na+", "CA+", "Ca+", "CAL", "CL-", "Cl-" ]
self._solute_names = [ "LIG" ]
def proteinsGroupName(self):
return MGName("protein")
def solutesGroupName(self):
return MGName("solute")
def solventsGroupName(self):
return MGName("solvent")
def watersGroupName(self):
return MGName("water")
def ionsGroupName(self):
return MGName("ions")
def allMoleculesGroupName(self):
return MGName("all")
def fixedMoleculesGroupName(self):
return MGName("fixed_molecules")
def boundaryMoleculesGroupName(self):
return MGName("boundary_molecules")
def mobileProteinSidechainsGroupName(self):
return MGName("protein_sidechains")
def mobileProteinBackbonesGroupName(self):
return MGName("protein_backbones")
def mobileSolutesGroupName(self):
return MGName("mobile_solutes")
def mobileSolventsGroupName(self):
return MGName("mobile_solvents")
def addProteinResidueName(self, name):
self._protein_names.append( name.upper() )
def addWaterResidueName(self, name):
self._water_names.append( name.upper() )
def addSoluteResidueName(self, name):
self._solute_names.append( name.upper() )
def addIonResidueName(self, name):
self._ion_names.append( name.upper() )
def proteinResidueNames(self):
return self._protein_names
def waterResidueNames(self):
return self._water_names
def soluteResidueNames(self):
return self._solute_names
def ionResidueNames(self):
return self._ion_names
def setProteinResidueNames(self, names):
self._protein_names = []
for name in names:
self.addProteinResidueName(name)
def setWaterResidueNames(self, names):
self._water_names = []
for name in names:
self.addWaterResidueName(name)
    def setSoluteResidueNames(self, names):
        self._solute_names = []
        for name in names:
            self.addSoluteResidueName(name)
    def setIonResidueNames(self, names):
        self._ion_names = []
        for name in names:
            self.addIonResidueName(name)
def _isType(self, molecule, names, max_residues = None):
try:
resnams = getResidueNames(molecule)
except:
resnams = molecule
if max_residues:
if len(resnams) > max_residues:
return False
for resnam in resnams:
if resnam in names:
return True
try:
if str(molecule.name().value()).upper() in names:
return True
else:
return False
except:
return False
def isProtein(self, molecule):
return self._isType(molecule, self._protein_names)
def isWater(self, molecule):
return self._isType(molecule, self._water_names, 1)
def isIon(self, molecule):
return self._isType(molecule, self._ion_names, 1)
def isSolute(self, molecule):
return self._isType(molecule, self._solute_names)
def findMolecule(system, molname):
molecules = system.molecules()
molname = molname.upper()
for molnum in molecules.molNums():
molecule = molecules[molnum][0].molecule()
if str(molecule.name().value()).upper() == molname:
return molecule
resnams = getResidueNames(molecule)
for resnam in resnams:
if resnam == molname:
return molecule
return None
def addMoleculeToSystem(molecule, system, naming_scheme = NamingScheme()):
"""This function adds the passed molecule to the passed system
using the passed naming scheme to assign the molecule to the
correct molecule group"""
resnams = getResidueNames(molecule)
system.add(molecule, MGName(naming_scheme.allMoleculesGroupName().value()))
if naming_scheme.isSolute(resnams):
system.add(molecule, MGName(naming_scheme.solutesGroupName().value()))
elif naming_scheme.isProtein(resnams):
system.add(molecule, MGName(naming_scheme.proteinsGroupName().value()))
elif naming_scheme.isWater(resnams):
system.add(molecule, MGName(naming_scheme.watersGroupName().value()))
system.add(molecule, MGName(naming_scheme.solventsGroupName().value()))
elif naming_scheme.isIon(resnams):
system.add(molecule, MGName(naming_scheme.ionsGroupName().value()))
system.add(molecule, MGName(naming_scheme.solventsGroupName().value()))
elif molecule.nResidues() == 1:
system.add(molecule, MGName(naming_scheme.solventsGroupName().value()))
else:
system.add(molecule, MGName(naming_scheme.solutesGroupName().value()))
def createSystemFrom(molecules, space, system_name, naming_scheme = NamingScheme()):
"""Create a new System from the passed molecules and space,
sorting the molecules into different molecule groups based on the
passed naming scheme"""
system = System(system_name)
# If requested, change the water model for all water molecules
if water_model.val == "tip4p":
molnums = molecules.molNums()
new_molecules = Molecules()
print("Forcing all water molecules to use the %s water model..." % water_model.val)
print("Converting %d molecules..." % len(molnums))
i = 0
for molnum in molnums:
molecule = molecules[molnum].molecule()
if i % 100 == 0:
print("%d" % i)
sys.stdout.flush()
elif i % 10 == 0:
print(".", end=' ')
sys.stdout.flush()
i += 1
if molecule.nAtoms() == 3:
# this could be a TIP3P water
resname =str(molecule.residue().name().value()).lower()
if resname == "wat" or resname == "t3p":
new_molecule = convertTip3PtoTip4P(molecule)
if new_molecule:
molecule = new_molecule
new_molecules.add(molecule)
print("%d" % i)
molecules = new_molecules
nmols = molecules.nMolecules()
print("Number of molecules == %s" % nmols)
print("System space == %s" % space)
if nmols == 0:
return system
print("Assigning molecules to molecule groups...")
solute_group = MoleculeGroup(naming_scheme.solutesGroupName().value())
protein_group = MoleculeGroup(naming_scheme.proteinsGroupName().value())
solvent_group = MoleculeGroup(naming_scheme.solventsGroupName().value())
water_group = MoleculeGroup(naming_scheme.watersGroupName().value())
ion_group = MoleculeGroup(naming_scheme.ionsGroupName().value())
all_group = MoleculeGroup(naming_scheme.allMoleculesGroupName().value())
# The all molecules group has all of the molecules
all_group.add(molecules)
system.add(all_group)
# Run through each molecule and decide what type it is...
molnums = molecules.molNums()
molnums.sort()
central_molecule = None
solutes = []
proteins = []
solvents = []
waters = []
ions = []
for molnum in molnums:
molecule = molecules[molnum].molecule()
resnams = getResidueNames(molecule)
if naming_scheme.isSolute(resnams):
solutes.append(molecule)
elif naming_scheme.isProtein(resnams):
proteins.append(molecule)
elif naming_scheme.isWater(resnams):
waters.append(molecule)
elif naming_scheme.isIon(resnams):
ions.append(molecule)
elif molecule.nResidues() == 1:
solvents.append(molecule)
else:
solutes.append(molecule)
# Ok - we have now divided everything up into groups
for solute in solutes:
solute_group.add(solute)
for protein in proteins:
protein_group.add(protein)
for water in waters:
solvent_group.add(water)
water_group.add(water)
for solvent in solvents:
solvent_group.add(solvent)
for ion in ions:
solvent_group.add(ion)
ion_group.add(ion)
if solute_group.nMolecules() > 0:
system.add(solute_group)
if protein_group.nMolecules() > 0:
system.add(protein_group)
if solvent_group.nMolecules() > 0:
system.add(solvent_group)
if water_group.nMolecules() > 0:
system.add(water_group)
if ion_group.nMolecules() > 0:
system.add(ion_group)
print("Number of solute molecules == %s" % solute_group.nMolecules())
print("Number of protein molecules == %s" % protein_group.nMolecules())
print("Number of ions == %s" % ion_group.nMolecules())
print("Number of water molecules == %s" % water_group.nMolecules())
print("Number of solvent molecules == %s" % solvent_group.nMolecules())
print("(solvent group is waters + ions + unidentified single-residue molecules)")
system.setProperty("space", space)
system.add( SpaceWrapper( Vector(0), all_group ) )
system.applyConstraints()
print("Returning the constructed system")
return system
def createSystem(top_file, crd_file, naming_scheme = NamingScheme()):
"""Create a new System from the molecules read in from the passed amber
topology and coordinate files. This sorts the molecules into different
molecule groups based on the passed naming scheme"""
system = MoleculeParser.read(top_file,crd_file)
# Load all of the molecules and their parameters from
# the topology and coordinate files
print("Loading the molecules from the files \"%s\" and \"%s\"..." % \
(crd_file, top_file))
return createSystemFrom(system[MGIdx(0)], system.property("space"), top_file, naming_scheme)
def centerSystem(system, molecule):
print("Setting the origin of the system to the center of molecule %s (%s)..." % (molecule, molecule.number()))
center = molecule.evaluate().centerOfMass()
print("This requires translating everything by %s..." % (-center))
moved_mols = Molecules()
for molnum in system.molNums():
molecule = system[molnum][0].molecule()
molecule = molecule.move().translate(-center).commit()
moved_mols.add(molecule)
system.update(moved_mols)
return system
def guessTranslation( solute ):
natoms = solute.nAtoms()
return (BASE_TRANSLATION.val) / ( natoms / 5 + 1)
def guessRotation( solute ):
natoms = solute.nAtoms()
sphere_radius = solute.evaluate().boundingSphere().radius()
return (BASE_ROTATION.val) / ( sphere_radius ** 2)
def generateFlexibility(solute):
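    """Auto-generate a Flexibility property for the passed solute: rigid body translation/rotation
       deltas are guessed from its size, and bond/angle/dihedral moves are added with magnitudes
       scaled down by the number of atoms each move displaces (moves that would break a ring are skipped)."""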
connectivity = solute.property('connectivity')
all_bonds = connectivity.getBonds()
all_angles = connectivity.getAngles()
all_dihedrals = connectivity.getDihedrals()
flexibility = Flexibility(solute)
flexibility.setRotation( guessRotation(solute) )
flexibility.setTranslation( guessTranslation(solute) )
try:
flexibility.setMaximumVar( BASE_MAXVAR.val )
except:
flexibility.setMaximumBondVar( BASE_MAXVAR_B.val )
flexibility.setMaximumAngleVar( BASE_MAXVAR_A.val )
flexibility.setMaximumDihedralVar( BASE_MAXVAR_D.val )
# Redundant torsions are discarded according to the following algorithm
# 1) Do not sample a torsion at0-at1-at2-at3 if a variable torsion has
# already been defined around at1-at2 or at2-at1.
# 2) Do not sample a torsion if it would break a ring
#
if dodihedrals.val:
var_dihedrals = []
for dihedral in all_dihedrals:
#print dihedral
tomove = True
# print dihedral
at0 = dihedral.atom0()
at1 = dihedral.atom1()
at2 = dihedral.atom2()
at3 = dihedral.atom3()
# See if a one of the variable dihedral
# already rotates around the same torsion
for vardih in var_dihedrals:
if ( ( at1 == vardih.atom1() and at2 == vardih.atom2() ) or
( at2 == vardih.atom1() and at1 == vardih.atom2() ) ):
# Yes so will not move this torsion
tomove = False
break
# If still wondering...See if a rotation around this dihedral would break a ring
if tomove:
try:
dihbond = BondID(at1, at2)
#print dihbond
solute.move().change(dihbond,1*degrees)
except UserWarning as error:
                    # extract the type of the error
error_type = re.search(r"(Sire\w*::\w*)", str(error)).group(0)
if error_type == "SireMol::ring_error":
# print "This dof would move a ring and is therefore skipped"
tomove = False
else:
# re-throw the exception
raise error
if tomove:
# Find out how many atoms would move
#print dihedral
gr0, gr1 = connectivity.split(at1, at2)
ngr0 = gr0.nSelected()
ngr1 = gr1.nSelected()
if (ngr0 <= ngr1):
smallgroup = gr0
else:
smallgroup = gr1
smallgroup = smallgroup.subtract(at1)
smallgroup = smallgroup.subtract(at2)
factor = smallgroup.nSelected()
flexibility.add(dihedral, BASE_DIHEDRAL_FLEX.val/factor)
var_dihedrals.append(dihedral)
# And the angles ....
if doangles.val:
moved_atoms = []
for angle in all_angles:
# print angle
at0 = angle.atom0()
at2 = angle.atom2()
# Do not sample that dof if an existing dof would already move this atom
if ( ( at0 in moved_atoms) and (at2 in moved_atoms) ):
continue
# Test if the angle breaks a ring, if so do not sample it
try:
solute.move().change(angle,1*degrees)
except UserWarning as error:
                # extract the type of the error
error_type = re.search(r"(Sire\w*::\w*)", str(error)).group(0)
if error_type == "SireMol::ring_error":
# print "This dof would move a ring and is therefore skipped"
continue
else:
# re-throw the exception
raise error
gr0, gr1 = connectivity.split(at0, angle.atom1(), at2)
ngr0 = gr0.nSelected()
ngr1 = gr1.nSelected()
if (ngr0 <= ngr1):
smallgroup = gr0
else:
smallgroup = gr1
factor = smallgroup.nSelected()
flexibility.add(angle, BASE_ANGLE_FLEX.val/factor)
if at0 not in moved_atoms:
moved_atoms.append(at0)
if at2 not in moved_atoms:
moved_atoms.append(at2)
# And the bonds...
if dobonds.val:
for bond in all_bonds:
try:
solute.move().change(bond,1*angstrom)
except UserWarning as error:
                # extract the type of the error
error_type = re.search(r"(Sire\w*::\w*)", str(error)).group(0)
if error_type == "SireMol::ring_error":
# print "This dof would move a ring and is therefore skipped"
continue
else:
# re-throw the exception
raise error
gr0, gr1 = connectivity.split(bond.atom0(), bond.atom1() )
ngr0 = gr0.nSelected()
ngr1 = gr1.nSelected()
if (ngr0 <= ngr1):
smallgroup = gr0
else:
smallgroup = gr1
factor = smallgroup.nSelected()
flexibility.add(bond, BASE_BOND_FLEX.val/factor)
return flexibility
def getCoordGroup(atoms, coords_property="coordinates"):
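    "collect the chosen coordinates property of the passed atoms into a single CoordGroup"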
coords = []
for i in range(0, atoms.count()):
atom = atoms[i]
coords.append(atom.property(coords_property))
return CoordGroup(coords)
def getAtomNearCOG( molecule ):
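    "return the atom of the molecule that lies closest to its geometric center"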
mol_centre = molecule.evaluate().center()
mindist = 99999.0
for x in range(0, molecule.nAtoms()):
atom = molecule.atoms()[x]
at_coords = atom.property('coordinates')
dist = Vector().distance2(at_coords, mol_centre)
if dist < mindist:
mindist = dist
nearest_atom = atom
return nearest_atom
def addFlexibility(system, reflection_center=None, reflection_radius=None, \
naming_scheme=NamingScheme()):
print("Adding flexibility to the system...")
# create a group for all of the fixed molecules and residues
fixed_group = MoleculeGroup( naming_scheme.fixedMoleculesGroupName().value() )
# create a group for the fixed residues that are bonded to the mobile residues
boundary_group = MoleculeGroup( naming_scheme.boundaryMoleculesGroupName().value() )
if reflection_center is None or reflection_radius is None:
print ("No reflection radius or reflection molecule specified, so moving all "
"molecules and residues in the system.")
reflection_radius = None
reflection_center = None
else:
print(("Only moving molecules/residues that are within a distance %s A "
"of the point %s.") % (reflection_radius.value(), reflection_center))
system.setProperty("reflection center", AtomCoords(CoordGroup(1,reflection_center)))
system.setProperty("reflection sphere radius", VariantProperty(reflection_radius.to(angstroms)))
# fit the protein z-matrix templates to all of the protein molecules and add the mobile
# residues to the mobile_sc_group and mobile_bb_group for mobile sidechains and backbones
if naming_scheme.proteinsGroupName() in system.mgNames():
protein_group = system[naming_scheme.proteinsGroupName()]
# create a zmatrix maker that will be used to build the z-matrices for each protein molecule
zmat_maker = ZmatrixMaker()
zmat_maker.loadTemplates( os.path.join(parameter_directory, "amber.zmatrices") )
# now create the molecule groups that hold the flexible side chains and flexible backbone groups
mobile_sc_group = MoleculeGroup(naming_scheme.mobileProteinSidechainsGroupName().value())
mobile_bb_group = MoleculeGroup(naming_scheme.mobileProteinBackbonesGroupName().value())
# the extra atoms moved as part of a backbone move
hn_atoms = AtomName("N", CaseInsensitive) * AtomName("H", CaseInsensitive) * \
AtomName("HN", CaseInsensitive) * AtomName("HN1", CaseInsensitive) * \
AtomName("HN2", CaseInsensitive) * AtomName("HN3", CaseInsensitive)
# loop over each protein molecule
for molnum in protein_group.molNums():
protein_mol = protein_group[molnum].molecule()
print("Applying residue templates for protein %s" % molnum)
protein_mol = zmat_maker.applyTemplates(protein_mol)
system.update(protein_mol)
if reflection_radius:
space = Cartesian()
mobile_resnums = []
# only move side chains within "sc_radius" and backbones within "bb_radius" of the ligand molecule
print("Looking for which residues are within the reflection sphere...")
for i in range(0, protein_mol.nResidues()):
res = protein_mol.residue( ResIdx(i) )
distance = space.minimumDistance(CoordGroup(1,reflection_center), getCoordGroup(res.atoms()))
if distance < reflection_radius.value():
# add the residue to the mobile sidechains group
mobile_sc_group.add(res)
mobile_resnums.append( res.number() )
# now add the atoms needed from the residue to the mobile backbones group
atoms = protein_mol.select(ResIdx(i)).selection()
# for the backbone move to work, the residue must contain
# AtomName("CA", CaseInsensitive) and AtomName("N", CaseInsensitive) )
has_backbone = False
try:
if atoms.selected( AtomName("CA", CaseInsensitive) ) and \
atoms.selected( AtomName("N", CaseInsensitive) ):
has_backbone = True
except:
pass
if has_backbone:
if i < (protein_mol.nResidues()-1):
try:
atoms.deselect( hn_atoms + ResIdx(i) )
except:
pass
if i > 0:
try:
atoms.select( hn_atoms + ResIdx(i+1) )
except:
pass
mobile_bb_group.add( PartialMolecule(protein_mol, atoms) )
else:
print("Not moving backbone of %s as it doesn't contain atoms N or CA" % protein_mol.residue(ResIdx(i)))
# now loop over all of the residues and work out which ones are fixed, and which ones
# are bonded to fixed residues
connectivity = protein_mol.property("connectivity")
for i in range(0, protein_mol.nResidues()):
res = protein_mol.residue( ResIdx(i) )
if not res.number() in mobile_resnums:
# is this residue bonded to any of the mobile residues? If so, then it is a boundary residue
is_boundary = False
for bonded_res in connectivity.connectionsTo( res.number() ):
bonded_resnum = protein_mol.residue(bonded_res).number()
if bonded_resnum in mobile_resnums:
is_boundary = True
break
if is_boundary:
boundary_group.add(res)
else:
fixed_group.add(res)
else:
# assume that the backbone and side chains of all residues are flexible
for i in range(0, protein_mol.nResidues()):
res = protein_mol.residue( ResIdx(i) )
mobile_sc_group.add(res)
atoms = protein_mol.select(ResIdx(i)).selection()
if i < (protein_mol.nResidues()-1):
try:
atoms.deselect( hn_atoms + ResIdx(i) )
except:
pass
if i > 0:
try:
atoms.select( hn_atoms + ResIdx(i+1) )
except:
pass
mobile_bb_group.add( PartialMolecule(protein_mol, atoms) )
if mobile_sc_group.nMolecules() > 0:
system.add(mobile_sc_group)
if mobile_bb_group.nMolecules() > 0:
system.add(mobile_bb_group)
print("The number of residues with flexible sidechains equals %s" % mobile_sc_group.nViews())
print("The number of residues with flexible backbones equals %s" % mobile_bb_group.nViews())
print("The number of boundary residues equals %s" % boundary_group.nViews())
print("The number of fixed residues equals %s" % fixed_group.nViews())
# add all of the mobile solute molecules to the mobile_solute_group and auto-generate
# the z-matricies of all of the mobile solutes
if naming_scheme.solutesGroupName() in system.mgNames():
solute_group = system[naming_scheme.solutesGroupName()]
mobile_solute_group = MoleculeGroup( naming_scheme.mobileSolutesGroupName().value() )
# store the average solute translation and rotation deltas
avg_trans_delta = 0
avg_rot_delta = 0
for molnum in solute_group.molNums():
solute_mol = solute_group[molnum].molecule()
move_solute = True
# Only move the solute if it is within the sphere cutoff of the ligand (if a ligand and solvent
# radius have been specified...)
if reflection_radius:
move_solute = (Vector.distance(reflection_center, \
solute_mol.evaluate().centerOfMass()) < reflection_radius.value())
if move_solute:
print("\nAuto-detecting the flexible degrees of freedom for solute %s" % molnum)
# auto-generate the flexibility - bonds, angles and dihedrals
flexibility = generateFlexibility(solute_mol)
solute_mol = solute_mol.edit().setProperty("flexibility", flexibility).commit()
print("\nFlexibility of solute %s equals:" % molnum)
flex = solute_mol.property("flexibility")
print(flex)
avg_trans_delta += flex.translation().to(angstrom)
avg_rot_delta += flex.rotation().to(degrees)
system.update(solute_mol)
mobile_solute_group.add(solute_mol)
else:
print("Not moving solute %s as it is outside the spherical solvent cutoff of the ligand." % solute_mol)
fixed_group.add(solute_mol)
if mobile_solute_group.nMolecules() > 0:
system.add(mobile_solute_group)
system.setProperty("average solute translation delta", \
VariantProperty(avg_trans_delta / mobile_solute_group.nMolecules()))
system.setProperty("average solute rotation delta", \
VariantProperty(avg_rot_delta / mobile_solute_group.nMolecules()))
print("\nNumber of mobile solute molecules equals %s" % mobile_solute_group.nMolecules())
# add all of the mobile solvent molecules to the mobile_solvent_group
if naming_scheme.solventsGroupName() in system.mgNames():
solvent_group = system[ naming_scheme.solventsGroupName() ]
mobile_solvent_group = MoleculeGroup( naming_scheme.mobileSolventsGroupName().value() )
print("Adding flexibility to the solvent...")
if reflection_radius:
for molnum in solvent_group.molNums():
solvent_mol = solvent_group[molnum]
if Vector.distance(reflection_center, solvent_mol.evaluate().centerOfMass()) < reflection_radius.value():
mobile_solvent_group.add(solvent_mol)
else:
fixed_group.add(solvent_mol)
else:
mobile_solvent_group.add( solvent_group.molecules() )
if mobile_solvent_group.nMolecules() > 0:
system.add(mobile_solvent_group)
print("\nNumber of mobile solvent molecules equals %s" % mobile_solvent_group.nMolecules())
# All finished - just need to add in the fixed and boundary groups
if fixed_group.nMolecules() > 0:
system.add(fixed_group)
if boundary_group.nMolecules() > 0:
system.add(boundary_group)
print("\nNumber of fixed (or partially fixed) molecules equals %s" % fixed_group.nMolecules())
return system
def printGroupInfo(system, group_name):
try:
group = system[MGName(group_name)]
print("%s : nMolecules() == %d" % (str(group), group.nMolecules()))
except:
print("There is no group called \"%s\"" % group_name)
| gpl-2.0 | 2,624,787,477,966,018,000 | 35.581287 | 131 | 0.583304 | false | 3.97825 | false | false | false |
tshi04/machine-learning-codes | headGAN-ff/headgan.py | 1 | 3126 | import re
import numpy as np
import tensorflow as tf
from model import *
class headGAN(object):
def __init__(self, d_net, g_net, wordvec, article, title, wd_list):
print 'GAN headline'
self.wordvec = wordvec
self.article = article
self.title = title
self.d_net = d_net
self.g_net = g_net
self.wd_list = wd_list
self.sess = tf.InteractiveSession()
self.build_model()
self.train_model()
def build_model(self):
art_len = self.article.shape[1]
ttl_len = self.title.shape[1]
wd_dim = self.wordvec.shape[1]
self.in_art = tf.placeholder(tf.int32,[None, art_len])
self.in_ttl = tf.placeholder(tf.int32,[None, ttl_len])
r_art = tf.nn.embedding_lookup(self.wordvec, self.in_art)
r_art = tf.expand_dims(r_art, -1)
r_art = tf.transpose(r_art, [0,2,1,3], name='r_art')
self.r_art = r_art
self.r_ttl = tf.nn.embedding_lookup(self.wordvec, self.in_ttl)
self.r_ttl = tf.expand_dims(self.r_ttl, -1)
self.r_ttl = tf.transpose(self.r_ttl, [0,2,1,3], name='r_ttl')
self.f_ttl = self.g_net(input_data=r_art)
r_logits = self.d_net(input_data=self.r_ttl, reuse=False)
f_logits = self.d_net(input_data=self.f_ttl, reuse=True)
r_ent = tf.nn.sigmoid(r_logits)
f_ent = tf.nn.sigmoid(f_logits)
self.d_loss = tf.reduce_mean(r_ent) - tf.reduce_mean(f_ent)
self.g_loss = tf.reduce_mean(f_ent, name='g_loss')
self.g_var = self.g_net.vars
self.d_var = self.d_net.vars
self.opt_method = 'rmsprop'
if self.opt_method == 'rmsprop':
self.d_opt = tf.train.RMSPropOptimizer(0.01,decay=0.9).minimize(self.d_loss,var_list=self.d_var)
self.g_opt = tf.train.RMSPropOptimizer(0.01,decay=0.9).minimize(self.g_loss,var_list=self.g_var)
else:
self.d_opt = tf.train.AdamOptimizer().minimize(self.d_loss,var_list=self.d_var)
self.g_opt = tf.train.AdamOptimizer().minimize(self.g_loss,var_list=self.g_var)
def train_model(self):
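        "alternate generator and discriminator updates over the first 20000 article/title pairs, printing the losses and a sample of generated, reference and article words every 1000 steps"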
self.sess.run(tf.global_variables_initializer())
k = 0
while k < 20000:
feed_ = {self.in_art: [self.article[k]], self.in_ttl: [self.title[k]]}
self.sess.run(self.g_opt, feed_dict=feed_)
self.sess.run(self.d_opt, feed_dict=feed_)
if k%1000 == 0:
print k, self.sess.run([self.d_loss, self.g_loss], feed_dict=feed_)
tt = self.sess.run(self.f_ttl, feed_dict=feed_)
xx = self.sess.run(self.r_art[0,:,:,0], feed_dict=feed_)
                dd = self.sess.run(tf.matmul(tt[0,0], xx))
idx = np.argmax(dd, axis=1).tolist()
for kk in idx:
print self.wd_list[self.article[k,kk]],
print
for kk in self.title[k]:
print self.wd_list[kk],
print
for kk in self.article[k]:
print self.wd_list[kk],
print
print
k += 1
if k == 19999:
k = 0
| gpl-3.0 | -7,351,620,951,786,911,000 | 34.931034 | 108 | 0.555982 | false | 2.943503 | false | false | false |
joelphillips/pypyramid | src/pypyr/assembly.py | 1 | 9235 | '''
Created on Aug 17, 2010
@author: joel
'''
import numpy
from pypyr.mesh import Basis, ElementFinder, ElementQuadrature, BoundaryQuadrature
import itertools as it
from pypyr.timing import *
def processIndices(basis, boundarytags):
""" Given a basis (a collection of elements) and a set of boundaries, extract the internal and external degrees of freedom
returns:
I: a sparse matrix that maps each the local degrees of freedom for each element to their global indices
boundaries: a map of tag->DegreeSet, which can be used to evaluate all the degrees on each boundary
internalidx: ids of the internal degrees of freedom
"""
import scipy.sparse as ss
indices = basis.getIndices()
n = basis.elementfactory.index # = max(indices)+1
I = ss.csr_matrix((numpy.ones_like(indices), indices, range(0,len(indices)+1)))
idxflag = numpy.ones(n, dtype=bool)
boundaries = {}
for tag in boundarytags:
bdy = basis.getBoundary(tag)
boundaries[tag] = bdy
if bdy: idxflag[bdy.indices] = False
internalidx = numpy.nonzero(idxflag)[0]
return I, boundaries, internalidx
def blockInnerProducts(quadweights, leftvalsiter, rightvalsiter, leftI, rightI):
""" Evaluate the inner product matrix
returns a sparse matrix equal to leftI.transpose * L.transpose * quadweights * R * rightI
where L and R are block diagonal matrices whose blocks are given by the iterables, leftvalsiter and rightvalsiter
If the left or right vals have more than 2 dimensions, the extra dimensions are multiplied and summed (tensor-contracted),
    with broadcasting as necessary, i.e. this is an inner product - it can't be used for a more general multiplication
"""
import scipy.sparse as ss
data = []
idx = []
ip = [0]
for e, (leftvals, rightvals, weights) in enumerate(it.izip(leftvalsiter, rightvalsiter, quadweights)):
if len(weights):
lvs = len(leftvals.shape)
rvs = len(rightvals.shape)
vs = max(lvs,rvs)
leftvals = leftvals.reshape(leftvals.shape + (1,)*(vs - lvs))
rightvals = rightvals.reshape(rightvals.shape + (1,)*(vs - rvs))
lvw = leftvals * weights.reshape((-1,) + (1,)*(vs-1))
# print lvw.shape, rightvals.shape
data.append(numpy.tensordot(lvw, rightvals, ([0]+range(2,vs), [0]+range(2,vs))))
idx.append(e)
ip.append(len(idx))
# print e, idx, ip
V = ss.bsr_matrix((data, idx, ip),dtype=float, shape=(leftI.shape[0],rightI.shape[0]))
return leftI.transpose() * V * rightI
class System(object):
""" A System contains everything that's need to construct stiffness matrices and load vectors.
This is an abstract-ish class see SymmetricSystem and AsymmetricSystem for concrete implementations.
Parameters:
quadrule: a tuple of quadrature points and weights on the reference pyramid
meshevents: A function that produces mesh events
leftbasis, rightbasis: see pypyr.mesh.Basis
leftindexinfo, rightindexinfo: see processIndices
"""
def __init__(self, quadrule, meshevents, leftbasis, rightbasis, leftindexinfo, rightindexinfo):
self.elementfinder = meshevents(ElementFinder())
self.elementinfo = meshevents(ElementQuadrature())
self.boundaryquad = meshevents(BoundaryQuadrature())
self.refquadpoints, refweights = quadrule
self.quadweights = list(self.elementinfo.getWeights(self.refquadpoints, refweights))
self.leftbasis = leftbasis
self.rightbasis = rightbasis
self.leftI, self.leftbdys, self.leftintidx = leftindexinfo
self.rightI, self.rightbdys, self.rightintidx = rightindexinfo
def _transposeinplace(self):
""" Transpose this object """
self.leftbasis, self.rightbasis = self.rightbasis, self.leftbasis
self.leftI, self.rightI = self.rightI, self.leftI
self.leftbdys, self.rightbdys = self.rightbdys, self.leftbdys
self.leftintidx, self.rightintidx = self.rightintidx, self.leftintidx
return self
def processSystem(self, leftvalsiter, rightvalsiter):
""" Construct the (non-boundary aware) stiffness matrix """
return blockInnerProducts(self.quadweights, leftvalsiter, rightvalsiter, self.leftI, self.rightI)
def processBoundary(self, sysmat, tagtog):
""" Split the stiffness matrix into the internal and external parts. Evaluate boundary data
sysmat: system matrix (which will come from processSystem()).
tagtog: dictionary of functions to evaluate on the boundar(y|ies)
returns:
internalSystem: S[I,I] where I is the internal degrees
tagtoBoundarySystem: tag->S[I,E[tag]] where E[tag] gives the indices of the external degrees
tagtogvals: g[tag] evaluated at the degrees of freedom associated with boundary "tag".
Somewhat inefficient if there's a significant proportion of dofs on the boundary """
SI = sysmat[self.leftintidx, :]
internalSystem = SI[:,self.rightintidx]
tagtogvals = {}
tagtoBoundarySystem = {}
for tag, bdy in self.rightbdys.iteritems():
tagtogvals[tag] = bdy.evaluatedofs(tagtog[tag])
tagtoBoundarySystem[tag] = SI[:,bdy.indices]
return internalSystem, tagtoBoundarySystem, tagtogvals
def loadVector(self, f, deriv=False):
""" Calculate the load vector for the internal shape functions """
testvalsiter = self.leftbasis.getElementValues(self.refquadpoints, deriv)
fvalsiter = it.imap(f, self.elementinfo.getQuadPoints(self.refquadpoints))
return blockInnerProducts(self.quadweights, testvalsiter, fvalsiter, self.leftI, numpy.ones((self.elementinfo.numElements(), 1)))[self.leftintidx,:]
def boundaryLoad(self, tagtog, squarequad, trianglequad, deriv=False):
""" Calculate the load vector based on a boundary integral, e.g. for Dirichlet data in the dual formulation of the mixed laplacian"""
tagtogsys = {}
for tag, g in tagtog.iteritems():
x,w,n = zip(*self.boundaryquad.getQuadratures(tag, squarequad, trianglequad))
# print map(g,x,n)
# print map(lambda e,p: 0 if len(p) is 0 else e.values(p), self.leftbasis.elements, x)
fvalsiter = it.imap(g, x, n)
testvalsiter = it.imap(lambda e,p: 0 if len(p) is 0 else e.values(p), self.leftbasis.elements, x)
tagtogsys[tag] = blockInnerProducts(w, testvalsiter, fvalsiter, self.leftI, numpy.ones((self.elementinfo.numElements(), 1)))[self.leftintidx,:]
return tagtogsys
def evaluate(self, points, U, tagtoG = {}, deriv=False):
""" Evaluate a solution given by the coefficients of the internal degrees, U, at specified points.
tagtoG should be the coefficients for the external degrees"""
UG = numpy.zeros(self.rightbasis.elementfactory.index)
UG[self.rightintidx] = U
for tag, G in tagtoG.iteritems():
UG[self.rightbdys[tag].indices] = G
etop = self.elementfinder.elementPointMap(points)
UGvals = numpy.zeros((len(points), self.rightbasis.elements[0].ncpts))
for e, pids in zip(self.rightbasis.elements, etop):
if len(pids):
evals = e.derivs(points[pids]) if deriv else e.values(points[pids])
UGvals[pids] += numpy.tensordot(evals, UG[e.indices], ([1],[0]))
return UGvals
class SymmetricSystem(System):
""" A symmetric system"""
def __init__(self, elements, quadrule, meshevents, boundarytags):
self.basis = Basis(elements)
meshevents(self.basis)
indexinfo = processIndices(self.basis, boundarytags)
System.__init__(self, quadrule, meshevents, self.basis, self.basis, indexinfo, indexinfo)
self.elements = elements
def systemMatrix(self, deriv):
return super(SymmetricSystem, self).processSystem(*it.tee(self.basis.getElementValues(self.refquadpoints,deriv), 2))
class AsymmetricSystem(System):
""" An Asymmetric system"""
def __init__(self, leftelements, rightelements, quadrule, meshevents, leftboundarytags, rightboundarytags):
leftbasis = Basis(leftelements)
rightbasis = Basis(rightelements)
meshevents(leftbasis)
meshevents(rightbasis)
super(AsymmetricSystem, self).__init__(quadrule, meshevents, leftbasis, rightbasis, processIndices(leftbasis, leftboundarytags), processIndices(rightbasis, rightboundarytags))
def systemMatrix(self, leftderiv, rightderiv):
leftvals = self.leftbasis.getElementValues(self.refquadpoints, leftderiv)
rightvals = self.rightbasis.getElementValues(self.refquadpoints, rightderiv)
return super(AsymmetricSystem, self).processSystem(leftvals, rightvals)
def transpose(self):
import copy
return copy.copy(self)._transposeinplace()
| bsd-3-clause | 246,970,839,694,353,700 | 49.741758 | 183 | 0.663996 | false | 3.797286 | false | false | false |
JaneliaSciComp/Neuroptikon | Source/Scripts/C. elegans/Centrality.py | 1 | 2516 | # Copyright (c) 2010 Howard Hughes Medical Institute.
# All rights reserved.
# Use is subject to Janelia Farm Research Campus Software Copyright 1.1 license terms.
# http://license.janelia.org/license/jfrc_copyright_1_1.html
"""
A custom centrality script for the C. elegans network.
"""
import networkx
# Load the neurons and their interconnections if needed.
if not any(network.objects):
execfile('Connectivity.py')
def progressCallback(fraction_complete = None):
return updateProgress('Calculating centrality...', fraction_complete)
# Compute the centrality of each node in the graph. (uncomment one of the following)
#centralities = networkx.degree_centrality(network.simplifiedGraph())
#centralities = networkx.closeness_centrality(network.simplifiedGraph(), weighted_edges = True, progress_callback = progressCallback)
centralities = networkx.betweenness_centrality(network.simplifiedGraph(), weighted_edges = True, progress_callback = progressCallback)
#centralities = networkx.load_centrality(network.simplifiedGraph(), weighted_edges = True, progress_callback = progressCallback)
if any(centralities):
# Compute the maximum centrality so we can normalize.
maxCentrality = max(centralities.itervalues())
# Alter the visualization of each node based on its centrality.
objectCentralities = {}
for node, centrality in centralities.iteritems():
object = network.objectWithId(node)
objectCentralities[object] = centrality / maxCentrality
diameter = 0.001 + objectCentralities[object] * 0.029
display.setVisibleSize(object, [diameter] * 3)
for synapse in network.synapses():
centrality = objectCentralities[synapse.preSynapticNeurite.neuron()]
for partner in synapse.postSynapticPartners:
centrality += objectCentralities[partner if isinstance(partner, Neuron) else partner.neuron()]
centrality /= 1 + len(synapse.postSynapticPartners)
display.setVisibleOpacity(synapse, centrality)
for gapJunction in network.gapJunctions():
centrality = 0.0
for neurite in gapJunction.neurites():
centrality += objectCentralities[neurite.neuron()]
centrality /= 2.0
display.setVisibleOpacity(gapJunction, centrality)
for innervation in network.innervations():
centrality = (objectCentralities[innervation.neurite.neuron()] + objectCentralities[innervation.muscle]) / 2.0
display.setVisibleOpacity(innervation, centrality)
| bsd-3-clause | 7,372,495,485,617,536,000 | 46.471698 | 134 | 0.738871 | false | 3.949765 | false | false | false |
cloudera/hue | desktop/core/ext-py/josepy-1.1.0/setup.py | 2 | 2983 | import io
import sys
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
version = '1.1.0'
# Please update tox.ini when modifying dependency version requirements
install_requires = [
# load_pem_private/public_key (>=0.6)
# rsa_recover_prime_factors (>=0.8)
'cryptography>=0.8',
# Connection.set_tlsext_host_name (>=0.13)
'PyOpenSSL>=0.13',
# For pkg_resources. >=1.0 so pip resolves it to a version cryptography
# will tolerate; see #2599:
'setuptools>=1.0',
'six>=1.9.0', # needed for python_2_unicode_compatible
]
testing_requires = [
'coverage>=4.0',
'pytest-cache>=1.0',
'pytest-cov',
'flake8',
'pytest-flake8>=0.5',
'pytest>=2.8.0',
'mock',
]
# env markers cause problems with older pip and setuptools
if sys.version_info < (2, 7):
install_requires.extend([
'argparse',
'ordereddict',
])
dev_extras = [
'pytest',
'tox',
]
docs_extras = [
'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
'sphinx_rtd_theme',
]
with io.open('README.rst', encoding='UTF-8') as f:
long_description = f.read()
class PyTest(TestCommand):
user_options = []
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = ''
def run_tests(self):
import shlex
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(shlex.split(self.pytest_args))
sys.exit(errno)
setup(
name='josepy',
version=version,
description='JOSE protocol implementation in Python',
long_description=long_description,
url='https://github.com/certbot/josepy',
author="Certbot Project",
author_email='client-dev@letsencrypt.org',
license='Apache License 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Security',
],
packages=find_packages(where='src'),
package_dir={'': 'src'},
include_package_data=True,
install_requires=install_requires,
extras_require={
'dev': dev_extras,
'docs': docs_extras,
'tests': testing_requires,
},
entry_points={
'console_scripts': [
'jws = josepy.jws:CLI.run',
],
},
tests_require=testing_requires,
cmdclass={
'test': PyTest,
},
)
| apache-2.0 | -2,400,312,496,107,875,000 | 25.39823 | 78 | 0.60476 | false | 3.61138 | true | false | false |
LouisChen1905/OneAnalyser | src/one_analyse/request_code.py | 1 | 1851 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 10 April 2016
@author: chensi
'''
from sqlalchemy.orm.scoping import scoped_session
from sqlalchemy.orm.session import sessionmaker
from one_analyse import one_engine
from threadpool import ThreadPool, makeRequests
from one_analyse.lib.db.ormtables import OneORM
import urllib.request
import json
import codecs
from one_analyse.lib.db.ormtables import PeriodRecord
DBScopedSession = scoped_session(
sessionmaker(
autoflush=False,
autocommit=False,
bind=one_engine
)
)
code_url_format = "http://1.163.com/code/get.do?gid=424&period=%s&cid=%s"
def request_code(period_id, user_id, rid, num):
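    "fetch the codes for the given period and user from the 1.163.com API and store them, comma-joined, on the matching PeriodRecord row"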
url = code_url_format % (period_id, user_id)
response = urllib.request.urlopen(url)
result = json.load(codecs.getreader("utf-8")(response))
codes = result['result']['list'][0]['code']
session = DBScopedSession()
session.query(PeriodRecord).\
filter(PeriodRecord.rid==rid).\
filter(PeriodRecord.period_id==period_id).\
filter(PeriodRecord.user_id==user_id).\
update({'codes':','.join(codes)})
session.commit()
DBScopedSession.close()
if __name__ == '__main__':
db2 = OneORM()
db2.InitDB()
# Initialize thread pool
tp = ThreadPool(50)
# Get all period records from database
session = DBScopedSession()
period_records = db2.get_period_records(session)
DBScopedSession.remove()
data = []
for r in period_records:
param_list = [r.period_id, r.user_id, r.rid, r.num]
data.append((param_list, []))
requests = makeRequests(request_code, data)
[tp.putRequest(req) for req in requests]
tp.wait() | mit | -4,420,334,246,598,185,500 | 28.774194 | 73 | 0.61897 | false | 3.667992 | false | false | false |
gwct/grampa | helper_scripts/grampa_plot.py | 1 | 3054 | import sys, os
############################################
def barPlot(xdata,ydata,xtitle,ytitle,maintitle,outname,barcol='rgb(0,102,51)',plotcol='#e1e1ea',bgcol='#fffae6',w=1000,h=1000,bmar=150):
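    "render an offline plotly bar chart of ydata against xdata and write it to outname as an html file"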
data = [go.Bar(x=xdata,y=ydata,marker=dict(color=barcol),opacity=0.6)];
layout = go.Layout(
autosize=False,
width=w,
height=h,
paper_bgcolor=bgcol,
plot_bgcolor=plotcol,
title=maintitle,
titlefont=dict(
family="Arial, sans-serif",
size=30,
),
xaxis=dict(
title=xtitle,
titlefont=dict(
family="Arial, sans-serif",
size=20,
color="#737373"
),
),
yaxis=dict(
title=ytitle,
titlefont=dict(
family="Arial, sans-serif",
size=20,
color="#737373"
)
)
);
fig = go.Figure(data=data, layout=layout);
plot(fig, filename=outname);
############################################
def scatterPlot(xdata,ydata,xtitle,ytitle,maintitle,outname,barcol='rgb(0,102,51)',plotcol='#e1e1ea',bgcol='#fffae6',w=1000,h=500,bmar=150):
data = [go.Scatter(x=xdata,y=ydata,mode='markers',opacity=0.6)];
layout = go.Layout(
autosize=False,
width=w,
height=h,
margin=go.Margin(
l=70,
r=20,
b=150,
t=70,
pad=0
),
paper_bgcolor=bgcol,
plot_bgcolor=plotcol,
title=maintitle,
titlefont=dict(
family="Arial, sans-serif",
size=30,
),
xaxis=dict(
title=xtitle,
titlefont=dict(
family="Arial, sans-serif",
size=20,
color="#737373",
),
tickangle=90
),
yaxis=dict(
title=ytitle,
titlefont=dict(
family="Arial, sans-serif",
size=20,
color="#737373"
)
)
);
fig = go.Figure(data=data, layout=layout);
plot(fig, filename=outname);
############################################
if len(sys.argv) != 3 or "-h" in sys.argv:
print("\n# This is a beta version of this script and may be buggy.")
print("# Usage: grampa_plot.py [input file] [output file]");
print("# ---> [input file] must be a grampa output (_out.txt) file.")
print("# ---> [output file] will be an html file with your plot.\n")
sys.exit();
infilename = sys.argv[1];
outfilename = sys.argv[2];
if outfilename[len(outfilename)-5:] != ".html":
outfilename += ".html";
try:
from plotly.offline import plot
import plotly.graph_objs as go
import plotly.plotly as py
except:
sys.exit("Missing some of the required modules (plotly)")
# Option parsing and import of plot libraries if no errors.
score_dict = {};
for line in open(infilename):
if line[0] == "#" or "The" in line or "Score" in line:
continue;
line = line.strip().split("\t");
if line[0] == "ST":
score_dict[line[0]] = int(line[3]);
else:
score_dict[line[1] + "-" + line[2]] = int(line[4]);
sorted_keys = sorted(score_dict, key=score_dict.get)
sorted_vals = [];
max_len = -999;
for key in sorted_keys:
sorted_vals.append(score_dict[key]);
if len(key) > max_len:
max_len = len(key);
bot_margin = max_len * 15;
scatterPlot(sorted_keys,sorted_vals,"H1-H2 Node", "Score", "GRAMPA Results: " + infilename, outfilename, bmar=bot_margin);
| gpl-3.0 | 2,242,864,692,299,380,500 | 23.238095 | 140 | 0.609037 | false | 2.660279 | false | false | false |
tectronics/open-ihm | src/openihm/model/household.py | 3 | 3987 | #!/usr/bin/env python
"""
This file is part of open-ihm.
open-ihm is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
open-ihm is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with open-ihm. If not, see <http://www.gnu.org/licenses/>.
"""
from database import Database
from householdmember_manager import HouseholdMemberManager
from householdasset_manager import HouseholdAssetManager
from householdincome_crop_manager import HouseholdCropIncomeManager
from householdincome_livestock_manager import HouseholdLivestockIncomeManager
from householdincome_wildfoods_manager import HouseholdWildfoodsIncomeManager
from householdincome_transfers_manager import HouseholdTransfersIncomeManager
from householdincome_employment_manager import HouseholdEmploymentIncomeManager
from householdcharacteristicmanager import HouseholdCharacteristicManager
class Household(HouseholdMemberManager, HouseholdCharacteristicManager, HouseholdAssetManager, HouseholdCropIncomeManager, HouseholdLivestockIncomeManager, HouseholdWildfoodsIncomeManager, HouseholdTransfersIncomeManager, HouseholdEmploymentIncomeManager):
def __init__(self, pid, hhid=0, householdname="", dateofcollection=""):
self.pid = pid
self.hhid = hhid
if ( householdname == "" and dateofcollection== "" ):
if ( not self.getHouseholdDetails() ):
self.householdname = ""
else:
self.setData(householdname, dateofcollection)
def getHouseholdDetails(self):
database = Database()
database.open()
query = "SELECT householdname, dateofcollection FROM households WHERE pid=%s AND hhid=%s " % ( self.pid, self.hhid )
rows = database.execSelectQuery( query )
num = len(rows)
if (num != 0):
exists = True
for row in rows:
self.householdname = row[0]
self.dateofcollection = row[1]
else:
exists = False
database.close()
return exists
def setData(self, householdname, dateofcollection):
database = Database()
database.open()
query = '''INSERT INTO households(hhid,pid,dateofcollection,householdname)
VALUES(%s,%s, '%s', '%s')''' % (self.hhid, self.pid, dateofcollection, householdname)
# execute query
database.execUpdateQuery( query )
database.close()
# update household attributes
self.householdname = householdname
self.dateofcollection = dateofcollection
def editData(self, hhid, householdname, dateofcollection):
database = Database()
database.open()
query = '''UPDATE households SET hhid=%s, dateofcollection='%s', householdname='%s'
WHERE hhid=%s AND pid=%s''' % (hhid, dateofcollection, householdname, self.hhid, self.pid)
# execute query
database.execUpdateQuery( query )
database.close()
# update household attributes
self.hhid = hhid
self.householdname = householdname
self.dateofcollection = dateofcollection
def getProjectID(self):
return self.pid
def getHouseholdID(self):
return self.hhid
def getHouseholdName(self):
return self.householdname
def getDateOfCollection(self):
return self.dateofcollection
| lgpl-3.0 | 3,986,717,152,432,254,000 | 39.53125 | 257 | 0.663155 | false | 4.123061 | false | false | false |
Adarnof/adarnauth-whsales | whsales/urls.py | 1 | 2103 | """whsales URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from whsales import views
urlpatterns = [
url(r'^$', views.listings_panel, name='listings_panel'),
url(r'^admin/', admin.site.urls),
url(r'^list$', views.listings_list, name='listings_list'),
url(r'^list/me$', views.my_listings, name='user_listings'),
url(r'^listing/(\d*)$', views.listing_view, name='listing_view'),
url(r'^listing/(\d*)/sell$', views.mark_sold, name='mark_sold'),
url(r'^listing/(\d*)/delete$', views.delete_listing, name='delete_listing'),
url(r'^sold$', views.listings_sold, name='listings_sold'),
url(r'^tokens$', views.select_token, name='select_token'),
url(r'^tokens/add$', views.add_token, name='add_token'),
url(r'^tokens/(\d*)/post$', views.post_listing, name='add_listing'),
url(r'^search$', views.search, name='search'),
url(r'^about$', views.about, name='about'),
url(r'^wanted$', views.wanted_panel, name='wanted_panel'),
url(r'^wanted/add$', views.add_wanted, name='add_wanted'),
url(r'^wanted/list$', views.wanted_list, name='wanted_list'),
url(r'^wanted/list/me$', views.my_wanted, name='user_wanted'),
url(r'^wanted/(\d*)$', views.wanted_view, name='wanted_view'),
url(r'^wanted/(\d*)/fulfill$', views.fulfill_wanted, name='mark_fulfilled'),
url(r'^wanted/(\d*)/delete$', views.delete_wanted, name='delete_wanted'),
url(r'^core/', include('singlecharcore.urls')),
]
| gpl-3.0 | 6,050,143,814,335,140,000 | 47.906977 | 80 | 0.66001 | false | 3.181543 | false | false | false |
DMS-Aus/Roam | ext_libs/cx_Freeze/samples/matplotlib/matplotlib_eg.py | 1 | 1652 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from numpy import arange, sin, pi
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
import sys
import wx
class CanvasFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, -1, 'CanvasFrame', size=(550, 350))
color = wx.Colour("WHITE")
self.SetBackgroundColour(color)
self.figure = Figure()
self.axes = self.figure.add_subplot(111)
t = arange(0.0, 3.0, 0.01)
s = sin(2 * pi * t)
self.axes.plot(t, s)
self.canvas = FigureCanvas(self, -1, self.figure)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.SetSizerAndFit(self.sizer)
self.add_toolbar()
def add_toolbar(self):
self.toolbar = NavigationToolbar2Wx(self.canvas)
self.toolbar.Realize()
if wx.Platform == '__WXMAC__':
self.SetToolBar(self.toolbar)
else:
tw, th = self.toolbar.GetSize()
fw, fh = self.canvas.GetSize()
self.toolbar.SetSize(wx.Size(fw, th))
self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
self.toolbar.update()
def OnPaint(self, event):
self.canvas.draw()
class App(wx.App):
def OnInit(self):
'''Create the main window and insert the custom frame'''
frame = CanvasFrame()
frame.Show(True)
return True
app = App(0)
app.MainLoop()
| gpl-2.0 | -2,295,577,713,273,751,000 | 29.592593 | 79 | 0.616828 | false | 3.463312 | false | false | false |
benrudolph/commcare-hq | corehq/ex-submodules/phonelog/migrations/0003_auto__del_userlog__del_log__add_userentry__add_unique_userentry_xform_.py | 3 | 6323 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'UserLog'
db.delete_table(u'phonelog_userlog')
# Deleting model 'Log'
db.delete_table(u'phonelog_log')
# Adding model 'UserEntry'
db.create_table(u'phonelog_userentry', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('xform_id', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
('i', self.gf('django.db.models.fields.IntegerField')()),
('user_id', self.gf('django.db.models.fields.CharField')(max_length=50)),
('sync_token', self.gf('django.db.models.fields.CharField')(max_length=50)),
('username', self.gf('django.db.models.fields.CharField')(max_length=100, db_index=True)),
))
db.send_create_signal(u'phonelog', ['UserEntry'])
# Adding unique constraint on 'UserEntry', fields ['xform_id', 'i']
db.create_unique(u'phonelog_userentry', ['xform_id', 'i'])
# Adding model 'DeviceReportEntry'
db.create_table(u'phonelog_devicereportentry', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('xform_id', self.gf('django.db.models.fields.CharField')(max_length=50)),
('i', self.gf('django.db.models.fields.IntegerField')()),
('msg', self.gf('django.db.models.fields.TextField')()),
('type', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)),
('date', self.gf('django.db.models.fields.DateTimeField')(db_index=True)),
('domain', self.gf('django.db.models.fields.CharField')(max_length=100, db_index=True)),
('device_id', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
('app_version', self.gf('django.db.models.fields.TextField')()),
('username', self.gf('django.db.models.fields.CharField')(max_length=100, db_index=True)),
))
db.send_create_signal(u'phonelog', ['DeviceReportEntry'])
# Adding unique constraint on 'DeviceReportEntry', fields ['xform_id', 'i']
db.create_unique(u'phonelog_devicereportentry', ['xform_id', 'i'])
def backwards(self, orm):
# Removing unique constraint on 'DeviceReportEntry', fields ['xform_id', 'i']
db.delete_unique(u'phonelog_devicereportentry', ['xform_id', 'i'])
# Removing unique constraint on 'UserEntry', fields ['xform_id', 'i']
db.delete_unique(u'phonelog_userentry', ['xform_id', 'i'])
# Adding model 'UserLog'
db.create_table(u'phonelog_userlog', (
('username', self.gf('django.db.models.fields.CharField')(max_length=100, db_index=True)),
('xform_id', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
('user_id', self.gf('django.db.models.fields.CharField')(max_length=50)),
('sync_token', self.gf('django.db.models.fields.CharField')(max_length=50)),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal(u'phonelog', ['UserLog'])
# Adding model 'Log'
db.create_table(u'phonelog_log', (
('username', self.gf('django.db.models.fields.CharField')(max_length=100, db_index=True)),
('msg', self.gf('django.db.models.fields.TextField')()),
('domain', self.gf('django.db.models.fields.CharField')(max_length=100, db_index=True)),
('date', self.gf('django.db.models.fields.DateTimeField')(db_index=True)),
('xform_id', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
('app_version', self.gf('django.db.models.fields.TextField')()),
('type', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)),
('id', self.gf('django.db.models.fields.CharField')(max_length=50, primary_key=True)),
('device_id', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
))
db.send_create_signal(u'phonelog', ['Log'])
# Deleting model 'UserEntry'
db.delete_table(u'phonelog_userentry')
# Deleting model 'DeviceReportEntry'
db.delete_table(u'phonelog_devicereportentry')
models = {
u'phonelog.devicereportentry': {
'Meta': {'unique_together': "[('xform_id', 'i')]", 'object_name': 'DeviceReportEntry'},
'app_version': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'device_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'i': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msg': ('django.db.models.fields.TextField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'xform_id': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'phonelog.userentry': {
'Meta': {'unique_together': "[('xform_id', 'i')]", 'object_name': 'UserEntry'},
'i': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sync_token': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'xform_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
}
}
complete_apps = ['phonelog']
| bsd-3-clause | -2,299,862,887,122,350,000 | 54.464912 | 109 | 0.588486 | false | 3.45708 | false | false | false |
karimbahgat/PythonGis | (sandbox,tobemerged)/pythongis/raster/loader.py | 1 | 11514 | import sys, os, itertools, operator
import PIL.Image
def grouper(iterable, n):
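    "collect the iterable into consecutive chunks of length n, e.g. grouper([1,2,3,4], 2) -> (1,2), (3,4)"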
args = [iter(iterable)] * n
return itertools.izip(*args)
def from_file(filepath):
def check_world_file(filepath):
dir, filename = os.path.split(filepath)
        filename, filetype = os.path.splitext(filename)
        filetype = filetype.lstrip(".").lower()  # splitext keeps the leading dot, so strip it before comparing
# find world file extension based on filetype
if filetype in ("tif","tiff","geotiff"):
ext = ".tfw"
elif filetype in ("jpg","jpeg"):
ext = ".jgw"
elif filetype == "png":
ext = ".pgw"
elif filetype == "bmp":
ext = ".bpw"
elif filetype == "gif":
ext = ".gfw"
else:
return None
        worldfilepath = os.path.join(dir, filename + ext)
        if os.path.lexists(worldfilepath):
            worldfile = open(worldfilepath, "r")
# note that the params are arranged slightly differently
# ...in the world file from the usual affine a,b,c,d,e,f
# ...so we have to rearrange their sequence later
# check out http://en.wikipedia.org/wiki/World_file
# ...very useful here and for affine transforms in general
            xscale,yskew,xskew,yscale,xoff,yoff = (float(value) for value in worldfile.read().split())
            worldfile.close()
            return [xscale,yskew,xskew,yscale,xoff,yoff]
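    # Illustrative sketch (not part of the original file; the values are hypothetical):
    # a world file lists xscale, yskew, xskew, yscale, xoff, yoff one per line, while
    # the affine sequence used below is xscale, xskew, xoff, yskew, yscale, yoff.
    # So a .tfw containing 30.0, 0.0, 0.0, -30.0, 500000.0, 4600000.0 is rearranged
    # to the transform coefficients [30.0, 0.0, 500000.0, 0.0, -30.0, 4600000.0].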
if filepath.lower().endswith((".asc",".ascii")):
tempfile = open(filepath,"r")
### Step 1: check header for file info
info = dict()
def _nextheader(headername=None, force2length=True):
"returns a two-list of headername and headervalue"
nextline = False
while not nextline:
nextline = tempfile.readline().strip()
nextline = nextline.split()
if force2length:
if len(nextline) != 2:
raise Exception("Each header line must contain exactly two elements")
if headername:
if nextline[0].lower() != headername:
raise Exception("The required headername was not found: %s instead of %s"%(nextline[0].lower(),headername))
return nextline
# dimensions
cols = int(_nextheader(headername="ncols")[1])
rows = int(_nextheader(headername="nrows")[1])
# x/y_orig
_next = _nextheader()
if _next[0].lower() in ("xllcenter","xllcorner"):
xorig = float(_next[1])
xorigtype = _next[0].lower()
_next = _nextheader()
if _next[0].lower() in ("yllcenter","yllcorner"):
yorig = float(_next[1])
yorigtype = _next[0].lower()
info["xy_cell"] = (0, rows)
info["xy_geo"] = (xorig, yorig)
if "corner" in xorigtype and "corner" in yorigtype:
info["cell_anchor"] = "sw"
elif "corner" in xorigtype:
info["cell_anchor"] = "w"
elif "corner" in yorigtype:
info["cell_anchor"] = "s"
else:
info["cell_anchor"] = "center"
# cellsize
cellsize = float(_nextheader(headername="cellsize")[1])
info["cellwidth"] = cellsize
info["cellheight"] = cellsize
# nodata
prevline = tempfile.tell()
_next = _nextheader(force2length=False)
if _next[0].lower() == "nodata_value":
nodata = float(_next[1])
else:
# nd header missing, so set to default and go back to previous header line
nodata = -9999.0
tempfile.seek(prevline)
info["nodata_value"] = nodata
### Step 2: read data into lists
# make sure filereading is set to first data row (in case there are spaces or gaps in bw header and data)
nextline = False
while not nextline:
prevline = tempfile.tell()
nextline = tempfile.readline().strip()
tempfile.seek(prevline)
# collect flat list of cells instead of rows (bc data isn't necessarily organized into lines)
data = []
for line in tempfile.readlines():
data.extend(float(cell) for cell in line.split())
# reshape to correspond with columns-rows and flatten again
reshaped = itertools.izip(*grouper(data, cols))
data = [cell for row in reshaped for cell in row]
# load the data as an image
tempfile.close()
img = PIL.Image.new("F", (rows, cols))
img.putdata(data=data)
# create the cell access object
cells = img.load()
# make a single-grid tuple
grids = [(img,cells)]
### Step 3: Read coordinate ref system
# ascii doesnt have any crs so assume default
crs = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"
return info, grids, crs
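    # Illustrative sketch (not part of the original file; the header values are hypothetical):
    # an .asc file whose header reads
    #   ncols 4 / nrows 3 / xllcorner 0.0 / yllcorner 0.0 / cellsize 10.0 / NODATA_value -9999
    # yields info = {"xy_cell": (0, 3), "xy_geo": (0.0, 0.0), "cell_anchor": "sw",
    #                "cellwidth": 10.0, "cellheight": 10.0, "nodata_value": -9999.0}.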
elif filepath.lower().endswith((".tif",".tiff",".geotiff")):
# for more info:
# http://gis.stackexchange.com/questions/16839/why-does-a-tif-file-lose-projection-information-when-a-pixel-value-is-changed
# https://mail.python.org/pipermail/image-sig/2001-March/001380.html
main_img = PIL.Image.open(filepath)
raw_info = dict(main_img.tag.items())
def process_info(raw_info):
# check tag definitions here
# http://www.digitalpreservation.gov/formats/content/tiff_tags.shtml
# http://duff.ess.washington.edu/data/raster/drg/docs/geotiff.txt
info = dict()
if raw_info.has_key(1025):
# GTRasterTypeGeoKey, aka midpoint pixels vs topleft area pixels
if raw_info.get(1025) == (1,):
# is area
info["cell_anchor"] = "center"
elif raw_info.get(1025) == (2,):
# is point
info["cell_anchor"] = "nw"
else:
# TODO: what would be default value?
pass
if raw_info.has_key(34264):
# ModelTransformationTag, aka 4x4 transform coeffs...
                (a,b,c,d,
                 e,f,g,h,
                 i,j,k,l,
                 m,n,o,p) = raw_info.get(34264)
# But we don't want to meddle with 3-D transforms,
# ...so for now only get the 2-D affine parameters
xscale,xskew,xoff = a,b,d
yskew,yscale,yoff = e,f,h
info["transform_coeffs"] = xscale,xskew,xoff,yskew,yscale,yoff
else:
if raw_info.has_key(33922):
# ModelTiepointTag
x, y, z, geo_x, geo_y, geo_z = raw_info.get(33922)
info["xy_cell"] = x,y
info["xy_geo"] = geo_x,geo_y
if raw_info.has_key(33550):
# ModelPixelScaleTag
scalex,scaley,scalez = raw_info.get(33550)
info["cellwidth"] = scalex
info["cellheight"] = -scaley # note: cellheight must be inversed because geotiff has a reversed y-axis (ie 0,0 is in upperleft corner)
if raw_info.get(42113):
info["nodata_value"] = eval(raw_info.get(42113)) # eval from string to nr
return info
def read_crs(raw_info):
crs = dict()
if raw_info.get(34735):
# GeoKeyDirectoryTag
crs["proj_params"] = raw_info.get(34735)
if raw_info.get(34737):
# GeoAsciiParamsTag
crs["proj_name"] = raw_info.get(34737)
return crs
# read geotiff tags
info = process_info(raw_info)
# if no geotiff tag info look for world file transform coefficients
if len(info) <= 1 and not info.get("transform_coeffs"):
transform_coeffs = check_world_file(filepath)
if transform_coeffs:
# rearrange the param sequence to match affine transform
[xscale,yskew,xskew,yscale,xoff,yoff] = transform_coeffs
info["transform_coeffs"] = [xscale,xskew,xoff,yskew,yscale,yoff]
else:
raise Exception("Couldn't find any geotiff tags or world file needed to position the image in space")
# group image bands and pixel access into grid tuples
grids = []
for img in main_img.split():
cells = img.load()
grids.append((img,cells))
# read coordinate ref system
crs = read_crs(raw_info)
return info, grids, crs
elif filepath.lower().endswith((".jpg",".jpeg",".png",".bmp",".gif")):
        # pure image, so only read if has a world file
        main_img = PIL.Image.open(filepath)
        info = dict()
        transform_coeffs = check_world_file(filepath)
        if transform_coeffs:
            # rearrange the param sequence to match affine transform
            [xscale,yskew,xskew,yscale,xoff,yoff] = transform_coeffs
            info["transform_coeffs"] = [xscale,xskew,xoff,yskew,yscale,yoff]
# group image bands and pixel access into grid tuples
grids = []
for img in main_img.split():
cells = img.load()
grids.append((img,cells))
# read crs
# normal images have no crs, so just assume default crs
crs = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"
return info, grids, crs
else:
raise Exception("Couldn't find the world file needed to position the image in space")
else:
raise Exception("Could not create a raster from the given filepath: the filetype extension is either missing or not supported")
def from_lists(data, nodata_value=-9999.0, cell_anchor="center", **geoargs):
pass
def from_image(image, nodata_value=-9999.0, cell_anchor="center", **geoargs):
size = image.size
print geoargs
info = dict([(key,val) for key,val in geoargs.iteritems()
if key in ("xy_cell","xy_geo","cellwidth",
"cellheight","transform_coeffs") ])
if len(info) <= 3 and not info.get("transform_coeffs"):
raise Exception("To make a new raster from scratch, you must specify either all of xy_cell, xy_geo, cellwidth, cellheight, or the transform coefficients")
info["nodata_value"] = nodata_value
info["cell_anchor"] = cell_anchor
crs = geoargs.get("crs", "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
grids = []
cells = image.load()
grids.append((image, cells))
return info, grids, crs
def new(width, height, nodata_value=-9999.0, bands=1, cell_anchor="center", **geoargs):
size = (width, height)
info = dict([(key,val) for key,val in geoargs.iteritems()
if key in ("xy_cell","xy_geo","cellwidth",
"cellheight","transform_coeffs") ])
if len(info) <= 3 and not info.get("transform_coeffs"):
raise Exception("To make a new raster from scratch, you must specify either all of xy_cell, xy_geo, cellwidth, cellheight, or the transform coefficients")
info["nodata_value"] = nodata_value
info["cell_anchor"] = cell_anchor
crs = geoargs.get("crs", "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
grids = []
for _ in range(bands):
img = PIL.Image.new("F", size, float(nodata_value))
cells = img.load()
grids.append((img, cells))
return info, grids, crs
| mit | -6,514,481,410,946,916,000 | 38.84083 | 162 | 0.556106 | false | 3.904374 | false | false | false |
a-parhom/edx-platform | scripts/xsslint/xsslint/utils.py | 15 | 11534 | """
Utility classes/functions for the XSS Linter.
"""
import re
def is_skip_dir(skip_dirs, directory):
"""
Determines whether a directory should be skipped or linted.
Arguments:
skip_dirs: The configured directories to be skipped.
directory: The current directory to be tested.
Returns:
True if the directory should be skipped, and False otherwise.
"""
for skip_dir in skip_dirs:
skip_dir_regex = re.compile(
"(.*/)*{}(/.*)*".format(re.escape(skip_dir)))
if skip_dir_regex.match(directory) is not None:
return True
return False
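# Illustrative usage sketch (not part of the original module; the directory names
# are hypothetical):
#   is_skip_dir(['node_modules'], 'src/node_modules/foo')  # -> True
#   is_skip_dir(['node_modules'], 'src/app')                # -> False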
class StringLines(object):
"""
StringLines provides utility methods to work with a string in terms of
lines. As an example, it can convert an index into a line number or column
number (i.e. index into the line).
"""
def __init__(self, string):
"""
Init method.
Arguments:
string: The string to work with.
"""
self._string = string
self._line_start_indexes = self._process_line_breaks(string)
# this is an exclusive index used in the case that the template doesn't
# end with a new line
self.eof_index = len(string)
def _process_line_breaks(self, string):
"""
Creates a list, where each entry represents the index into the string
where the next line break was found.
Arguments:
string: The string in which to find line breaks.
Returns:
A list of indices into the string at which each line begins.
"""
line_start_indexes = [0]
index = 0
while True:
index = string.find('\n', index)
if index < 0:
break
index += 1
line_start_indexes.append(index)
return line_start_indexes
def get_string(self):
"""
Get the original string.
"""
return self._string
def index_to_line_number(self, index):
"""
Given an index, determines the line of the index.
Arguments:
index: The index into the original string for which we want to know
the line number
Returns:
The line number of the provided index.
"""
current_line_number = 0
for line_break_index in self._line_start_indexes:
if line_break_index <= index:
current_line_number += 1
else:
break
return current_line_number
def index_to_column_number(self, index):
"""
Gets the column (i.e. index into the line) for the given index into the
original string.
Arguments:
index: The index into the original string.
Returns:
The column (i.e. index into the line) for the given index into the
original string.
"""
start_index = self.index_to_line_start_index(index)
column = index - start_index + 1
return column
def index_to_line_start_index(self, index):
"""
Gets the index of the start of the line of the given index.
Arguments:
index: The index into the original string.
Returns:
The index of the start of the line of the given index.
"""
line_number = self.index_to_line_number(index)
return self.line_number_to_start_index(line_number)
def index_to_line_end_index(self, index):
"""
Gets the index of the end of the line of the given index.
Arguments:
index: The index into the original string.
Returns:
The index of the end of the line of the given index.
"""
line_number = self.index_to_line_number(index)
return self.line_number_to_end_index(line_number)
def line_number_to_start_index(self, line_number):
"""
Gets the starting index for the provided line number.
Arguments:
line_number: The line number of the line for which we want to find
the start index.
Returns:
The starting index for the provided line number.
"""
return self._line_start_indexes[line_number - 1]
def line_number_to_end_index(self, line_number):
"""
Gets the ending index for the provided line number.
Arguments:
line_number: The line number of the line for which we want to find
the end index.
Returns:
The ending index for the provided line number.
"""
if line_number < len(self._line_start_indexes):
return self._line_start_indexes[line_number]
else:
# an exclusive index in the case that the file didn't end with a
# newline.
return self.eof_index
def line_number_to_line(self, line_number):
"""
Gets the line of text designated by the provided line number.
Arguments:
line_number: The line number of the line we want to find.
Returns:
The line of text designated by the provided line number.
"""
start_index = self._line_start_indexes[line_number - 1]
if len(self._line_start_indexes) == line_number:
line = self._string[start_index:]
else:
end_index = self._line_start_indexes[line_number]
line = self._string[start_index:end_index - 1]
return line
def line_count(self):
"""
Gets the number of lines in the string.
"""
return len(self._line_start_indexes)
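# Illustrative usage sketch (not part of the original module):
#   lines = StringLines("ab\ncd\n")
#   lines.index_to_line_number(3)    # -> 2 (index 3 is the "c" on the second line)
#   lines.index_to_column_number(3)  # -> 1
#   lines.line_number_to_line(2)     # -> "cd"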
class ParseString(object):
"""
ParseString is the result of parsing a string out of a template.
A ParseString has the following attributes:
start_index: The index of the first quote, or None if none found
end_index: The index following the closing quote, or None if
unparseable
quote_length: The length of the quote. Could be 3 for a Python
triple quote. Or None if none found.
string: the text of the parsed string, or None if none found.
string_inner: the text inside the quotes of the parsed string, or None
if none found.
"""
def __init__(self, template, start_index, end_index):
"""
Init method.
Arguments:
template: The template to be searched.
start_index: The start index to search.
end_index: The end index to search before.
"""
self.end_index = None
self.quote_length = None
self.string = None
self.string_inner = None
self.start_index = self._find_string_start(template, start_index, end_index)
if self.start_index is not None:
result = self._parse_string(template, self.start_index)
if result is not None:
self.end_index = result['end_index']
self.quote_length = result['quote_length']
self.string = result['string']
self.string_inner = result['string_inner']
def _find_string_start(self, template, start_index, end_index):
"""
Finds the index of the end of start of a string. In other words, the
first single or double quote.
Arguments:
template: The template to be searched.
start_index: The start index to search.
end_index: The end index to search before.
Returns:
The start index of the first single or double quote, or None if no
quote was found.
"""
quote_regex = re.compile(r"""['"]""")
start_match = quote_regex.search(template, start_index, end_index)
if start_match is None:
return None
else:
return start_match.start()
def _parse_string(self, template, start_index):
"""
Finds the indices of a string inside a template.
Arguments:
template: The template to be searched.
start_index: The start index of the open quote.
Returns:
A dict containing the following, or None if not parseable:
end_index: The index following the closing quote
quote_length: The length of the quote. Could be 3 for a Python
triple quote.
string: the text of the parsed string
string_inner: the text inside the quotes of the parsed string
"""
quote = template[start_index]
if quote not in ["'", '"']:
raise ValueError("start_index must refer to a single or double quote.")
triple_quote = quote * 3
if template.startswith(triple_quote, start_index):
quote = triple_quote
next_start_index = start_index + len(quote)
while True:
quote_end_index = template.find(quote, next_start_index)
backslash_index = template.find("\\", next_start_index)
if quote_end_index < 0:
return None
if 0 <= backslash_index < quote_end_index:
next_start_index = backslash_index + 2
else:
end_index = quote_end_index + len(quote)
quote_length = len(quote)
string = template[start_index:end_index]
return {
'end_index': end_index,
'quote_length': quote_length,
'string': string,
'string_inner': string[quote_length:-quote_length],
}
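# Illustrative usage sketch (not part of the original module):
#   template = "x = 'hi' + y"
#   ps = ParseString(template, 0, len(template))
#   (ps.start_index, ps.end_index)    # -> (4, 8)
#   (ps.string, ps.string_inner)      # -> ("'hi'", "hi")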
class Expression(object):
"""
Represents an arbitrary expression.
An expression can be any type of code snippet. It will sometimes have a
starting and ending delimiter, but not always.
Here are some example expressions::
${x | n, decode.utf8}
<%= x %>
function(x)
"<p>" + message + "</p>"
Other details of note:
- Only a start_index is required for a valid expression.
- If end_index is None, it means we couldn't parse the rest of the
expression.
- All other details of the expression are optional, and are only added if
and when supplied and needed for additional checks. They are not necessary
for the final results output.
"""
def __init__(self, start_index, end_index=None, template=None, start_delim="", end_delim="", strings=None):
"""
Init method.
Arguments:
start_index: the starting index of the expression
end_index: the index immediately following the expression, or None
if the expression was unparseable
template: optional template code in which the expression was found
start_delim: optional starting delimiter of the expression
end_delim: optional ending delimeter of the expression
strings: optional list of ParseStrings
"""
self.start_index = start_index
self.end_index = end_index
self.start_delim = start_delim
self.end_delim = end_delim
self.strings = strings
if template is not None and self.end_index is not None:
self.expression = template[start_index:end_index]
self.expression_inner = self.expression[len(start_delim):-len(end_delim)].strip()
else:
self.expression = None
self.expression_inner = None
| agpl-3.0 | -2,525,840,043,737,450,000 | 31.767045 | 111 | 0.581325 | false | 4.491433 | false | false | false |
tectronics/coot | python/jligand_gui.py | 4 | 4622 |
# This happens when user clicks on the "Launch JLigand" button.
# It starts a jligand and puts it in the background.
#
def launch_jligand_function():
global jligand_jar
global jligand_home_env
global java_command
start_jligand_listener()
# maybe this should rather check PATH or similar!? FIXME
if not os.path.isfile(jligand_jar):
# Boo. Give us a warning dialog
#
s = "jligand java jar file: " + jligand_jar + " not found"
# make an extra message telling us that JLIGAND_HOME is
# not set if it is not set.
env_message = "Environment variable JLIGAND_HOME not set\n\n" \
if not jligand_home_env else ""
info_dialog(env_message + s)
else:
# OK, it does exist - run it!
#
java_exe = find_exe(java_command)
if not java_exe:
print "BL INFO:: no java found"
else:
# first check if we can run it with coot, i.e. is '-version'
# a valid command line arg
jligand_version = ["-jar", jligand_jar, "-version"]
cmd = java_exe + " " + \
string_append_with_spaces(jligand_version)
res = shell_command_to_string(cmd)
if (not res):
message = "Sorry, your JLigand:\n\n " + jligand_jar + "\n\n" + \
"is not new enough to work with Coot!\n" + \
"Please download a new one!"
info_dialog(message)
else:
run_concurrently(java_exe, jligand_args)
# beam in a new menu to the menu bar:
if (have_coot_python):
if coot_python.main_menubar():
jligand_menu = coot_menubar_menu("JLigand")
add_simple_coot_menu_menuitem(
jligand_menu, "Send Link to JLigand (click 2 monomers)",
lambda func: click_select_residues_for_jligand()
)
# This happens when user clicks on the "Select Residues for JLigand"
# (or some such) button. It expects the user to click on atoms of
# the two residues involved in the link.
#
def click_select_residues_for_jligand():
global imol_jligand_link
def link_em(*args):
print "we received these clicks", args
if (len(args) == 2):
click_1 = args[0]
click_2 = args[1]
print "click_1:", click_1
print "click_2:", click_2
if ((len(click_1) == 7)
and (len(click_2) ==7)):
resname_1 = residue_name(click_1[1],
click_1[2],
click_1[3],
click_1[4])
resname_2 = residue_name(click_2[1],
click_2[2],
click_2[3],
click_2[4])
imol_click_1 = click_1[1]
imol_click_2 = click_2[1]
chain_click_1 = click_1[2]
chain_click_2 = click_2[2]
resno_click_1 = click_1[3]
resno_click_2 = click_2[3]
if not (isinstance(resname_1, str) and
isinstance(resname_2, str)):
print "Bad resnames: %s and %s" %(resname_1, resname_2)
else:
if not (imol_click_1 == imol_click_2):
msg = "Two different molecules %s and %s selected.\n" \
%(imol_click_1, imol_click_2) + \
"Make sure to select residues in the same molecule."
info_dialog(msg)
imol_jligand_link = False
elif (chain_click_1 == chain_click_2 and
resno_click_1 == resno_click_2):
msg = "Same residue %s %s selected.\n" \
%(chain_click_1, resno_click_1) + \
"Make sure to select different residues."
info_dialog(msg)
imol_jligand_link = False
else:
# happy path
imol_jligand_link = imol_click_1
write_file_for_jligand(click2res_spec(click_1), resname_1,
click2res_spec(click_2), resname_2)
user_defined_click(2, link_em)
| gpl-3.0 | -8,485,911,911,103,284,000 | 41.796296 | 84 | 0.463436 | false | 3.940324 | false | false | false |
zbqf109/goodo | openerp/addons/sale_stock/tests/test_sale_stock.py | 1 | 10525 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.addons.sale.tests.test_sale_common import TestSale
class TestSaleStock(TestSale):
def test_00_sale_stock_invoice(self):
"""
Test SO's changes when playing around with stock moves, quants, pack operations, pickings
and whatever other model there is in stock with "invoice on delivery" products
"""
inv_obj = self.env['account.invoice']
self.so = self.env['sale.order'].create({
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {'name': p.name, 'product_id': p.id, 'product_uom_qty': 2, 'product_uom': p.uom_id.id, 'price_unit': p.list_price}) for (_, p) in self.products.iteritems()],
'pricelist_id': self.env.ref('product.list0').id,
'picking_policy': 'direct',
})
# confirm our standard so, check the picking
self.so.action_confirm()
self.assertTrue(self.so.picking_ids, 'Sale Stock: no picking created for "invoice on delivery" stockable products')
# invoice on order
self.so.action_invoice_create()
# deliver partially, check the so's invoice_status and delivered quantities
self.assertEqual(self.so.invoice_status, 'no', 'Sale Stock: so invoice_status should be "nothing to invoice" after invoicing')
pick = self.so.picking_ids
pick.force_assign()
pick.pack_operation_product_ids.write({'qty_done': 1})
wiz_act = pick.do_new_transfer()
wiz = self.env[wiz_act['res_model']].browse(wiz_act['res_id'])
wiz.process()
self.assertEqual(self.so.invoice_status, 'to invoice', 'Sale Stock: so invoice_status should be "to invoice" after partial delivery')
del_qties = [sol.qty_delivered for sol in self.so.order_line]
del_qties_truth = [1.0 if sol.product_id.type in ['product', 'consu'] else 0.0 for sol in self.so.order_line]
self.assertEqual(del_qties, del_qties_truth, 'Sale Stock: delivered quantities are wrong after partial delivery')
# invoice on delivery: only stockable products
inv_id = self.so.action_invoice_create()
inv_1 = inv_obj.browse(inv_id)
self.assertTrue(all([il.product_id.invoice_policy == 'delivery' for il in inv_1.invoice_line_ids]),
'Sale Stock: invoice should only contain "invoice on delivery" products')
# complete the delivery and check invoice_status again
self.assertEqual(self.so.invoice_status, 'no',
'Sale Stock: so invoice_status should be "nothing to invoice" after partial delivery and invoicing')
self.assertEqual(len(self.so.picking_ids), 2, 'Sale Stock: number of pickings should be 2')
pick_2 = self.so.picking_ids[0]
pick_2.force_assign()
pick_2.pack_operation_product_ids.write({'qty_done': 1})
self.assertIsNone(pick_2.do_new_transfer(), 'Sale Stock: second picking should be final without need for a backorder')
self.assertEqual(self.so.invoice_status, 'to invoice', 'Sale Stock: so invoice_status should be "to invoice" after complete delivery')
del_qties = [sol.qty_delivered for sol in self.so.order_line]
del_qties_truth = [2.0 if sol.product_id.type in ['product', 'consu'] else 0.0 for sol in self.so.order_line]
self.assertEqual(del_qties, del_qties_truth, 'Sale Stock: delivered quantities are wrong after complete delivery')
# invoice on delivery
inv_id = self.so.action_invoice_create()
self.assertEqual(self.so.invoice_status, 'invoiced',
'Sale Stock: so invoice_status should be "fully invoiced" after complete delivery and invoicing')
def test_01_sale_stock_order(self):
"""
Test SO's changes when playing around with stock moves, quants, pack operations, pickings
and whatever other model there is in stock with "invoice on order" products
"""
# let's cheat and put all our products to "invoice on order"
self.so = self.env['sale.order'].create({
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {'name': p.name, 'product_id': p.id, 'product_uom_qty': 2, 'product_uom': p.uom_id.id, 'price_unit': p.list_price}) for (_, p) in self.products.iteritems()],
'pricelist_id': self.env.ref('product.list0').id,
'picking_policy': 'direct',
})
for sol in self.so.order_line:
sol.product_id.invoice_policy = 'order'
# confirm our standard so, check the picking
self.so.action_confirm()
self.assertTrue(self.so.picking_ids, 'Sale Stock: no picking created for "invoice on order" stockable products')
# let's do an invoice for a deposit of 5%
adv_wiz = self.env['sale.advance.payment.inv'].with_context(active_ids=[self.so.id]).create({
'advance_payment_method': 'percentage',
'amount': 5.0,
'product_id': self.env.ref('sale.advance_product_0').id,
})
act = adv_wiz.with_context(open_invoices=True).create_invoices()
inv = self.env['account.invoice'].browse(act['res_id'])
self.assertEqual(inv.amount_untaxed, self.so.amount_untaxed * 5.0 / 100.0, 'Sale Stock: deposit invoice is wrong')
self.assertEqual(self.so.invoice_status, 'to invoice', 'Sale Stock: so should be to invoice after invoicing deposit')
# invoice on order: everything should be invoiced
self.so.action_invoice_create(final=True)
self.assertEqual(self.so.invoice_status, 'invoiced', 'Sale Stock: so should be fully invoiced after second invoice')
# deliver, check the delivered quantities
pick = self.so.picking_ids
pick.force_assign()
pick.pack_operation_product_ids.write({'qty_done': 2})
self.assertIsNone(pick.do_new_transfer(), 'Sale Stock: complete delivery should not need a backorder')
del_qties = [sol.qty_delivered for sol in self.so.order_line]
del_qties_truth = [2.0 if sol.product_id.type in ['product', 'consu'] else 0.0 for sol in self.so.order_line]
self.assertEqual(del_qties, del_qties_truth, 'Sale Stock: delivered quantities are wrong after partial delivery')
# invoice on delivery: nothing to invoice
self.assertFalse(self.so.action_invoice_create(), 'Sale Stock: there should be nothing to invoice')
def test_02_sale_stock_return(self):
"""
Test a SO with a product invoiced on delivery. Deliver and invoice the SO, then do a return
of the picking. Check that a refund invoice is well generated.
"""
# intial so
self.partner = self.env.ref('base.res_partner_1')
self.product = self.env.ref('product.product_product_47')
so_vals = {
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {
'name': self.product.name,
'product_id': self.product.id,
'product_uom_qty': 5.0,
'product_uom': self.product.uom_id.id,
'price_unit': self.product.list_price})],
'pricelist_id': self.env.ref('product.list0').id,
}
self.so = self.env['sale.order'].create(so_vals)
# confirm our standard so, check the picking
self.so.action_confirm()
self.assertTrue(self.so.picking_ids, 'Sale Stock: no picking created for "invoice on delivery" stockable products')
# invoice in on delivery, nothing should be invoiced
self.assertEqual(self.so.invoice_status, 'no', 'Sale Stock: so invoice_status should be "nothing to invoice"')
# deliver completely
pick = self.so.picking_ids
pick.force_assign()
pick.pack_operation_product_ids.write({'qty_done': 5})
pick.do_new_transfer()
# Check quantity delivered
del_qty = sum(sol.qty_delivered for sol in self.so.order_line)
self.assertEqual(del_qty, 5.0, 'Sale Stock: delivered quantity should be 5.0 after complete delivery')
# Check invoice
self.assertEqual(self.so.invoice_status, 'to invoice', 'Sale Stock: so invoice_status should be "to invoice" before invoicing')
inv_1_id = self.so.action_invoice_create()
self.assertEqual(self.so.invoice_status, 'invoiced', 'Sale Stock: so invoice_status should be "invoiced" after invoicing')
self.assertEqual(len(inv_1_id), 1, 'Sale Stock: only one invoice should be created')
self.inv_1 = self.env['account.invoice'].browse(inv_1_id)
self.assertEqual(self.inv_1.amount_untaxed, self.inv_1.amount_untaxed, 'Sale Stock: amount in SO and invoice should be the same')
# Create return picking
StockReturnPicking = self.env['stock.return.picking']
default_data = StockReturnPicking.with_context(active_ids=pick.ids, active_id=pick.ids[0]).default_get(['move_dest_exists', 'original_location_id', 'product_return_moves', 'parent_location_id', 'location_id'])
return_wiz = StockReturnPicking.with_context(active_ids=pick.ids, active_id=pick.ids[0]).create(default_data)
res = return_wiz.create_returns()
return_pick = self.env['stock.picking'].browse(res['res_id'])
# Validate picking
return_pick.force_assign()
return_pick.pack_operation_product_ids.write({'qty_done': 5})
return_pick.do_new_transfer()
# Check invoice
self.assertEqual(self.so.invoice_status, 'to invoice', 'Sale Stock: so invoice_status should be "to invoice" before invoicing')
# let's do an invoice with refunds
adv_wiz = self.env['sale.advance.payment.inv'].with_context(active_ids=[self.so.id]).create({
'advance_payment_method': 'all',
})
adv_wiz.with_context(open_invoices=True).create_invoices()
self.inv_2 = self.so.invoice_ids[1]
self.assertEqual(self.so.invoice_status, 'no', 'Sale Stock: so invoice_status should be "no" after invoicing the return')
self.assertEqual(self.inv_2.amount_untaxed, self.inv_2.amount_untaxed, 'Sale Stock: amount in SO and invoice should be the same')
| gpl-3.0 | -8,601,836,444,490,651,000 | 59.142857 | 217 | 0.645986 | false | 3.671085 | true | false | false |
Pushjet/Pushjet-Server-Api | utils.py | 1 | 3199 | from re import compile
from json import dumps
from flask import request, jsonify
from functools import wraps
from models import Service
from shared import zmq_relay_socket
uuid = compile(r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$')
service = compile(r'^[a-zA-Z0-9]{4}-[a-zA-Z0-9]{6}-[a-zA-Z0-9]{12}-[a-zA-Z0-9]{5}-[a-zA-Z0-9]{9}$')
is_uuid = lambda s: uuid.match(s) is not None
is_service = lambda s: service.match(s) is not None
is_secret = lambda s: compile(r'^[a-zA-Z0-9]{32}$').match(s) is not None
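# Illustrative sketch (not part of the original module; the keys shown are made up):
#   is_uuid('01234567-89ab-cdef-0123-456789abcdef')          # -> True (8-4-4-4-12 hex groups)
#   is_service('abcd-abcdef-abcdefabcdef-abcde-abcdefabc')   # -> True (4-6-12-5-9 groups)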
QUERY_ACTION_NEW_MESSAGE = 0
QUERY_UPDATE_LISTEN = 1
class Error(object):
@staticmethod
def _e(message, error_code, http_status):
return (dumps({'error': {'message': message, 'id': error_code}}), http_status)
NONE = (dumps({'status': 'ok'}), 200) # OK
INVALID_CLIENT = _e.__func__('Invalid client uuid', 1, 400) # Bad request
INVALID_SERVICE = _e.__func__('Invalid service', 2, 400) # - || -
INVALID_SECRET = _e.__func__('Invalid secret', 3, 400) # - || -
DUPLICATE_LISTEN = _e.__func__('Already subscribed to that service', 4, 409) # Conflict
RATE_TOOFAST = _e.__func__('Whoaw there cowboy, slow down!', 5, 429) # Too many requests
SERVICE_NOTFOUND = _e.__func__('Service not found', 6, 404)
INVALID_PUBKEY = _e.__func__('Invalid public key supplied. Please send a DER formatted base64 encoded key.', 8, 400) # Bad request
CONNECTION_CLOSING = _e.__func__('Connection closing', 9, 499) # Client closed request
NO_CHANGES = _e.__func__('No changes were made', 10, 400) # Bad request
NOT_SUBSCRIBED = _e.__func__('Not subscribed to that service', 11, 409) # Conflict
@staticmethod
def ARGUMENT_MISSING(arg):
return Error._e('Missing argument {}'.format(arg), 7, 400) # Bad request
def has_uuid(f):
@wraps(f)
def df(*args, **kwargs):
client = request.form.get('uuid', '') or request.args.get('uuid', '')
if not client:
return Error.ARGUMENT_MISSING('uuid')
if not is_uuid(client):
return Error.INVALID_CLIENT
return f(*args, client=client, **kwargs)
return df
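# Illustrative usage sketch (not part of the original module; the route and the
# `app` object are hypothetical):
#   @app.route('/message')
#   @has_uuid
#   def message_fetch(client):
#       ...  # client is the validated uuid string passed in by the decorator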
def has_service(f):
@wraps(f)
def df(*args, **kwargs):
service = request.form.get('service', '') or request.args.get('service', '')
if not service:
return Error.ARGUMENT_MISSING('service')
if not is_service(service):
return Error.INVALID_SERVICE
srv = Service.query.filter_by(public=service).first()
if not srv:
return Error.SERVICE_NOTFOUND
return f(*args, service=srv, **kwargs)
return df
def has_secret(f):
@wraps(f)
def df(*args, **kwargs):
secret = request.form.get('secret', '') or request.args.get('secret', '')
if not secret:
return Error.ARGUMENT_MISSING('secret')
if not is_secret(secret):
return Error.INVALID_SECRET
srv = Service.query.filter_by(secret=secret).first()
if not srv:
return Error.SERVICE_NOTFOUND
return f(*args, service=srv, **kwargs)
return df
def queue_zmq_message(message):
zmq_relay_socket.send_string(message)
| bsd-2-clause | 53,105,573,924,442,610 | 35.352273 | 134 | 0.615505 | false | 3.315026 | false | false | false |
katstalk/android_external_chromium_org | tools/win/toolchain/get_toolchain_if_necessary.py | 23 | 4403 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import ctypes.wintypes
import hashlib
import json
import os
import subprocess
import sys
BASEDIR = os.path.dirname(os.path.abspath(__file__))
GetFileAttributes = ctypes.windll.kernel32.GetFileAttributesW
GetFileAttributes.argtypes = (ctypes.wintypes.LPWSTR,)
GetFileAttributes.restype = ctypes.wintypes.DWORD
FILE_ATTRIBUTE_HIDDEN = 0x2
FILE_ATTRIBUTE_SYSTEM = 0x4
def IsHidden(file_path):
"""Returns whether the given |file_path| has the 'system' or 'hidden'
attribute set."""
p = GetFileAttributes(file_path)
assert p != 0xffffffff
return bool(p & (FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM))
def GetFileList(root):
"""Gets a normalized list of files under |root|."""
assert not os.path.isabs(root)
assert os.path.normpath(root) == root
file_list = []
for base, _, files in os.walk(root):
paths = [os.path.join(base, f) for f in files]
file_list.extend(x.lower() for x in paths if not IsHidden(x))
return sorted(file_list)
def MakeTimestampsFileName(root):
return os.path.join(root, '..', '.timestamps')
def CalculateHash(root):
"""Calculates the sha1 of the paths to all files in the given |root| and the
contents of those files, and returns as a hex string."""
file_list = GetFileList(root)
# Check whether we previously saved timestamps in $root/../.timestamps. If
# we didn't, or they don't match, then do the full calculation, otherwise
# return the saved value.
timestamps_file = MakeTimestampsFileName(root)
timestamps_data = {'files': [], 'sha1': ''}
if os.path.exists(timestamps_file):
with open(timestamps_file, 'rb') as f:
try:
timestamps_data = json.load(f)
except ValueError:
# json couldn't be loaded, empty data will force a re-hash.
pass
matches = len(file_list) == len(timestamps_data['files'])
if matches:
for disk, cached in zip(file_list, timestamps_data['files']):
if disk != cached[0] or os.stat(disk).st_mtime != cached[1]:
matches = False
break
if matches:
return timestamps_data['sha1']
digest = hashlib.sha1()
for path in file_list:
digest.update(path)
with open(path, 'rb') as f:
digest.update(f.read())
return digest.hexdigest()
def SaveTimestampsAndHash(root, sha1):
"""Save timestamps and the final hash to be able to early-out more quickly
next time."""
file_list = GetFileList(root)
timestamps_data = {
'files': [[f, os.stat(f).st_mtime] for f in file_list],
'sha1': sha1,
}
with open(MakeTimestampsFileName(root), 'wb') as f:
json.dump(timestamps_data, f)
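# Illustrative sketch (not part of the original script): the .timestamps file written
# above is JSON of the form
#   {"files": [["path\\to\\some.dll", 1386000000.0], ...], "sha1": "<hex digest>"}
# so an unchanged toolchain is detected from (path, mtime) pairs without re-hashing.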
def main():
if sys.platform not in ('win32', 'cygwin'):
return 0
if len(sys.argv) != 1:
print >> sys.stderr, 'Unexpected arguments.'
return 1
# Move to same location as .gclient. This is a no-op when run via gclient.
os.chdir(os.path.normpath(os.path.join(BASEDIR, '..\\..\\..\\..')))
toolchain_dir = 'src\\third_party\\win_toolchain'
target_dir = os.path.join(toolchain_dir, 'files')
sha1path = os.path.join(toolchain_dir, 'toolchain.sha1')
desired_hash = ''
if os.path.isfile(sha1path):
with open(sha1path, 'rb') as f:
desired_hash = f.read().strip()
# If the current hash doesn't match what we want in the file, nuke and pave.
# Typically this script is only run when the .sha1 one file is updated, but
# directly calling "gclient runhooks" will also run it, so we cache
# based on timestamps to make that case fast.
current_hash = CalculateHash(target_dir)
if current_hash != desired_hash:
print 'Windows toolchain out of date or doesn\'t exist, updating...'
if os.path.isdir(target_dir):
subprocess.check_call('rmdir /s/q "%s"' % target_dir, shell=True)
subprocess.check_call([
sys.executable,
'src\\tools\\win\\toolchain\\toolchain2013.py',
'--targetdir', target_dir])
current_hash = CalculateHash(target_dir)
if current_hash != desired_hash:
print >> sys.stderr, (
'Got wrong hash after pulling a new toolchain. '
'Wanted \'%s\', got \'%s\'.' % (
desired_hash, current_hash))
return 1
SaveTimestampsAndHash(target_dir, current_hash)
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | 7,618,586,585,814,765,000 | 30.905797 | 78 | 0.671588 | false | 3.49722 | false | false | false |
ellisgeek/AllSeeingEye | allSeeingEye.py | 1 | 7922 | """
All Seeing Eye
Oracle Client Install Helper!
Elliott Saille
12/3/13
"""
#Include only specific functions
from subprocess import call
from os import name
from os import system
from os import access
from os import R_OK
from os import W_OK
from os import makedirs
from os import path
from os import environ
from os import walk
from shutil import rmtree
from shutil import copy2
from sys import exit
from time import sleep
#Variables
tempDir = environ["TEMP"] + "/allSeeingEye"
tnsnamesTemp = tempDir + "/tnsnames.ora"
tnsnames = "C:/oracle/product/10.2.0/client/NETWORK/ADMIN/tnsnames.ora"
oraInstaller = "M:/INSTALL/Voyager8/10203_client_vista-win7"
installTemp = tempDir + "/oracle"
setup = installTemp + "/setup.exe"
setupOpts = "\"FROM_LOCATION=%CD%\stage\products.xml\" -responseFile \"%CD%\response\ExLibrisOracle.rsp\""
compatMode = "VISTASP2"
def compatabilityChange(path, mode="WINXPSP3", runasadmin=True, verbose=False):
"""
Borrowed from http://techdiary-viki.blogspot.com/2011/03/script-to-set-compatibility-mode-of.html
Change the compatibility mode of a windows EXE
Valid Compatibility modes are:
WIN95: Windows 95
WIN98: Windows 98 / Windows ME
WINXPSP2: Windows XP (Service Pack 2)
WINXPSP3: Windows XP (Service Pack 3)
VISTARTM: Windows Vista
VISTASP1: Windows Vista (Service Pack 1)
VISTASP2: Windows Vista (Service Pack 2)
WIN7RTM: Windows 7
WIN8RTM: Windows 8
"""
#Display path to file that will be changed
print("Processing path %s" % path)
files = []
for dirpath, dirnames, filenames in walk(path):
files.extend(filenames)
    exec_files = list(filter(lambda x: x.endswith('.exe'), files))
if verbose:
print("%d files to process" % len(exec_files))
print("Setting mode to %s" % mode)
    if runasadmin == True:
        print("Program will run as Administrator")
    # RUNASADMIN is combined with the compatibility mode in the registry data
    layer = ("RUNASADMIN " + mode) if runasadmin else mode
    for ef in exec_files:
        fullpath = path + '\\' + ef
        if verbose:
            print("Processing file %s" % fullpath)
        # the Layers key expects the full executable path as the value name
        system('REG.EXE ADD "HKEY_CURRENT_USER\Software\Microsoft\Windows NT\CurrentVersion\AppCompatFlags\Layers" /v "%s" /t REG_SZ /d "%s" /f' % (fullpath, layer))
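# Illustrative sketch (not part of the original script; the path is hypothetical):
# compatabilityChange("C:\\oracle_setup", "VISTASP2", True) adds, for each .exe found,
# a value named with its full path under HKCU\...\AppCompatFlags\Layers whose data is
# "RUNASADMIN VISTASP2".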
def confirm(prompt=None, resp=False):
"""
Prompts for yes or no response from the user. Returns True for yes and
False for no.
"resp" should be set to the default value assumed by the caller when
user simply types ENTER.
"""
#set default prompt if none set
if prompt is None:
prompt = "Confirm"
#Change the default response
if resp:
prompt = "%s [%s]|%s: " % (prompt, "y", "n")
else:
prompt = "%s [%s]|%s: " % (prompt, "n", "y")
#Check for user input
while True:
ans = input(prompt)
if not ans:
return resp
if ans not in ["y", "Y", "n", "N"]:
print("please enter y or n.")
continue
if ans == "y" or ans == "Y":
return True
if ans == "n" or ans == "N":
return False
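# Illustrative usage sketch (not part of the original script):
#   if confirm("Delete temp files?", False):  # bare ENTER returns the default, False
#       rmtree(tempDir)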
def clear():
"""
Clears the screen
"""
system("cls")
def backup():
"""
Backs up current tnsnames if it exists
"""
clear()
print("Backing up current tnsnames.ora from:")
print(tnsnames)
#make sure we can access the file
if access(tnsnames, R_OK) == True:
try:
#Copy it to the Temp Dir
copy2(tnsnames, tnsnamesTemp)
#or throw error
except IOError as e:
print("\n")
print("({})".format(e))
print("\n")
confirm("Backup Failed!\nReturn to main menu?", True)
mainMenu()
#be happy
else:
print("\nBackup Complete!\n")
else:
clear()
print("Unable to access tnsnames.ora at:")
print(tnsnames)
confirm("Return To main Menu?", True)
mainMenu()
def download():
"""
Copies oracle installer from network share
"""
#Check if installer exists on share
if path.exists(oraInstaller):
try:
#Copy it local
system("xcopy" +" /I /S \""+ oraInstaller +"\" \""+ installTemp +"\"")
#Throw a useful error
except IOError as e:
print("\n")
print("({})".format(e))
print("\n")
confirm("Installation Failed!\nReturn to main menu?", True)
mainMenu()
#If no errors print happy message!
else:
print("\nInstaller Copied Successfully!\n")
#No installer :(
else:
confirm("\nInstaller does not exist on share!\nReturn to main menu?", True)
mainMenu()
#Check if installer has been downloaded
if path.exists(setup):
#Change compatibility mode
compatabilityChange(setup, compatMode, True, False)
#Or Fail!
else:
clear()
print("Could not find installer,\nnothing to set compatibility for!\n")
confirm("Return to main menu?", True)
mainMenu()
def install():
"""
Sets environment up to run the oracle installer
"""
clear()
print("Installing Oracle database client\n")
#Are you shure this is what you want to do?
if confirm("Continue Installation?", True) == False:
clear()
print("Installation aborted")
sleep(2)
mainMenu()
#Check if installer has already been downloaded this session
if path.exists(setup):
#Ask if you want to reuse downloaded installer and if not re-download
if confirm("Installer exists!\nUse downloaded installer?", True) == False:
clear()
print("Will re-download installer")
rmtree(installTemp)
download()
#If not download the installer
else:
download()
#Write some initial configuration stuff to the Registry
system("reg add HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\MSDTC\MTxOCI /v OracleOciLib /t REG_SZ /d oci.dll /f")
system("reg add HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\MSDTC\MTxOCI /v OracleSqlLib /t REG_SZ /d orasql10.dll /f")
system("reg add HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\MSDTC\MTxOCI /v OracleXaLib /t REG_SZ /d oraclient10.dll /f")
#Call the installer
call("%s" % setup + " " + setupOpts, shell=True)
confirm("Return To main Menu?", True)
mainMenu()
def create_tnsnames():  # renamed so it does not shadow the tnsnames path set above
"""
Copy preconfigured tnsnames.ora to oracle install location
Will eventually include option to add custom entries to tnsnames
"""
def mainMenu():
"""
Display the Main Menu
"""
clear()
print("Oracle Installation and Configuration Helper")
print("\n")
print("1. Backup current tnsnames.ora")
print("2. Install Oracle 10g Client")
print("3. Create tnsnames.ora")
print("4. Add ODBC Configuration")
print("Q. Exit")
choise = input("Please Make a Selection: ")
if choise == "1":
backup()
elif choise == "2":
install()
elif choise == "3":
        create_tnsnames()
elif choise == "4":
print("2")
elif choise == "Q" or choise == "q":
clear()
quit()
clear()
print("Please make a selection!")
confirm("Return To main Menu?", True)
mainMenu()
#Clean up and Create Temp Dir for session
if path.exists(tempDir):
print ("Old temp directory found at %s" % tempDir)
if confirm("Remove Temp Directory?", True) == True:
try:
rmtree(tempDir)
except IOError as e:
print("({})".format(e))
try:
makedirs(tempDir)
except IOError as e:
print("({})".format(e))
else:
exit("Will not remove Temp Directory! Please Manually delete directory %s!" % tempDir)
else:
try:
makedirs(tempDir)
except IOError as e:
print("({})".format(e))
#Do Stuff!
mainMenu()
| gpl-3.0 | -2,110,420,134,937,973,800 | 29.007576 | 158 | 0.601237 | false | 3.788618 | false | false | false |
aldian/tensorflow | tensorflow/python/kernel_tests/variable_scope_test.py | 4 | 57583 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for variable store."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import numpy
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
class VariableScopeTest(test.TestCase):
def tearDown(self):
gc.collect()
# This will only contain uncollectable garbage, i.e. reference cycles
# involving objects with __del__ defined.
self.assertEqual(0, len(gc.garbage))
def testGetVar(self):
vs = variable_scope._get_default_variable_store()
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
self.assertEqual(v, v1)
@test_util.run_in_graph_and_eager_modes()
def testResource(self):
vs = variable_scope._get_default_variable_store()
v1 = vs.get_variable("v", [1], use_resource=True)
self.assertTrue(isinstance(v1, resource_variable_ops.ResourceVariable))
def testNameExists(self):
vs = variable_scope._get_default_variable_store()
# No check by default, so we can both create and get existing names.
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
self.assertEqual(v, v1)
# When reuse is False, we fail when variables are already there.
vs.get_variable("w", [1], reuse=False) # That's ok.
with self.assertRaises(ValueError):
vs.get_variable("v", [1], reuse=False) # That fails.
# When reuse is True, we fail when variables are new.
vs.get_variable("v", [1], reuse=True) # That's ok.
with self.assertRaises(ValueError):
vs.get_variable("u", [1], reuse=True) # That fails.
def testNamelessStore(self):
vs = variable_scope._get_default_variable_store()
vs.get_variable("v1", [2])
vs.get_variable("v2", [2])
expected_names = ["%s:0" % name for name in ["v1", "v2"]]
self.assertEqual(
set(expected_names), set([v.name for v in vs._vars.values()]))
@test_util.run_in_graph_and_eager_modes()
def testVarScopeInitializer(self):
init = init_ops.constant_initializer(0.3)
with variable_scope.variable_scope("tower0") as tower:
with variable_scope.variable_scope("foo", initializer=init):
v = variable_scope.get_variable("v", [])
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.3)
with variable_scope.variable_scope(tower, initializer=init):
w = variable_scope.get_variable("w", [])
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), 0.3)
@test_util.run_in_graph_and_eager_modes()
def testVarScopeConstraint(self):
constraint = lambda x: 0. * x
with variable_scope.variable_scope("tower1") as tower:
with variable_scope.variable_scope("foo", constraint=constraint):
v = variable_scope.get_variable("v", [])
self.assertEqual(v.constraint, constraint)
with variable_scope.variable_scope(tower, constraint=constraint):
w = variable_scope.get_variable("w", [])
self.assertEqual(w.constraint, constraint)
@test_util.run_in_graph_and_eager_modes()
def testVarScopeDType(self):
with variable_scope.variable_scope("tower2") as tower:
with variable_scope.variable_scope("foo", dtype=dtypes.float16):
v = variable_scope.get_variable("v", [])
self.assertEqual(v.dtype.base_dtype, dtypes.float16)
with variable_scope.variable_scope(tower, dtype=dtypes.float16):
w = variable_scope.get_variable("w", [])
self.assertEqual(w.dtype.base_dtype, dtypes.float16)
def testEagerVaribleStore(self):
with context.eager_mode():
store = variable_scope.EagerVariableStore()
with store.as_default():
v = variable_scope.get_variable("v", shape=(), trainable=True)
w = variable_scope.get_variable("w", shape=(), trainable=False)
self.assertTrue(v in store.variables())
self.assertTrue(w in store.variables())
self.assertTrue(v in store.trainable_variables())
self.assertFalse(w in store.trainable_variables())
self.assertFalse(v in store.non_trainable_variables())
self.assertTrue(w in store.non_trainable_variables())
@test_util.run_in_graph_and_eager_modes()
def testInitFromNonTensorValue(self):
v = variable_scope.get_variable("v4", initializer=4, dtype=dtypes.int32)
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 4)
w = variable_scope.get_variable(
"w4", initializer=numpy.array([1, 2, 3]), dtype=dtypes.int64)
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), [1, 2, 3])
if context.in_graph_mode():
with self.assertRaises(TypeError):
variable_scope.get_variable("x4", initializer={})
else:
with self.assertRaises(ValueError):
variable_scope.get_variable("x4", initializer={})
@test_util.run_in_graph_and_eager_modes()
def testInitFromNonInitializer(self):
# Test various dtypes with zeros initializer as following:
types = [
dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.uint16, dtypes.int32,
dtypes.int64, dtypes.bool
]
# Use different variable_name to distinguish various dtypes
for (i, dtype) in enumerate(types):
x = variable_scope.get_variable(
name="xx%d" % i, shape=(3, 4), dtype=dtype)
y = variable_scope.get_variable(
name="yy%d" % i,
shape=(3, 4),
dtype=dtype,
initializer=init_ops.zeros_initializer(dtype=dtype))
self.evaluate(variables_lib.global_variables_initializer())
self.assertAllEqual(self.evaluate(x.value()), self.evaluate(y.value()))
# TODO(alive): support variable partitioning/caching in eager mode.
def testVarScopeCachingDevice(self):
with self.test_session():
caching_device = "/job:moo"
with variable_scope.variable_scope("tower"):
with variable_scope.variable_scope(
"caching", caching_device=caching_device):
v = variable_scope.get_variable("v", [])
self.assertTrue(v.value().device.startswith(caching_device))
with variable_scope.variable_scope("child"):
v2 = variable_scope.get_variable("v", [])
self.assertTrue(v2.value().device.startswith(caching_device))
with variable_scope.variable_scope("not_cached", caching_device=""):
v2_not_cached = variable_scope.get_variable("v", [])
self.assertFalse(v2_not_cached.value().device.startswith(
caching_device))
with variable_scope.variable_scope(
"not_cached_identity_device",
caching_device=lambda op: op.device):
v2_identity_device = variable_scope.get_variable("v", [])
self.assertFalse(v2_identity_device.value().device.startswith(
caching_device))
with variable_scope.variable_scope("we_will_do_it_live") as vs_live:
vs_live.set_caching_device("/job:live")
v_live = variable_scope.get_variable("v", [])
self.assertTrue(v_live.value().device.startswith("/job:live"))
v_tower = variable_scope.get_variable("v", [])
self.assertFalse(v_tower.value().device.startswith(caching_device))
@test_util.run_in_graph_and_eager_modes()
def testVarScopeRegularizer(self):
init = init_ops.constant_initializer(0.3)
def regularizer1(v):
return math_ops.reduce_mean(v) + 0.1
def regularizer2(v):
return math_ops.reduce_mean(v) + 0.2
with variable_scope.variable_scope(
"tower3", regularizer=regularizer1) as tower:
with variable_scope.variable_scope("foo", initializer=init):
v = variable_scope.get_variable("v", [])
self.evaluate(variables_lib.variables_initializer([v]))
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(1, len(losses))
self.assertAllClose(self.evaluate(losses[0]), 0.4)
with variable_scope.variable_scope(tower, initializer=init) as vs:
u = variable_scope.get_variable("u", [])
vs.set_regularizer(regularizer2)
w = variable_scope.get_variable("w", [])
# Next 3 variable not regularized to test disabling regularization.
x = variable_scope.get_variable(
"x", [], regularizer=variable_scope.no_regularizer)
with variable_scope.variable_scope(
"baz", regularizer=variable_scope.no_regularizer):
y = variable_scope.get_variable("y", [])
vs.set_regularizer(variable_scope.no_regularizer)
z = variable_scope.get_variable("z", [])
# Check results.
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(3, len(losses))
self.evaluate(variables_lib.variables_initializer([u, w, x, y, z]))
self.assertAllClose(self.evaluate(losses[0]), 0.4)
self.assertAllClose(self.evaluate(losses[1]), 0.4)
self.assertAllClose(self.evaluate(losses[2]), 0.5)
with variable_scope.variable_scope("foo", reuse=True):
# reuse=True is for now only supported when eager execution is disabled.
if context.in_graph_mode():
v = variable_scope.get_variable("v",
[]) # "v" is alredy there, reused
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(3, len(losses)) # No new loss added.
@test_util.run_in_graph_and_eager_modes()
def testInitializeFromValue(self):
init = constant_op.constant(0.1)
w = variable_scope.get_variable("v", initializer=init)
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), 0.1)
with self.assertRaisesRegexp(ValueError, "shape"):
# We disallow explicit shape specification when initializer is constant.
variable_scope.get_variable("u", [1], initializer=init)
with variable_scope.variable_scope("foo", initializer=init):
# Constant initializer can be passed through scopes if needed.
v = variable_scope.get_variable("v")
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.1)
# Check that non-float32 initializer creates a non-float32 variable.
init = constant_op.constant(1, dtype=dtypes.int32)
t = variable_scope.get_variable("t", initializer=init)
self.assertEqual(t.dtype.base_dtype, dtypes.int32)
# Raise error if `initializer` dtype and `dtype` are not identical.
with self.assertRaisesRegexp(ValueError, "don't match"):
variable_scope.get_variable("s", initializer=init, dtype=dtypes.float64)
def testControlDeps(self):
with self.test_session() as sess:
v0 = variable_scope.get_variable(
"v0", [1], initializer=init_ops.constant_initializer(0))
with ops.control_dependencies([v0.value()]):
v1 = variable_scope.get_variable(
"v1", [1], initializer=init_ops.constant_initializer(1))
add = v1 + v0
# v0 should be uninitialized.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(v0)
# We should be able to initialize and run v1 without initializing
# v0, even if the variable was created with a control dep on v0.
sess.run(v1.initializer)
self.assertEqual(1, sess.run(v1))
# v0 should still be uninitialized.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(v0)
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(add)
# If we initialize v0 we should be able to run 'add'.
sess.run(v0.initializer)
sess.run(add)
def testControlFlow(self):
with self.test_session() as sess:
v0 = variable_scope.get_variable(
"v0", [], initializer=init_ops.constant_initializer(0))
var_dict = {}
# Call get_variable in each of the cond clauses.
def var_in_then_clause():
v1 = variable_scope.get_variable(
"v1", [1], initializer=init_ops.constant_initializer(1))
var_dict["v1"] = v1
return v1 + v0
def var_in_else_clause():
v2 = variable_scope.get_variable(
"v2", [1], initializer=init_ops.constant_initializer(2))
var_dict["v2"] = v2
return v2 + v0
add = control_flow_ops.cond(
math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
v1 = var_dict["v1"]
v2 = var_dict["v2"]
# We should be able to initialize and run v1 and v2 without initializing
# v0, even if the variable was created with a control dep on v0.
sess.run(v1.initializer)
self.assertEqual([1], sess.run(v1))
sess.run(v2.initializer)
self.assertEqual([2], sess.run(v2))
# v0 should still be uninitialized.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(v0)
# We should not be able to run 'add' yet.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(add)
# If we initialize v0 we should be able to run 'add'.
sess.run(v0.initializer)
sess.run(add)
@test_util.run_in_graph_and_eager_modes()
def testGetVariableScope(self):
# Test the get_variable_scope() function and setting properties of result.
init = init_ops.constant_initializer(0.3)
with variable_scope.variable_scope("bar"):
new_init1 = variable_scope.get_variable_scope().initializer
self.assertEqual(new_init1, None)
# Check that we can set initializer like this.
variable_scope.get_variable_scope().set_initializer(init)
v = variable_scope.get_variable("v", [])
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.3)
if context.in_graph_mode():
# Check that we can set reuse.
variable_scope.get_variable_scope().reuse_variables()
with self.assertRaises(ValueError): # Fail, w does not exist yet.
variable_scope.get_variable("w", [1])
# Check that the set initializer goes away.
new_init = variable_scope.get_variable_scope().initializer
self.assertEqual(new_init, None)
@test_util.run_in_graph_and_eager_modes()
def testVarScope(self):
with variable_scope.variable_scope("tower4") as tower:
self.assertEqual(tower.name, "tower4")
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower4/scope/")
with variable_scope.variable_scope("tower5"):
with variable_scope.variable_scope("bar") as bar:
self.assertEqual(bar.name, "tower5/bar")
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower5/bar/scope/")
with variable_scope.variable_scope("tower6"):
with variable_scope.variable_scope(tower, reuse=True) as tower_shared:
self.assertEqual(tower_shared.name, "tower4")
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower6/tower4/scope/")
@test_util.run_in_graph_and_eager_modes()
def testVarScopeNameScope(self):
with ops.name_scope("testVarScopeNameScope1"):
with variable_scope.variable_scope("tower") as tower:
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope1/tower/scope2/")
if context.in_graph_mode():
with variable_scope.variable_scope(
tower): # Re-entering acts like another "tower".
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope1/tower_1/scope2/")
with variable_scope.variable_scope(
"tower"): # Re-entering by string acts the same.
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope1/tower_2/scope2/")
with ops.name_scope("testVarScopeNameScope2"):
with variable_scope.variable_scope("tower"):
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope2/tower/scope2/")
if context.in_graph_mode():
with variable_scope.variable_scope(tower):
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope2/tower_1/scope2/")
root_var_scope = variable_scope.get_variable_scope()
with ops.name_scope("testVarScopeNameScope3"):
with variable_scope.variable_scope(root_var_scope):
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope3/scope2/")
def testVarScopeOriginalNameScope(self):
with self.test_session():
with ops.name_scope("scope1"):
with variable_scope.variable_scope("tower") as tower:
self.assertEqual(tower.original_name_scope, "scope1/tower/")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower/scope2/")
with ops.name_scope("scope2"):
with variable_scope.variable_scope(tower) as tower1:
# Re-entering preserves original name scope.
self.assertEqual(tower1.original_name_scope, "scope1/tower/")
with ops.name_scope("foo") as sc2:
self.assertEqual(sc2, "scope2/tower/foo/")
# Test re-entering original name scope.
with ops.name_scope(tower.original_name_scope):
with ops.name_scope("bar") as sc3:
self.assertEqual(sc3, "scope1/tower/bar/")
with ops.name_scope("scope2"):
with variable_scope.variable_scope(tower):
with ops.name_scope(tower.original_name_scope):
with ops.name_scope("bar") as sc3:
self.assertEqual(sc3, "scope1/tower/bar_1/")
def testVarScopeObjectReuse(self):
with self.test_session():
vs = None
with variable_scope.variable_scope("jump", reuse=True) as scope:
vs = scope
with variable_scope.variable_scope(vs) as jump:
self.assertTrue(jump.reuse)
with variable_scope.variable_scope(vs, reuse=True) as jump_reuse:
self.assertTrue(jump_reuse.reuse)
with variable_scope.variable_scope(vs, reuse=False) as jump_no_reuse:
self.assertTrue(jump_no_reuse.reuse) # Inherited, cannot be undone.
with variable_scope.variable_scope("jump", reuse=False) as scope:
vs = scope
with variable_scope.variable_scope(vs) as jump:
self.assertFalse(jump.reuse)
with variable_scope.variable_scope(vs, reuse=True) as jump_reuse:
self.assertTrue(jump_reuse.reuse)
with variable_scope.variable_scope(vs, reuse=False) as jump_no_reuse:
self.assertFalse(jump_no_reuse.reuse)
def testVarScopeGetOrCreateReuse(self):
with self.test_session():
def test_value(value):
x = constant_op.constant(value)
with variable_scope.variable_scope("testVarScopeGetOrCreateReuse_bar",
reuse=variable_scope.AUTO_REUSE):
_ = state_ops.assign(variable_scope.get_variable("var", []), x)
with variable_scope.variable_scope("testVarScopeGetOrCreateReuse_bar",
reuse=variable_scope.AUTO_REUSE):
_ = variable_scope.get_variable("var", [])
self.assertEqual(value, x.eval())
test_value(42.) # Variable is created.
test_value(13.) # Variable is reused hereafter.
test_value(17.)
def testVarOpScope(self):
with self.test_session():
with ops.name_scope("testVarOpScope1"):
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "tower/w:0")
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope1/tower/testVarOpScope2/")
with variable_scope.variable_scope("tower", "default", []):
with self.assertRaises(ValueError):
variable_scope.get_variable("w", [])
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope1/tower_1/testVarOpScope2/")
with ops.name_scope("testVarOpScope2"):
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "default/w:0")
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope2/default/testVarOpScope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "default_1/w:0")
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope2/default_1/testVarOpScope2/")
def testVarOpScopeUniqueNamesInterleavedSubstringScopes(self):
with self.test_session():
with variable_scope.variable_scope(None, "defaultScope1"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope1/layer/w:0")
with variable_scope.variable_scope(None, "defaultScope1"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope1_1/layer/w:0")
with variable_scope.variable_scope(None, "defaultScope"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope/layer/w:0")
with variable_scope.variable_scope(None, "defaultScope1"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope1_2/layer/w:0")
def testVarOpScopeUniqueNamesWithJump(self):
with self.test_session():
with variable_scope.variable_scope("default") as default:
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"default/layer/w:0")
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"default/layer_1/w:0")
with variable_scope.variable_scope(default):
pass
# No matter the jump in the middle, unique numbering continues.
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"default/layer_2/w:0")
def testVarOpScopeReuse(self):
with self.test_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/tower/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer, reuse=True) as outer:
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/tower/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
def testVarScopeGetVar(self):
with self.test_session():
with variable_scope.variable_scope("root"):
with variable_scope.variable_scope("towerA") as tower_a:
va = variable_scope.get_variable("v", [1])
self.assertEqual(va.name, "root/towerA/v:0")
with variable_scope.variable_scope(tower_a, reuse=True):
va2 = variable_scope.get_variable("v", [1])
self.assertEqual(va2, va)
with variable_scope.variable_scope("towerB"):
vb = variable_scope.get_variable("v", [1])
self.assertEqual(vb.name, "root/towerB/v:0")
with self.assertRaises(ValueError):
with variable_scope.variable_scope("towerA"):
va2 = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope("towerA", reuse=True):
va2 = variable_scope.get_variable("v", [1])
self.assertEqual(va2, va)
with variable_scope.variable_scope("foo"):
with variable_scope.variable_scope("bar"):
v = variable_scope.get_variable("v", [1])
self.assertEqual(v.name, "root/foo/bar/v:0")
with variable_scope.variable_scope(tower_a, reuse=True):
va3 = variable_scope.get_variable("v", [1])
self.assertEqual(va, va3)
with self.assertRaises(ValueError):
with variable_scope.variable_scope(tower_a, reuse=True):
with variable_scope.variable_scope("baz"):
variable_scope.get_variable("v", [1])
with self.assertRaises(ValueError) as exc:
with variable_scope.variable_scope(tower_a, reuse=True):
variable_scope.get_variable("v", [2]) # Different shape.
self.assertEqual("shape" in str(exc.exception), True)
with self.assertRaises(ValueError) as exc:
with variable_scope.variable_scope(tower_a, reuse=True):
variable_scope.get_variable("v", [1], dtype=dtypes.int32)
self.assertEqual("dtype" in str(exc.exception), True)
def testVarScopeOuterScope(self):
with self.test_session():
with variable_scope.variable_scope("outer") as outer:
pass
with variable_scope.variable_scope(outer):
self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with variable_scope.variable_scope("default"):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
with variable_scope.variable_scope(outer, reuse=True):
self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/scope2/")
with variable_scope.variable_scope("default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/default/scope2/")
def testVarScopeNestedOuterScope(self):
with self.test_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope(outer):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer/scope2/")
with variable_scope.variable_scope("default"):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer, reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer_1/scope2/")
with variable_scope.variable_scope("default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default_1/scope2/")
def testVarOpScopeReuseParam(self):
with self.test_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/tower/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer) as outer:
with variable_scope.variable_scope("tower", "default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/tower/scope2/")
outer.reuse_variables()
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
def testVarOpScopeReuseError(self):
with self.test_session():
with self.assertRaises(ValueError):
with variable_scope.variable_scope(None, "default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
def testVarOpScopeOuterScope(self):
with self.test_session():
with variable_scope.variable_scope("outer") as outer:
pass
with variable_scope.variable_scope(outer, "default", []):
self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
with variable_scope.variable_scope(outer, "default", reuse=True):
self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/scope2/")
outer.reuse_variables()
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/default/scope2/")
def testVarOpScopeNestedOuterScope(self):
with self.test_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope(outer, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer, "default", reuse=True):
self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
def testBasicWhenAuxiliaryNameScopeIsFalse(self):
with self.test_session():
with variable_scope.variable_scope(
"scope", auxiliary_name_scope=False) as scope:
self.assertEqual(scope.original_name_scope, "")
self.assertEqual(variable_scope.get_variable("w", []).name, "scope/w:0")
self.assertEqual(constant_op.constant([], name="c").name, "c:0")
with variable_scope.variable_scope(scope, auxiliary_name_scope=False):
self.assertEqual(scope.original_name_scope, "")
self.assertEqual(
variable_scope.get_variable("w1", []).name, "scope/w1:0")
self.assertEqual(constant_op.constant([], name="c1").name, "c1:0")
# Recheck: new name scope is NOT created before
with ops.name_scope("scope"):
self.assertEqual(constant_op.constant([], name="c").name, "scope/c:0")
with variable_scope.variable_scope("outer"):
with variable_scope.variable_scope(
"inner", auxiliary_name_scope=False) as inner:
self.assertEqual(inner.original_name_scope, "outer/")
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/inner/w:0")
self.assertEqual(constant_op.constant([], name="c").name, "outer/c:0")
with variable_scope.variable_scope(
inner, auxiliary_name_scope=False) as inner1:
self.assertEqual(inner1.original_name_scope, "outer/")
self.assertEqual(
variable_scope.get_variable("w1", []).name, "outer/inner/w1:0")
self.assertEqual(
constant_op.constant([], name="c1").name, "outer/c1:0")
# Recheck: new name scope is NOT created before
with ops.name_scope("inner"):
self.assertEqual(
constant_op.constant([], name="c").name, "outer/inner/c:0")
def testCreatedByDefaultNameWhenAuxiliaryNameScopeIsFalse(self):
with self.test_session():
with variable_scope.variable_scope(
None, default_name="default", auxiliary_name_scope=False) as scope:
self.assertEqual(scope.original_name_scope, "")
self.assertEqual(
variable_scope.get_variable("w", []).name, "default/w:0")
self.assertEqual(constant_op.constant([], name="c").name, "c:0")
# Recheck: new name scope is NOT created before
with ops.name_scope("default"):
self.assertEqual(constant_op.constant([], name="c").name, "default/c:0")
with variable_scope.variable_scope("outer"):
with variable_scope.variable_scope(
None, default_name="default", auxiliary_name_scope=False) as inner:
self.assertEqual(inner.original_name_scope, "outer/")
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
self.assertEqual(constant_op.constant([], name="c").name, "outer/c:0")
# Recheck: new name scope is NOT created before
with ops.name_scope("default"):
self.assertEqual(
constant_op.constant([], name="c").name, "outer/default/c:0")
def testReenterRootScopeWhenAuxiliaryNameScopeIsFalse(self):
with self.test_session():
root_scope = variable_scope.get_variable_scope()
with variable_scope.variable_scope(
root_scope, auxiliary_name_scope=False) as scope:
self.assertEqual(scope.original_name_scope, "")
self.assertEqual(variable_scope.get_variable("w", []).name, "w:0")
self.assertEqual(constant_op.constant([], name="c").name, "c:0")
with variable_scope.variable_scope("outer"):
with variable_scope.variable_scope(
root_scope, auxiliary_name_scope=False) as inner:
self.assertEqual(inner.original_name_scope, "")
self.assertEqual(variable_scope.get_variable("w1", []).name, "w1:0")
self.assertEqual(
constant_op.constant([], name="c1").name, "outer/c1:0")
def testAuxiliaryNameScopeIsInvalid(self):
with self.test_session():
with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
with variable_scope.variable_scope(
None, default_name="scope", auxiliary_name_scope="invalid"):
pass
with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
with variable_scope.variable_scope(
"scope", auxiliary_name_scope="invalid"):
pass
with variable_scope.variable_scope("scope") as scope:
pass
with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
with variable_scope.variable_scope(
scope, auxiliary_name_scope="invalid"):
pass
def testReuseScopeWithoutNameScopeCollision(self):
# Github issue: #13429
with self.test_session():
with variable_scope.variable_scope("outer"):
with variable_scope.variable_scope("inner") as inner:
pass
with variable_scope.variable_scope(
inner, auxiliary_name_scope=False) as scope:
with ops.name_scope(scope.original_name_scope):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/inner/w:0")
self.assertEqual(
constant_op.constant([], name="c").name, "outer/inner/c:0")
with ops.name_scope("inner"):
self.assertEqual(constant_op.constant([], name="c").name, "inner/c:0")
with variable_scope.variable_scope("another"):
with variable_scope.variable_scope(
inner, auxiliary_name_scope=False) as scope1:
with ops.name_scope(scope1.original_name_scope):
self.assertEqual(
variable_scope.get_variable("w1", []).name, "outer/inner/w1:0")
self.assertEqual(
constant_op.constant([], name="c1").name, "outer/inner/c1:0")
with ops.name_scope("inner"):
self.assertEqual(
constant_op.constant([], name="c").name, "another/inner/c:0")
@test_util.run_in_graph_and_eager_modes()
def testGetLocalVar(self):
# Check that local variable respects naming.
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope(outer, "default", []):
local_var = variable_scope.get_local_variable(
"w", [], collections=["foo"])
self.assertEqual(local_var.name, "outer/w:0")
# Since variable is local, it should be in the local variable collection
# but not the trainable collection.
if context.in_graph_mode():
self.assertIn(local_var,
ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES))
self.assertIn(local_var, ops.get_collection("foo"))
self.assertNotIn(local_var,
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
# Check that local variable respects `reuse`.
if context.in_graph_mode():
with variable_scope.variable_scope(outer, "default", reuse=True):
self.assertEqual(
variable_scope.get_local_variable("w", []).name, "outer/w:0")
def testGetVarWithDevice(self):
g = ops.Graph()
varname_type = []
def device_func(op):
if op.type in ["Variable", "VariableV2", "VarHandleOp"]:
varname_type.append((op.name, op.get_attr("dtype")))
return "/device:GPU:0"
with g.as_default():
with ops.device(device_func):
_ = variable_scope.get_variable("x", (100, 200))
_ = variable_scope.get_variable(
"y", dtype=dtypes.int64, initializer=numpy.arange(73))
self.assertEqual(varname_type[0], ("x", dtypes.float32))
self.assertEqual(varname_type[1], ("y", dtypes.int64))
def testGetCollection(self):
with self.test_session():
_ = variable_scope.get_variable("testGetCollection_a", [])
_ = variable_scope.get_variable("testGetCollection_b", [],
trainable=False)
with variable_scope.variable_scope("testGetCollection_foo_") as scope1:
_ = variable_scope.get_variable("testGetCollection_a", [])
_ = variable_scope.get_variable("testGetCollection_b", [],
trainable=False)
self.assertEqual([
v.name
for v in scope1.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
], ["testGetCollection_foo_/testGetCollection_a:0"])
self.assertEqual([
v.name
for v in scope1.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
], [
"testGetCollection_foo_/testGetCollection_a:0",
"testGetCollection_foo_/testGetCollection_b:0"
])
with variable_scope.variable_scope("testGetCollection_foo") as scope2:
_ = variable_scope.get_variable("testGetCollection_a", [])
_ = variable_scope.get_variable("testGetCollection_b", [],
trainable=False)
self.assertEqual([
v.name
for v in scope2.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
], ["testGetCollection_foo/testGetCollection_a:0"])
self.assertEqual([
v.name
for v in scope2.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
], [
"testGetCollection_foo/testGetCollection_a:0",
"testGetCollection_foo/testGetCollection_b:0"
])
scope = variable_scope.get_variable_scope()
self.assertEqual([
v.name for v in scope.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
], [
"testGetCollection_a:0", "testGetCollection_b:0",
"testGetCollection_foo_/testGetCollection_a:0",
"testGetCollection_foo_/testGetCollection_b:0",
"testGetCollection_foo/testGetCollection_a:0",
"testGetCollection_foo/testGetCollection_b:0"
])
self.assertEqual([
v.name
for v in scope.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
], [
"testGetCollection_a:0",
"testGetCollection_foo_/testGetCollection_a:0",
"testGetCollection_foo/testGetCollection_a:0"
])
def testGetTrainableVariables(self):
with self.test_session():
_ = variable_scope.get_variable("testGetTrainableVariables_a", [])
with variable_scope.variable_scope(
"testGetTrainableVariables_foo") as scope:
_ = variable_scope.get_variable("testGetTrainableVariables_b", [])
_ = variable_scope.get_variable("testGetTrainableVariables_c", [],
trainable=False)
self.assertEqual([v.name
for v in scope.trainable_variables()],
["testGetTrainableVariables_foo/"
"testGetTrainableVariables_b:0"])
def testGetGlobalVariables(self):
with self.test_session():
_ = variable_scope.get_variable("testGetGlobalVariables_a", [])
with variable_scope.variable_scope("testGetGlobalVariables_foo") as scope:
_ = variable_scope.get_variable("testGetGlobalVariables_b", [])
self.assertEqual([v.name
for v in scope.global_variables()],
["testGetGlobalVariables_foo/"
"testGetGlobalVariables_b:0"])
def testGetLocalVariables(self):
with self.test_session():
_ = variable_scope.get_variable(
"a", [], collections=[ops.GraphKeys.LOCAL_VARIABLES])
with variable_scope.variable_scope("foo") as scope:
_ = variable_scope.get_variable(
"b", [], collections=[ops.GraphKeys.LOCAL_VARIABLES])
_ = variable_scope.get_variable(
"c", [])
self.assertEqual([v.name
for v in scope.local_variables()], ["foo/b:0"])
def testGetVariableWithRefDtype(self):
v = variable_scope.get_variable("v", shape=[3, 4], dtype=dtypes.float32)
# Ensure it is possible to do get_variable with a _ref dtype passed in.
_ = variable_scope.get_variable("w", shape=[5, 6], dtype=v.dtype)
def axis0_into1_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
return part
def axis0_into2_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
part[0] = 2
return part
def axis0_into3_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
part[0] = 3
return part
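# The helpers above return a per-axis partition-count list for get_variable:
# axis0_into2_partitioner, for example, splits a variable into 2 shards along
# axis 0 and leaves every other axis unpartitioned; axis0_into1_partitioner
# leaves the variable whole.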
class VariableScopeWithPartitioningTest(test.TestCase):
def testResultNameMatchesRequested(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=(3, 1, 1))
self.assertEqual(v.name, "scope0/name0")
v_concat = v.as_tensor()
self.assertEqual(v_concat.name, "scope0/name0:0")
variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertIn("scope0/name0/part_0:0", [x.name for x in variables])
self.assertIn("scope0/name0/part_1:0", [x.name for x in variables])
self.assertNotIn("scope0/name0/part_2:0", [x.name for x in variables])
def testBreaksIfPartitioningChanges(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into3_partitioner, reuse=True):
with self.assertRaisesRegexp(
ValueError,
"Trying to reuse partitioned variable .* but specified partitions .* "
"and found partitions .*"):
variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into1_partitioner, reuse=True):
with self.assertRaisesRegexp(
ValueError,
"Trying to reuse partitioned variable .* but specified partitions .* "
"and found partitions .*"):
variable_scope.get_variable("name0", shape=(3, 1, 1))
def testReturnsExistingConcatenatedValueIfReuse(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v_concat = variable_scope.get_variable("name0", shape=(3, 1, 1))
variable_scope.get_variable_scope().reuse_variables()
v_concat_2 = variable_scope.get_variable("name0", shape=(3, 1, 1))
self.assertEqual(v_concat, v_concat_2)
def testAllowsReuseWithoutPartitioner(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope("scope0", reuse=True):
v_reused = variable_scope.get_variable("name0")
self.assertEqual(v, v_reused)
def testPropagatePartitionerOnReopening(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner) as vs:
self.assertEqual(axis0_into2_partitioner, vs.partitioner)
with variable_scope.variable_scope(vs) as vs1:
self.assertEqual(axis0_into2_partitioner, vs1.partitioner)
def testScalarIgnoresPartitioner(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=())
self.assertEqual(v.name, "scope0/name0:0")
variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertIn("scope0/name0:0", [x.name for x in variables])
def _testPartitionConcatenatesAlongCorrectAxis(self, use_resource):
def _part_axis_0(**unused_kwargs):
return (2, 1, 1)
def _part_axis_1(**unused_kwargs):
return (1, 2, 1)
with variable_scope.variable_scope("root", use_resource=use_resource):
v0 = variable_scope.get_variable(
"n0", shape=(2, 2, 2), partitioner=_part_axis_0)
v1 = variable_scope.get_variable(
"n1", shape=(2, 2, 2), partitioner=_part_axis_1)
self.assertEqual(v0.get_shape(), (2, 2, 2))
self.assertEqual(v1.get_shape(), (2, 2, 2))
n0_0 = list(v0)[0]
n0_1 = list(v0)[1]
self.assertEqual(n0_0.get_shape(), (1, 2, 2))
self.assertEqual(n0_1.get_shape(), (1, 2, 2))
n1_0 = list(v1)[0]
n1_1 = list(v1)[1]
self.assertEqual(n1_0.get_shape(), (2, 1, 2))
self.assertEqual(n1_1.get_shape(), (2, 1, 2))
def testPartitionConcatenatesAlongCorrectAxis(self):
self._testPartitionConcatenatesAlongCorrectAxis(use_resource=False)
def testPartitionConcatenatesAlongCorrectAxisResource(self):
self._testPartitionConcatenatesAlongCorrectAxis(use_resource=True)
class VariableScopeWithCustomGetterTest(test.TestCase):
def testNonCallableGetterFails(self):
with self.assertRaisesRegexp(ValueError, r"custom_getter .* not callable:"):
with variable_scope.variable_scope("scope0", custom_getter=3):
variable_scope.get_variable("name0")
with self.assertRaisesRegexp(ValueError, r"custom_getter .* not callable:"):
variable_scope.get_variable("name0", custom_getter=3)
def testNoSideEffectsWithIdentityCustomGetter(self):
called = [0]
def custom_getter(getter, *args, **kwargs):
called[0] += 1
return getter(*args, **kwargs)
with variable_scope.variable_scope(
"scope", custom_getter=custom_getter) as scope:
v = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope(scope, reuse=True):
v2 = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope("new_scope") as new_scope:
v3 = variable_scope.get_variable("v3", [1])
with variable_scope.variable_scope(
new_scope, reuse=True, custom_getter=custom_getter):
v4 = variable_scope.get_variable("v3", [1])
self.assertEqual(v, v2)
self.assertEqual(v3, v4)
self.assertEqual(3, called[0]) # skipped one in the first new_scope
def testCustomGetterWithReuse(self):
# Custom getter can choose to behave differently on reused variables.
def custom_getter(getter, *args, **kwargs):
var = getter(*args, **kwargs)
if kwargs["reuse"]:
# This can be used, e.g., for changing the caching device if needed.
return array_ops.identity(var, name="reused")
else:
return array_ops.identity(var, name="not_reused")
with variable_scope.variable_scope(
"scope", custom_getter=custom_getter) as scope:
v = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope(scope, reuse=True):
v2 = variable_scope.get_variable("v", [1])
self.assertEqual(v.name, "not_reused:0")
self.assertEqual(v2.name, "reused:0")
def testGetterThatCreatesTwoVariablesAndSumsThem(self):
def custom_getter(getter, name, *args, **kwargs):
g_0 = getter("%s/0" % name, *args, **kwargs)
g_1 = getter("%s/1" % name, *args, **kwargs)
with ops.name_scope("custom_getter"):
return g_0 + g_1
with variable_scope.variable_scope("scope", custom_getter=custom_getter):
v = variable_scope.get_variable("v", [1, 2, 3])
self.assertEqual([1, 2, 3], v.get_shape())
true_vars = variables_lib.trainable_variables()
self.assertEqual(2, len(true_vars))
self.assertEqual("scope/v/0:0", true_vars[0].name)
self.assertEqual("scope/v/1:0", true_vars[1].name)
self.assertEqual("custom_getter/add:0", v.name)
with self.test_session() as sess:
variables_lib.global_variables_initializer().run()
np_vars, np_v = sess.run([true_vars, v])
self.assertAllClose(np_v, sum(np_vars))
def testNestedCustomGetters(self):
def sum_getter(getter, name, *args, **kwargs):
g_0 = getter("%s/sum_0" % name, *args, **kwargs)
g_1 = getter("%s/sum_1" % name, *args, **kwargs)
with ops.name_scope("sum_getter"):
return g_0 + g_1
def prod_getter(getter, name, *args, **kwargs):
g_0 = getter("%s/prod_0" % name, *args, **kwargs)
g_1 = getter("%s/prod_1" % name, *args, **kwargs)
with ops.name_scope("prod_getter"):
return g_0 * g_1
with variable_scope.variable_scope(
"prod_scope", custom_getter=prod_getter):
with variable_scope.variable_scope(
"sum_scope", custom_getter=sum_getter):
with variable_scope.variable_scope(
"inner_sum_scope", custom_getter=sum_getter):
# take sums of sums of products
v = variable_scope.get_variable("v", [1, 2, 3])
self.assertEqual([1, 2, 3], v.get_shape())
true_vars = variables_lib.trainable_variables()
self.assertEqual(8, len(true_vars))
template = (
"prod_scope/sum_scope/inner_sum_scope/v/sum_%d/sum_%d/prod_%d:0")
self.assertEqual(template % (0, 0, 0), true_vars[0].name)
self.assertEqual(template % (0, 0, 1), true_vars[1].name)
self.assertEqual(template % (0, 1, 0), true_vars[2].name)
self.assertEqual(template % (0, 1, 1), true_vars[3].name)
self.assertEqual(template % (1, 0, 0), true_vars[4].name)
self.assertEqual(template % (1, 0, 1), true_vars[5].name)
self.assertEqual(template % (1, 1, 0), true_vars[6].name)
self.assertEqual(template % (1, 1, 1), true_vars[7].name)
with self.test_session() as sess:
variables_lib.global_variables_initializer().run()
np_vars, np_v = sess.run([true_vars, v])
# take products of sums of products
self.assertAllClose(
np_v,
(((np_vars[0] * np_vars[1]) + (np_vars[2] * np_vars[3]))
+ ((np_vars[4] * np_vars[5]) + (np_vars[6] * np_vars[7]))))
class PartitionInfoTest(test.TestCase):
def testConstructorChecks(self):
# Invalid arg types.
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=None, var_offset=[0, 1])
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=None)
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape="foo", var_offset=[0, 1])
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset="foo")
# full_shape and var_offset must have same length.
with self.assertRaises(ValueError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=[0])
# Offset must always be less than shape.
with self.assertRaises(ValueError):
variable_scope._PartitionInfo(full_shape=[1, 1], var_offset=[0, 1])
def testSingleOffset(self):
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
self.assertEqual(4, partition_info.single_offset([1, 3]))
# Tests when the variable isn't partitioned at all.
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[0, 0])
self.assertEqual(0, partition_info.single_offset([9, 3]))
def testSingleSliceDim(self):
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
# Invalid shape.
with self.assertRaises(TypeError):
partition_info.single_slice_dim(None)
# Rank of shape differs from full_shape.
with self.assertRaises(ValueError):
partition_info.single_slice_dim([1, 2, 3])
# Shape is too large given var_offset (4+6 > 9).
with self.assertRaises(ValueError):
partition_info.single_slice_dim([6, 3])
# Multiple possible slice dim from shape.
with self.assertRaises(ValueError):
partition_info.single_slice_dim([1, 1])
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[0, 0])
self.assertEqual(1, partition_info.single_slice_dim([9, 2]))
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
self.assertEqual(0, partition_info.single_slice_dim([2, 3]))
if __name__ == "__main__":
test.main()
| apache-2.0 | -5,723,243,853,738,241,000 | 43.192632 | 80 | 0.639998 | false | 3.648882 | true | false | false |
jmckib/soundcurl | src/soundcurl.py | 1 | 4951 | #!/usr/bin/env python
from HTMLParser import HTMLParser
import json
import shutil
from StringIO import StringIO
import sys
import traceback
import urllib2
from bs4 import BeautifulSoup
from mutagen.id3 import ID3, APIC, TIT2, TPE1
unescape_html = HTMLParser().unescape
def main():
try:
if len(sys.argv) != 2:
raise ValueError('Expecting one argument, the URL of a song on SoundCloud.')
sound_cloud_page = SoundCloudPage(sys.argv[1])
sound_cloud_page.download_song()
except:
traceback.print_exception(*sys.exc_info())
print ('\nSorry, you just experienced an error :(\nPlease report it '
'to me here: https://github.com/jmckib/soundcurl/issues/new')
class SoundCloudPage(object):
def __init__(self, page_url):
# Http GET parameters screw up the expected format of the page
# sometimes. Example: `?fb_action_ids` from soundcloud links on
# facebook.
self._page_url = page_url.split('?')[0]
# Use StringIO so we can consume the response multiple times.
self._http_response = StringIO(urllib2.urlopen(self._page_url).read())
def download_song(self):
"""Download song from given SoundCloud URL and write to disk as mp3.
The URL must be for a single song, not a set or an artist's page.
Title, artist, and cover art metadata are included in the mp3.
"""
stream_url_line = self._get_stream_url_line()
if not stream_url_line:
raise ValueError(
"Can't find stream URL. Are you sure '%s' is the url of a "
"song on SoundCloud?" % self._page_url)
stream_data = self._get_stream_data(stream_url_line)
# A file-like object containing the song data.
song = urllib2.urlopen(stream_data['streamUrl'])
# Write the song to disk.
song_title, artist = self._get_title_and_artist(stream_data)
# Believe it or not, there are songs with forward slashes in their
# titles, but we can't use that as a file name.
song_filename = '%s.mp3' % song_title.replace('/', '|')
print "Writing '%s'" % song_filename
shutil.copyfileobj(song, open(song_filename, 'wb'))
print 'Writing song metadata...'
tags = ID3()
tags.add(TIT2(encoding=3, text=song_title)) # Song title
print "\ttitle: '%s'" % song_title
tags.add(TPE1(encoding=3, text=artist)) # Artist
print "\tartist: '%s'" % artist
# Add track artwork.
# First, get a URL for the artwork as a jpeg.
soup = BeautifulSoup(self._get_fresh_http_response())
artwork_img = soup.find('img', alt="Track artwork")
artwork_url = artwork_img.get('src') if artwork_img else None
if not artwork_url:
print 'Failed to find artwork URL.'
else:
print 'Writing cover art...'
artwork = urllib2.urlopen(artwork_url)
tags.add(APIC(
encoding=3, mime='image/jpeg', desc=u'',
type=3, # indicates that this is the front cover art
data=artwork.read())
)
tags.save(song_filename)
def _get_fresh_http_response(self):
self._http_response.seek(0)
return self._http_response
def _get_stream_url_lines(self):
"""Return an iterator of the stream url lines in the http response.
A "stream url line" is a line of javascript code in the page's html
that contains the url of an mp3. The stream url lines are in the same
order as the songs on the page.
"""
return (line for line in self._get_fresh_http_response()
if 'http://media.soundcloud.com/stream/' in line)
def _get_stream_url_line(self):
"""Return the first line in the http response with a stream url in it.
If there are no stream urls, return None. See `_get_stream_url_lines`
for more explanation.
"""
return next(self._get_stream_url_lines(), None)
def _get_stream_data(self, stream_url_line):
"""Return dictionary of stream data from a stream url line."""
# stream_url_line looks something like this
# window.SC.bufferTracks.push(<BIG_JAVASCRIPT_DICT>);\n
# Since a javascript dict is really a json dict, we decode it as one.
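# The slice below strips the leading 'window.SC.bufferTracks.push(' prefix
# (28 characters) and the trailing ');\n' (3 characters), leaving only the dict.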
return json.loads(stream_url_line[28:-3])
def _get_all_stream_data(self):
return (self._get_stream_data(stream_url_line) for stream_url_line
in self._get_stream_url_lines())
def _get_title_and_artist(self, stream_data):
try:
artist, title = stream_data['title'].split(' - ')
except ValueError:
artist = stream_data['user']['username']
title = stream_data['title']
return unescape_html(title).strip(), unescape_html(artist).strip()
if __name__ == '__main__':
main()
| apache-2.0 | -6,992,734,706,741,720,000 | 36.793893 | 88 | 0.611392 | false | 3.802611 | false | false | false |
vbatoufflet/machette | machette/module/split.py | 1 | 3365 | # -*- coding: utf-8 -*-
#
# This file is a part of Machette.
#
# Copyright (C) 2010 Vincent Batoufflet <vincent@batoufflet.info>
#
# This software is released under the terms of the GNU General Public License
# version 3 or any later version. See LICENSE file for further details.
#
# $Id$
import gtk
import pygtk
import os
import re
from machette.module import MachetteModule
from machette.path import DATA_DIR
pygtk.require('2.0')
# Set module class name
classname = 'MachetteModuleSplit'
# Set module information
mandatory = True
# Set configuration options list
options = {
'window.split-delimiter': (int, 0),
}
class MachetteModuleSplit(MachetteModule):
def register(self):
"""
Register MachetteModuleSplit module
void register(void)
"""
# Load module UI file
self.parent.wtree.add_from_file(os.path.join(DATA_DIR, 'ui', 'module',
'split.ui'))
# Initialize split delimiter GtkComboBox
for delim in ['|', '#', '@', unichr(0xb6), unichr(0x25a0)]:
self.parent.wtree.get_object('combobox-split-delimiter').\
append_text(delim)
# Restore last state
self.parent.wtree.get_object('combobox-split-delimiter').set_active(
self.parent.config.get('window.split-delimiter'))
# Attach UI to the parent window
self.parent.wtree.get_object('notebook-extension').append_page(
self.parent.wtree.get_object('vbox-split'), gtk.Label(_('Split')))
# Connect signals
self.parent.rbuffer.connect('changed', self.update_tab)
self.parent.tbuffer.connect('changed', self.update_tab)
self.parent.wtree.get_object('combobox-split-delimiter').\
connect('changed', self.update_tab)
self.parent.wtree.get_object('vbox-split').\
connect('map', self.update_tab)
def unregister(self):
"""
Unregister MachetteModuleSplit module
void unregister(void)
"""
# Save state
if self.parent.config.get('window.save-state'):
self.parent.config.set('window.split-delimiter', self.parent.\
wtree.get_object('combobox-split-delimiter').get_active())
def update_tab(self, source=None, event=None):
"""
Update split GtkNotebook tab
void update_tab(event source: gtk.Object, event: gtk.gdk.Event)
"""
# Reset buffer text
self.parent.wtree.get_object('textview-split-result').get_buffer().\
set_text('')
# Stop if updating is active or regex not available
if self.parent.updating or not self.parent.regex:
return
try:
delimiter = self.parent.wtree.\
get_object('combobox-split-delimiter').get_active_text()
# Get split chunks
regex = re.compile(self.parent.rbuffer.get_text(
self.parent.rbuffer.get_start_iter(),
self.parent.rbuffer.get_end_iter()), self.parent.flags)
chunks = regex.split(self.parent.target, self.parent.limit)
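# re.split() yields None for optional capturing groups that did not match;
# normalize those to empty strings before joining with the delimiter.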
chunks = [a if a else '' for a in chunks]
self.parent.wtree.get_object('textview-split-result').\
get_buffer().set_text(delimiter.join(chunks))
except (IndexError, re.error), e:
pass
| gpl-3.0 | 8,518,862,810,468,989,000 | 31.355769 | 78 | 0.618425 | false | 3.806561 | false | false | false |
webeng/DeepLearningTutorials | code/sktheano_cnn_v2.py | 1 | 37558 | """This tutorial introduces the LeNet5 neural network architecture
using Theano. LeNet5 is a convolutional neural network, good for
classifying images. This tutorial shows how to build the architecture,
and comes with all the hyper-parameters you need to reproduce the
paper's MNIST results.
This implementation simplifies the model in the following ways:
- LeNetConvPool doesn't implement location-specific gain and bias parameters
- LeNetConvPool doesn't implement pooling by average, it implements pooling
by max.
- Digit classification is implemented with a logistic regression rather than
an RBF network
- LeNet5 was not fully-connected convolutions at second layer
References:
- Y. LeCun, L. Bottou, Y. Bengio and P. Haffner:
Gradient-Based Learning Applied to Document
Recognition, Proceedings of the IEEE, 86(11):2278-2324, November 1998.
http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf
"""
"""
Aaron Berndsen:
A Conformal Neural Network using Theano for computation and structure,
but built to obey sklearn's basic 'fit' 'predict' functionality
*code largely motivated from deeplearning.net examples
and Graham Taylor's "Vanilla RNN" (https://github.com/gwtaylor/theano-rnn/blob/master/rnn.py)
You'll require theano and libblas-dev
tips/tricks/notes:
* if training set is large (>O(100)) and redundant, use stochastic gradient descent (batch_size=1), otherwise use conjugate gradient descent (batch_size > 1)
Basic usage:
import nnetwork as NN
n = NN.NeuralNetwork(design=[8,8]) # a NN with two hidden layers of 8 neurons each
n.fit(Xtrain, ytrain)
pred = n.predict(Xtest)
"""
import cPickle as pickle
import logging
import numpy as np
import timeit
from sklearn.base import BaseEstimator
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
import logging
import os
import sys
from logistic_sgd_test import LogisticRegression, load_data
_logger = logging.getLogger("theano.gof.compilelock")
_logger.setLevel(logging.WARN)
logger = logging.getLogger(__name__)
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
#formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
mode = theano.Mode(linker='cvm')
#mode = 'DEBUG_MODE'
class CNN(object):
"""
Convolutional Neural Network (CNN),
backend by Theano, but compliant with sklearn interface.
This class defines all the layers in the network. At present the CNN has 7 layers: 3 LeNetConvPoolLayer,
3 MLP HiddenLayers and 1 LogisticRegression. This architecture is for classifying 128x128 grayscale images.
The class MetaCNN has more lower level routines such as initialization, prediction and save.
You should init with MetaCNN.
"""
def __init__(self, input, im_width=128, im_height=128, n_out=2, activation=T.tanh,
nkerns=[48,128,256],
filters=[13,5,4],
poolsize=[(2,2),(2,2),(2,2)],
n_hidden=[200,50,2],
output_type='softmax', batch_size=128,
use_symbolic_softmax=False,verbose = True):
"""
im_width : width of input image
im_height : height of input image
n_out : number of class labels
:type nkerns: list of integers
:param nkerns: number of kernels on each layer
:type filters: list of integers
:param filters: width of convolution
:type poolsize: list of 2-tuples
:param poolsize: maxpooling in convolution layer (index-0),
and direction x or y (index-1)
:type n_hidden: list of integers
:param n_hidden: number of hidden neurons
:type output_type: string
:param output_type: type of decision 'softmax', 'binary', 'real'
:type batch_size: integers
:param batch_size: number of samples in each training batch. Default 128.
"""
self.activation = activation
self.output_type = output_type
self.verbose = verbose
if verbose:
logger.info("\n Input image with:{} height:{} ".format(im_width,im_height))
# if use_symbolic_softmax:
# def symbolic_softmax(x):
# e = T.exp(x)
# return e / T.sum(e, axis=1).dimshuffle(0, 'x')
# self.softmax = symbolic_softmax
# else:
# self.softmax = T.nnet.softmax
rng = np.random.RandomState(23455)
# Reshape matrix of rasterized images of shape (batch_size, nx*ny)
# to a 4D tensor, compatible with our LeNetConvPoolLayer
layer0_input = input.reshape((batch_size, 1, im_width, im_height))
# Construct the first convolutional pooling layer:
# filtering reduces the image size to (im_width - filters[0]+1, im_height-filters[0] + 1 )=(x,x)
# maxpooling reduces this further to (x/2,x/2) = (y,y)
# 4D output tensor is thus of shape (batch_size,nkerns[0],y,y)
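# Worked example (assuming the default im_width=im_height=128, filters=[13,5,4],
# nkerns=[48,128,256] and poolsize=(2,2) from this __init__): convolution gives
# 128-13+1 = 116, maxpooling gives 116/2 = 58, so layer0 outputs
# (batch_size, 48, 58, 58).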
self.layer0 = LeNetConvPoolLayer(
rng,
input=layer0_input,
image_shape=(batch_size, 1, im_width, im_height),
filter_shape=(nkerns[0], 1, filters[0], filters[0]),
poolsize=poolsize[0]
)
if self.verbose:
logger.info('\n Layer {} \n image_shape: ({},{},{},{}) \n filter_shape: ({},{},{},{}) \n poolsize:{}'.format(0,
batch_size, 1, im_width, im_height,
nkerns[0], 1, filters[0], filters[0],
poolsize[0])
)
# Construct the second convolutional pooling layer
# filtering reduces the image size to (im_width_l1-filters[1]+1, im_height_l1-filters[1]+1) = (x,x)
# maxpooling reduces this further to (x/2,x/2) = (y,y)
# 4D output tensor is thus of shape (batch_size, nkerns[1], y, y)
im_width_l1 = (im_width - filters[0] + 1)/poolsize[0][0]
im_height_l1 = (im_height - filters[0] + 1)/poolsize[0][1]
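# Continuing the worked example with the defaults: 58-5+1 = 54 after convolution,
# 54/2 = 27 after maxpooling, so layer1 outputs (batch_size, 128, 27, 27).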
self.layer1 = LeNetConvPoolLayer(
rng,
input=self.layer0.output,
image_shape=(batch_size, nkerns[0], im_width_l1, im_height_l1),
filter_shape=(nkerns[1], nkerns[0], filters[1], filters[1]),
poolsize=poolsize[1]
)
if self.verbose:
logger.info('\n Layer {} \n image_shape: ({},{},{},{}) \n filter_shape: ({},{},{},{}) \n poolsize:{}'.format(1
,batch_size, nkerns[0], im_width_l1, im_height_l1,
nkerns[1], nkerns[0], filters[1], filters[1],
poolsize[1])
)
# Construct the third convolutional pooling layer
# filtering reduces the image size to (im_width_l2-filters[2]+1, im_height_l2-filters[2]+1) = (x,x)
# maxpooling reduces this further to (x/2,x/2) = (y,y)
# 4D output tensor is thus of shape (batch_size, nkerns[2], y, y)
im_width_l2 = (im_width_l1 - filters[1] + 1)/poolsize[1][0]
im_height_l2 = (im_height_l1 - filters[1] + 1)/poolsize[1][1]
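# Continuing the worked example with the defaults: 27-4+1 = 24 after convolution,
# 24/2 = 12 after maxpooling, so layer2 outputs (batch_size, 256, 12, 12).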
self.layer2 = LeNetConvPoolLayer(
rng,
input=self.layer1.output,
image_shape=(batch_size, nkerns[1], im_width_l2, im_height_l2),
filter_shape=(nkerns[2], nkerns[1], filters[2], filters[2]),
poolsize=poolsize[2]
)
if self.verbose:
logger.info('\n Layer {} \n image_shape: ({},{},{},{}) \n filter_shape: ({},{},{},{}) \n poolsize:{}'.format(2,
batch_size, nkerns[1], im_width_l2, im_height_l2,
nkerns[2], nkerns[1], filters[2], filters[2],
poolsize[2])
)
# the HiddenLayer being fully-connected, it operates on 2D matrices of
# shape (batch_size, num_pixels) (i.e. a matrix of rasterized images).
# This will generate a matrix of shape (batch_size, nkerns[2] * im_width_l3 * im_height_l3)
layer3_input = self.layer2.output.flatten(2)
# construct a fully-connected sigmoidal layer
im_width_l3 = (im_width_l2-filters[2]+1)/poolsize[2][0]
im_height_l3 = (im_height_l2-filters[2]+1)/poolsize[2][1]
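# With the default configuration this gives im_width_l3 = im_height_l3 = 12,
# so n_in = 256 * 12 * 12 = 36864 flattened features per image.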
self.layer3 = HiddenLayer(
rng,
input=layer3_input,
n_in=nkerns[2] * im_width_l3 * im_height_l3,
n_out=n_hidden[0],
activation=T.tanh
)
if self.verbose:
logger.info("\n Layer {} input: ({},{})".format(3,batch_size,nkerns[2] * im_width_l3 * im_height_l3))
# construct a fully-connected sigmoidal layer
self.layer4 = HiddenLayer(
rng,
input=self.layer3.output,
n_in=n_hidden[0],
n_out=n_hidden[1],
activation=T.tanh
)
if self.verbose:
logger.info("\n Layer {} input: {}".format(4,n_hidden[1]))
# construct a fully-connected sigmoidal layer
self.layer5 = HiddenLayer(
rng,
input=self.layer4.output,
n_in=n_hidden[1],
n_out=n_hidden[2],
activation=T.tanh
)
if self.verbose:
logger.info("\n Layer {} input: {}".format(5,n_hidden[2]))
# classify the values of the fully-connected sigmoidal layer
self.layer6 = LogisticRegression(
input=self.layer5.output,
n_in=n_hidden[2],
n_out=n_out
)
if self.verbose:
logger.info("\n Layer {} input: {}".format(6,n_hidden[2]))
# CNN regularization
self.L1 = self.layer6.L1
self.L2_sqr = self.layer6.L2_sqr
# create a list of all model parameters to be fit by gradient descent
self.params = self.layer6.params + self.layer5.params + self.layer4.params + self.layer3.params + self.layer2.params + self.layer1.params + self.layer0.params
self.y_pred = self.layer6.y_pred
self.p_y_given_x = self.layer6.p_y_given_x
#self.layer3_output = self.layer5.input
self.layer5_output = self.layer5.input
if self.output_type == 'real':
self.loss = lambda y: self.mse(y)
elif self.output_type == 'binary':
self.loss = lambda y: self.nll_binary(y)
elif self.output_type == 'softmax':
# push through softmax, computing vector of class-membership
# probabilities in symbolic form
self.loss = lambda y: self.nll_multiclass(y)
else:
raise NotImplementedError
def mse(self, y):
# error between output and target
return T.mean((self.y_pred - y) ** 2)
def nll_binary(self, y):
# negative log likelihood based on binary cross entropy error
return T.mean(T.nnet.binary_crossentropy(self.p_y_given_x, y))
#same as negative-log-likelikhood
def nll_multiclass(self, y):
# negative log likelihood based on multiclass cross entropy error
# y.shape[0] is (symbolically) the number of rows in y, i.e.,
# number of time steps (call it T) in the sequence
# T.arange(y.shape[0]) is a symbolic vector which will contain
# [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
# Log-Probabilities (call it LP) with one row per example and
# one column per class LP[T.arange(y.shape[0]),y] is a vector
# v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
# LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
# the mean (across minibatch examples) of the elements in v,
# i.e., the mean log-likelihood across the minibatch.
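# Tiny worked example (hypothetical numbers): with a minibatch of 3 examples and
# y = [2, 0, 1], LP[T.arange(3), y] picks [LP[0,2], LP[1,0], LP[2,1]], i.e. the
# log-probability the model assigns to each example's true class.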
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def errors(self, y):
"""Return a float representing the number of errors in the sequence
over the total number of examples in the sequence ; zero one
loss over the size of the sequence
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_pred.ndim:
raise TypeError('y should have the same shape as self.y_pred',
('y', y.type, 'y_pred', self.y_pred.type))
if self.output_type in ('binary', 'softmax'):
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
class MetaCNN(BaseEstimator):
"""
the actual CNN is not init-ed until .fit is called.
We determine the image input size (assumed square images) and
the number of outputs in .fit from the training data
"""
def __init__(
self, learning_rate=0.05, n_epochs=60, batch_size=128, activation='tanh',
nkerns=[48,128,256], n_hidden=[200,50,2], filters=[13,5,4], poolsize=[(2,2),(2,2),(2,2)],
output_type='softmax',L1_reg=0.00, L2_reg=0.00,
use_symbolic_softmax=False, im_width=128, im_height=128, n_out=2,verbose = True):
self.learning_rate = float(learning_rate)
self.nkerns = nkerns
self.n_hidden = n_hidden
self.filters = filters
self.poolsize = poolsize
self.n_epochs = int(n_epochs)
self.batch_size = int(batch_size)
self.L1_reg = float(L1_reg)
self.L2_reg = float(L2_reg)
self.activation = activation
self.output_type = output_type
self.use_symbolic_softmax = use_symbolic_softmax
self.im_width = im_width
self.im_height = im_height
self.n_out = n_out
self.verbose = verbose
def ready(self):
"""
this routine is called from "fit" since we determine the
image size (assumed square) and output labels from the training data.
"""
#input
self.x = T.matrix('x')
#output (a label)
self.y = T.ivector('y')
if self.activation == 'tanh':
activation = T.tanh
elif self.activation == 'sigmoid':
activation = T.nnet.sigmoid
elif self.activation == 'relu':
activation = lambda x: x * (x > 0)
elif self.activation == 'cappedrelu':
activation = lambda x: T.minimum(x * (x > 0), 6)
else:
raise NotImplementedError
self.cnn = CNN(
input=self.x,
n_out=self.n_out,
im_width=self.im_width,
im_height=self.im_height,
activation=activation,
nkerns=self.nkerns,
filters=self.filters,
n_hidden=self.n_hidden,
poolsize=self.poolsize,
output_type=self.output_type,
batch_size=self.batch_size,
use_symbolic_softmax=self.use_symbolic_softmax,
verbose=self.verbose
)
#self.cnn.predict expects batch_size number of inputs.
#we wrap those functions and pad as necessary in 'def predict' and 'def predict_proba'
self.predict_wrap = theano.function(inputs=[self.x],
outputs=self.cnn.y_pred,
mode=mode)
# self.predict_vector = theano.function(inputs=[self.x],
# outputs=self.cnn.layer5.output,
# mode=mode)
self.predict_vector = theano.function(inputs=[self.x],
outputs=self.cnn.layer5_output,
mode=mode)
self.predict_proba_wrap = theano.function(inputs=[self.x],
outputs=self.cnn.p_y_given_x,
mode=mode)
def score(self, X, y):
"""Returns the mean accuracy on the given test data and labels.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training set.
y : array-like, shape = [n_samples]
Labels for X.
Returns
-------
z : float
"""
return np.mean(self.predict(X) == y)
def fit(self, train_set_x, train_set_y, valid_set_x=None, valid_set_y=None,test_set_x = None,test_set_y = None,
n_epochs=None):
""" Fit model
Pass in X_test, Y_test to compute test error and report during
training.
X_train : ndarray (T x n_in)
Y_train : ndarray (T x n_out)
validation_frequency : int
in terms of number of sequences (or number of weight updates)
n_epochs : None (used to override self.n_epochs from init.
"""
self.ready()
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0]
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
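        # Python 2 integer division: number of complete minibatches per split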
n_train_batches /= self.batch_size
n_valid_batches /= self.batch_size
n_test_batches /= self.batch_size
######################
# BUILD ACTUAL MODEL #
######################
if self.verbose:
logger.info('\n ... building the model')
index = T.lscalar('index') # index to a [mini]batch
# cost = self.cnn.loss(self.y)\
# + self.L1_reg * self.cnn.L1\
# + self.L2_reg * self.cnn.L2_sqr
#cost = self.cnn.loss(self.y)
cost = self.cnn.layer6.negative_log_likelihood(self.y)
#self.cnn.loss(self.y),
test_model = theano.function(
[index],
self.cnn.layer6.errors(self.y),
givens={
self.x: test_set_x[index * self.batch_size: (index + 1) * self.batch_size],
self.y: test_set_y[index * self.batch_size: (index + 1) * self.batch_size]
}
)
#self.cnn.loss(self.y),
validate_model = theano.function(
[index],
self.cnn.layer6.errors(self.y),
givens={
self.x: valid_set_x[index * self.batch_size: (index + 1) * self.batch_size],
self.y: valid_set_y[index * self.batch_size: (index + 1) * self.batch_size]
}
)
# create a list of all model parameters to be fit by gradient descent
self.params = self.cnn.params
# create a list of gradients for all model parameters
self.grads = T.grad(cost, self.params)
# train_model is a function that updates the model parameters by
# SGD Since this model has many parameters, it would be tedious to
# manually create an update rule for each model parameter. We thus
# create the updates dictionary by automatically looping over all
# (params[i],grads[i]) pairs.
# self.updates = {}
# for param_i, grad_i in zip(self.params, self.grads):
# self.updates[param_i] = param_i - self.learning_rate * grad_i
self.updates = [
(param_i, param_i - self.learning_rate * grad_i)
for param_i, grad_i in zip(self.params, self.grads)
]
train_model = theano.function(
[index],
cost,
updates=self.updates,
givens={
self.x: train_set_x[index * self.batch_size: (index + 1) * self.batch_size],
self.y: train_set_y[index * self.batch_size: (index + 1) * self.batch_size]
}
)
###############
# TRAIN MODEL #
###############
if self.verbose:
logger.info('\n... training')
# early-stopping parameters
patience = 10000 # look as this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
                                      # go through this many
                                      # minibatches before checking the network
                                      # on the validation set; in this case we
                                      # check every epoch
best_validation_loss = np.inf
best_iter = 0
test_score = 0.
start_time = timeit.default_timer()
epoch = 0
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
iter = (epoch - 1) * n_train_batches + minibatch_index
if iter % 100 == 0:
logger.info('... training @ iter = {}'.format(iter))
cost_ij = train_model(minibatch_index)
print cost_ij
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in xrange(n_valid_batches)]
this_validation_loss = np.mean(validation_losses)
logger.info('epoch %i, minibatch %i/%i, validation error %f %%' %
(epoch, minibatch_index + 1, n_train_batches,
this_validation_loss * 100.))
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
# save best validation score and iteration number
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = [
test_model(i)
for i in xrange(n_test_batches)
]
test_score = np.mean(test_losses)
logger.info((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
self.save(fpath=base_path + '/data/')
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
logger.info('Optimization complete.')
logger.info('Best validation score of %f %% obtained at iteration %i, '
'with test performance %f %%' %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
def predict(self, data):
"""
the CNN expects inputs with Nsamples = self.batch_size.
In order to run 'predict' on an arbitrary number of samples we
pad as necessary.
"""
if isinstance(data, list):
data = np.array(data)
if data.ndim == 1:
data = np.array([data])
nsamples = data.shape[0]
n_batches = nsamples//self.batch_size
n_rem = nsamples%self.batch_size
if n_batches > 0:
preds = [list(self.predict_wrap(data[i*self.batch_size:(i+1)*self.batch_size]))\
for i in range(n_batches)]
else:
preds = []
if n_rem > 0:
z = np.zeros((self.batch_size, self.im_width * self.im_height))
z[0:n_rem] = data[n_batches*self.batch_size:n_batches*self.batch_size+n_rem]
preds.append(self.predict_wrap(z)[0:n_rem])
return np.hstack(preds).flatten()
def predict_proba(self, data):
"""
the CNN expects inputs with Nsamples = self.batch_size.
In order to run 'predict_proba' on an arbitrary number of samples we
pad as necessary.
"""
if isinstance(data, list):
data = np.array(data)
if data.ndim == 1:
data = np.array([data])
nsamples = data.shape[0]
n_batches = nsamples//self.batch_size
n_rem = nsamples%self.batch_size
if n_batches > 0:
preds = [list(self.predict_proba_wrap(data[i*self.batch_size:(i+1)*self.batch_size]))\
for i in range(n_batches)]
else:
preds = []
if n_rem > 0:
            z = np.zeros((self.batch_size, self.im_width * self.im_height))
z[0:n_rem] = data[n_batches*self.batch_size:n_batches*self.batch_size+n_rem]
preds.append(self.predict_proba_wrap(z)[0:n_rem])
return np.vstack(preds)
def shared_dataset(self, data_xy):
""" Load the dataset into shared variables """
data_x, data_y = data_xy
shared_x = theano.shared(np.asarray(data_x,
dtype=theano.config.floatX))
shared_y = theano.shared(np.asarray(data_y,
dtype=theano.config.floatX))
if self.output_type in ('binary', 'softmax'):
return shared_x, T.cast(shared_y, 'int32')
else:
return shared_x, shared_y
def __getstate__(self):
""" Return state sequence."""
#check if we're using ubc_AI.classifier wrapper,
#adding it's params to the state
if hasattr(self, 'orig_class'):
superparams = self.get_params()
#now switch to orig. class (MetaCNN)
oc = self.orig_class
cc = self.__class__
self.__class__ = oc
params = self.get_params()
for k, v in superparams.iteritems():
params[k] = v
self.__class__ = cc
else:
params = self.get_params() #sklearn.BaseEstimator
if hasattr(self, 'cnn'):
weights = [p.get_value() for p in self.cnn.params]
else:
weights = []
state = (params, weights)
return state
def _set_weights(self, weights):
""" Set fittable parameters from weights sequence.
Parameters must be in the order defined by self.params:
W, W_in, W_out, h0, bh, by
"""
i = iter(weights)
if hasattr(self, 'cnn'):
for param in self.cnn.params:
param.set_value(i.next())
def __setstate__(self, state):
""" Set parameters from state sequence.
Parameters must be in the order defined by self.params:
W, W_in, W_out, h0, bh, by
"""
params, weights = state
#we may have several classes or superclasses
for k in ['n_comp', 'use_pca', 'feature']:
if k in params:
self.set_params(**{k:params[k]})
params.pop(k)
#now switch to MetaCNN if necessary
if hasattr(self,'orig_class'):
cc = self.__class__
oc = self.orig_class
self.__class__ = oc
self.set_params(**params)
self.ready()
if len(weights) > 0:
self._set_weights(weights)
self.__class__ = cc
else:
self.set_params(**params)
self.ready()
self._set_weights(weights)
def save(self, fpath='.', fname=None):
""" Save a pickled representation of Model state. """
import datetime
fpathstart, fpathext = os.path.splitext(fpath)
if fpathext == '.pkl':
# User supplied an absolute path to a pickle file
fpath, fname = os.path.split(fpath)
elif fname is None:
# Generate filename based on date
date_obj = datetime.datetime.now()
date_str = date_obj.strftime('%Y-%m-%d-%H:%M:%S')
class_name = self.__class__.__name__
#fname = '%s.%s.pkl' % (class_name, date_str)
fname = 'best_model.pkl'
fabspath = os.path.join(fpath, fname)
logger.info("Saving to %s ..." % fabspath)
file = open(fabspath, 'wb')
state = self.__getstate__()
pickle.dump(state, file, protocol=pickle.HIGHEST_PROTOCOL)
file.close()
def load(self, path):
""" Load model parameters from path. """
logger.info("Loading from %s ..." % path)
file = open(path, 'rb')
state = pickle.load(file)
self.__setstate__(state)
file.close()
class LogisticRegression(object):
"""Multi-class Logistic Regression Class
The logistic regression is fully described by a weight matrix :math:`W`
and bias vector :math:`b`. Classification is done by projecting data
points onto a set of hyperplanes, the distance to which is used to
determine a class membership probability.
"""
def __init__(self, input, n_in, n_out):
""" Initialize the parameters of the logistic regression
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
self.W = theano.shared(value=np.zeros((n_in, n_out),
dtype=theano.config.floatX),
name='W', borrow=True)
# initialize the baises b as a vector of n_out 0s
self.b = theano.shared(value=np.zeros((n_out,),
dtype=theano.config.floatX),
name='b', borrow=True)
# compute vector of class-membership probabilities in symbolic form
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
# compute prediction as class whose probability is maximal in
# symbolic form
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
# parameters of the model
self.params = [self.W, self.b]
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
self.L1 = 0
self.L1 += abs(self.W.sum())
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L2_sqr = 0
self.L2_sqr += (self.W ** 2).sum()
def negative_log_likelihood(self, y):
"""Return the mean of the negative log-likelihood of the prediction
of this model under a given target distribution.
.. math::
\frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
\frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|} \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
\ell (\theta=\{W,b\}, \mathcal{D})
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
Note: we use the mean instead of the sum so that
the learning rate is less dependent on the batch size
"""
# y.shape[0] is (symbolically) the number of rows in y, i.e.,
# number of examples (call it n) in the minibatch
# T.arange(y.shape[0]) is a symbolic vector which will contain
# [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
# Log-Probabilities (call it LP) with one row per example and
# one column per class LP[T.arange(y.shape[0]),y] is a vector
# v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
# LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
# the mean (across minibatch examples) of the elements in v,
# i.e., the mean log-likelihood across the minibatch.
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def errors(self, y):
"""Return a float representing the number of errors in the minibatch
over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_pred.ndim:
raise TypeError('y should have the same shape as self.y_pred',
                ('y', y.type, 'y_pred', self.y_pred.type))
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh):
"""
Typical hidden layer of a MLP: units are fully-connected and have
sigmoidal activation function. Weight matrix W is of shape (n_in,n_out)
and the bias vector b is of shape (n_out,).
NOTE : The nonlinearity used here is tanh
Hidden unit activation is given by: tanh(dot(input,W) + b)
:type rng: np.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dmatrix
:param input: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_out: int
:param n_out: number of hidden units
:type activation: theano.Op or function
:param activation: Non linearity to be applied in the hidden
layer
"""
self.input = input
        # `W` is initialized with `W_values` which is uniformly sampled
        # from sqrt(-6./(n_in+n_hidden)) and sqrt(6./(n_in+n_hidden))
        # for the tanh activation function
        # the output of uniform is converted using asarray to dtype
        # theano.config.floatX so that the code is runnable on GPU
# Note : optimal initialization of weights is dependent on the
# activation function used (among other things).
# For example, results presented in [Xavier10] suggest that you
# should use 4 times larger initial weights for sigmoid
# compared to tanh
        # We have no info for other functions, so we use the same as
# tanh.
if W is None:
W_values = np.asarray(rng.uniform(
low=-np.sqrt(6. / (n_in + n_out)),
high=np.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)), dtype=theano.config.floatX)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = np.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (lin_output if activation is None
else activation(lin_output))
# parameters of the model
self.params = [self.W, self.b]
class LeNetConvPoolLayer(object):
"""Pool Layer of a convolutional network """
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
"""
Allocate a LeNetConvPoolLayer with shared variable internal parameters.
:type rng: np.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dtensor4
:param input: symbolic image tensor, of shape image_shape
:type filter_shape: tuple or list of length 4
:param filter_shape: (number of filters, num input feature maps,
filter height,filter width)
:type image_shape: tuple or list of length 4
:param image_shape: (batch size, num input feature maps,
image height, image width)
:type poolsize: tuple or list of length 2
:param poolsize: the downsampling (pooling) factor (#rows,#cols)
"""
assert image_shape[1] == filter_shape[1]
self.input = input
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = np.prod(filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) /
np.prod(poolsize))
# initialize weights with random weights
W_bound = np.sqrt(6. / (fan_in + fan_out))
self.W = theano.shared(
np.asarray(
rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=theano.config.floatX
),
borrow=True
)
# the bias is a 1D tensor -- one bias per output feature map
b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX)
self.b = theano.shared(value=b_values, borrow=True)
# convolve input feature maps with filters
conv_out = conv.conv2d(
input=input,
filters=self.W,
filter_shape=filter_shape,
image_shape=image_shape
)
# downsample each feature map individually, using maxpooling
pooled_out = downsample.max_pool_2d(
input=conv_out,
ds=poolsize,
ignore_border=True
)
# add the bias term. Since the bias is a vector (1D array), we first
# reshape it to a tensor of shape (1,n_filters,1,1). Each bias will
# thus be broadcasted across mini-batches and feature map
# width & height
self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
# store parameters of this layer
self.params = [self.W, self.b]
self.input = input
def cosine_distance(a, b):
import numpy as np
from numpy import linalg as LA
dot_product = np.dot(a,b.T)
cosine_distance = dot_product / (LA.norm(a) * LA.norm(b))
return cosine_distance
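# Example (hypothetical vectors): cosine_distance(np.array([1., 0.]), np.array([1., 0.]))
# gives 1.0 while orthogonal vectors give 0.0 -- despite the name it is a
# cosine *similarity*, so larger means more alike.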
if __name__ == '__main__':
base_path = '/Applications/MAMP/htdocs/DeepLearningTutorials'
#base_path = '/home/ubuntu/DeepLearningTutorials'
from fetex_image import FetexImage
from PIL import Image
import random
datasets = load_data('mnist.pkl.gz')
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
cnn = MetaCNN(learning_rate=0.05,nkerns=[48,128,256], filters=[13,5,4], batch_size=64,poolsize=[(2,2),(2,2),(2,2)], n_hidden=[200,50,2] , n_out=2, im_width=128,im_height=128)
# cnn.fit(train_set_x,train_set_y,valid_set_x,valid_set_y,test_set_x,test_set_y, n_epochs=5)
# cnn.save(fpath=base_path + '/data/')
#folder = base_path + '/data/cnn-furniture/'
# Predictions after training
cnn.load(base_path + '/data/best_model.pkl')
#cnn.load('/home/ubuntu/DeepLearningTutorials/data/MetaCNN.2015-10-19-13:59:18.pkl')
#sample = np.asarray(X_train, dtype=theano.config.floatX)
#print sample[0].reshape((64,64)).shape
#Image.fromarray(sample[2].reshape((64,64)),mode="L").show()
pkl_file = open( '../data/train_set.pkl', 'rb')
train_set = pickle.load(pkl_file)
X_train, Y_train = train_set
pkl_file = open( '../data/lb.pkl', 'rb')
lb = pickle.load(pkl_file)
# arr = np.array(np.round((X_train[0] * 256).reshape((128,128))),dtype=np.uint8)
# Image.fromarray(arr,mode="L").show()
# arr = np.array(np.round((X_train[1] * 256).reshape((128,128))),dtype=np.uint8)
# Image.fromarray(arr,mode="L").show()
# arr = np.array(np.round((X_train[2] * 256).reshape((128,128))),dtype=np.uint8)
# Image.fromarray(arr,mode="L").show()
#print Y_train[0:3]
# arr = np.array(np.round((X_train[1300] * 256).reshape((64,64))),dtype=np.uint8)
# Image.fromarray(arr,mode="L").show()
#print sample[0]
# #print sample.shape
#sample = X_train[0:25]
#print lb.classes_
#sample = X_train[0]
#print Y_train[4000:4100]
#print cnn.predict(X_train[0:3])
# sample = X_train[4400]
# print Y_train[4400]
# print cnn.predict(sample)
# pkl_file = open( '../data/X_original.pkl', 'rb')
# X_original = cPickle.load(pkl_file)
# a = X_original[0:25]
# a = np.asarray(a, dtype=theano.config.floatX)
# #fe.reconstructImage(a[2]).show()
def flaten_aux(V):
return V.flatten(order='F')
#print X_train[0].shape
# cnn_output_vectors = np.array([])
# for i in xrange(1,8):
# #a = map(flaten_aux, X_train[128 * (i - 1): 128 * i ])
# a = X_train[64 * (i - 1): 64 * i ]
# # #print cnn.predict(a)
# a = cnn.predict_vector(a)
# #print a
# print len(cnn_output_vectors)
# #cnn_output_vectors.append(a)
# if len(cnn_output_vectors) == 0:
# cnn_output_vectors = a
# else:
# cnn_output_vectors = np.concatenate((cnn_output_vectors, a), axis=0)
# #cnn_output_vectors = cnn_output_vectors + a
# print len(cnn_output_vectors)
# file = open('../data/cnn_output_vectors.pkl', 'wb')
# pickle.dump(cnn_output_vectors, file, protocol=pickle.HIGHEST_PROTOCOL)
# file.close()
file = open('../data/cnn_output_vectors.pkl', 'rb')
cnn_output_vectors = pickle.load(file)
file.close()
print len(cnn_output_vectors)
#print len(cnn_output_vectors)
#print len(X_train)
#print cnn.predict(sample)
#print cnn.predict_wrap(a)
#rn_im_index = random.randint(0, len(X_train))
#base_image_index = 1
base_image_index = random.randint(0, 448)
max_similarity = 0
max_similarity_pos = -1
#max_similarity_pos = []
#for i in xrange(1,len(train_set_x)):
a = cnn_output_vectors[base_image_index]
#a = X_train[base_image_index]
#print a.shape
for i in xrange(0,64 * 7):
if i != base_image_index:
b = cnn_output_vectors[i]
#b = X_train[i]
d = cosine_distance(a, b)
print d
#if d > max_similarity:
if d > max_similarity:
max_similarity = d
max_similarity_pos = i
#max_similarity_pos.append(i)
print 'max_similarity: {}'.format(max_similarity)
fe = FetexImage(mode='L')
fe.reconstructImage(X_train[base_image_index]).show()
fe.reconstructImage(X_train[max_similarity_pos]).show()
# fe.reconstructImage(X_train[max_similarity_pos[0]]).show()
# fe.reconstructImage(X_train[max_similarity_pos[1]]).show()
# fe.reconstructImage(X_train[max_similarity_pos[2]]).show()
# fe.reconstructImage(X_train[max_similarity_pos[3]]).show()
# print a.shape
# print b.shape
# print cosine_distance(a, b) | bsd-3-clause | 1,384,039,947,904,350,500 | 31.631625 | 175 | 0.664785 | false | 2.902024 | true | false | false |
skewerr/deskbot | modules/commands/decide.py | 1 | 1511 | import random
from .. import irc, var
# Fill command dictionary.
def ins_command ():
var.commands["decide"] = type("command", (object,), {})()
var.commands["decide"].method = decide
var.commands["decide"].tags = ["other"]
var.commands["decide"].aliases = [".decide", ".choose"]
var.commands["decide"].usage = [
"{} a|b|c|d|... - Decide between a, b, c, ...",
"{} a or b or c or ... - Decide between a, b, c, ...",
"{} a,b,c,... - Decide between a, b, c, ...",
"{} a - Decide between Yes and No.",
"That is the order of preference. You can do {} a or b | c, which will decide between \"a or b\" and c."
]
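    # For example (hypothetical invocation), ".decide pizza | sushi or ramen"
    # picks between "pizza" and "sushi or ramen", because "|" is checked
    # before " or ", which is checked before ",".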
# Command method.
def decide (user, channel, word):
if len(word) == 1:
irc.msg(channel, "{}: You have to give me some choices.".format(user))
else:
string = " ".join(word[1:])
if "|" in string:
choices = [choice.strip() for choice in string.split("|") if choice]
elif " or " in string:
choices = [choice.strip() for choice in string.split(" or ") if choice]
elif "," in string:
choices = [choice.strip() for choice in string.split(",") if choice]
else:
choices = ["Yes.", "No."]
# Empty lists can't be taken.
if not choices:
irc.msg(channel, "{}: Give me some choices, man, come on.".format(user))
return
if random.random() < 0.05:
if choices == ["Yes.", "No."]:
irc.msg(channel, "{}: Maybe.".format(user))
else:
irc.msg(channel, "{}: Neither.".format(user))
else:
irc.msg(channel, "{}: {}".format(user, random.choice(choices)))
| bsd-3-clause | -9,165,051,844,989,602,000 | 32.577778 | 106 | 0.598279 | false | 2.939689 | false | false | false |
CeltonMcGrath/TACTIC | src/tactic/ui/startup/column_edit_wdg.py | 6 | 10659 | ###########################################################
#
# Copyright (c) 2005-2008, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['ColumnEditWdg', 'ColumnEditCbk']
from pyasm.biz import Pipeline, Project
from pyasm.command import Command, CommandException
from pyasm.search import Search, SearchType
from pyasm.web import DivWdg, Table
from pyasm.widget import TextWdg, IconWdg, SelectWdg, HiddenWdg, WidgetConfigView
from pyasm.common import TacticException
from tactic.ui.common import BaseRefreshWdg
from tactic.ui.widget import SingleButtonWdg, ActionButtonWdg, IconButtonWdg
class ColumnEditWdg(BaseRefreshWdg):
def get_display(my):
top = my.top
top.add_color("background", "background")
top.add_class("spt_columns_top")
my.set_as_panel(top)
top.add_style("padding: 10px")
search_type = my.kwargs.get("search_type")
search_type_obj = SearchType.get(search_type)
inner = DivWdg()
top.add(inner)
inner.add_style("width: 500px")
#text = TextWdg("search_type")
text = HiddenWdg("search_type")
inner.add(text)
text.set_value(search_type)
title_wdg = DivWdg()
inner.add(title_wdg)
title_wdg.add( search_type_obj.get_title() )
title_wdg.add(" <i style='font-size: 9px;opacity: 0.5'>(%s)</i>" % search_type)
title_wdg.add_style("padding: 5px")
title_wdg.add_color("background", "background3")
title_wdg.add_color("color", "color3")
title_wdg.add_style("margin: -10px -10px 10px -10px")
title_wdg.add_style("font-weight: bold")
shelf_wdg = DivWdg()
inner.add(shelf_wdg)
shelf_wdg.add_style("height: 30px")
button = ActionButtonWdg(title='Create', icon=IconWdg.SAVE)
shelf_wdg.add(button)
shelf_wdg.add_style("float: right")
button.add_behavior( {
'type': 'click_up',
'search_type': search_type,
'cbjs_action': '''
var class_name = 'tactic.ui.startup.ColumnEditCbk';
var top = bvr.src_el.getParent(".spt_columns_top");
var elements = top.getElements(".spt_columns_element");
var values = [];
for (var i = 0; i < elements.length; i++ ) {
var data = spt.api.Utility.get_input_values(elements[i], null, false);
values.push(data)
}
var kwargs = {
search_type: bvr.search_type,
values: values
}
var server = TacticServerStub.get();
try {
server.execute_cmd(class_name, kwargs);
var names = [];
for (var i = 0; i < values.length; i++) {
var name = values[i].name;
name = name.strip();
if (name == '') { continue; }
names.push(name);
}
spt.table.add_columns(names)
// prevent grabbing all values, pass in a dummy one
spt.panel.refresh(top, {'refresh': true});
} catch(e) {
spt.alert(spt.exception.handler(e));
}
'''
} )
# add the headers
table = Table()
inner.add(table)
table.add_style("width: 100%")
tr = table.add_row()
tr.add_gradient("background", "background3")
tr.add_style("padding", "3px")
th = table.add_header("Column Name")
th.add_style("width: 170px")
th.add_style("text-align: left")
th = table.add_header("Format")
th.add_style("text-align: left")
from tactic.ui.container import DynamicListWdg
dyn_list = DynamicListWdg()
inner.add(dyn_list)
from tactic.ui.manager import FormatDefinitionEditWdg
for i in range(0, 4):
column_div = DivWdg()
column_div.add_class("spt_columns_element")
if i == 0:
dyn_list.add_template(column_div)
else:
dyn_list.add_item(column_div)
column_div.add_style("padding: 3px")
column_div.add_style("float: left")
table = Table()
column_div.add(table)
table.add_row()
text_wdg = NewTextWdg("name")
td = table.add_cell(text_wdg)
text_wdg.add_behavior( {
'type': 'blur',
'cbjs_action': '''
var value = bvr.src_el.value;
var code = spt.convert_to_alpha_numeric(value);
bvr.src_el.value = code;
'''
} )
option = {
'name': 'xxx',
'values': 'integer|float|percent|currency|date|time|scientific|boolean|text|timecode',
}
format_wdg = FormatDefinitionEditWdg(option=option)
td = table.add_cell(format_wdg)
td.add_style("width: 260px")
td.add_style("padding-left: 40px")
# show the current columns
title_wdg = DivWdg()
inner.add(title_wdg)
title_wdg.add_style("margin-top: 20px")
title_wdg.add("<b>Existing Columns</b>")
title_wdg.add_color("background", "background3")
title_wdg.add_style("padding: 5px")
title_wdg.add_style("margin: 20px -10px 10px -10px")
config = WidgetConfigView.get_by_search_type(search_type, "definition")
element_names = config.get_element_names()
table = Table()
inner.add(table)
table.add_style("width: 100%")
tr = table.add_row()
tr.add_gradient("background", "background3")
th = table.add_header("Column")
th.add_style("text-align: left")
th = table.add_header("Data Type")
th.add_style("text-align: left")
th = table.add_header("Format")
th.add_style("text-align: left")
th = table.add_header("Edit")
th.add_style("text-align: left")
count = 0
for element_name in element_names:
display_class = config.get_display_handler(element_name)
if display_class != 'tactic.ui.table.FormatElementWdg':
continue
table.add_row()
display_options = config.get_display_options(element_name)
format = display_options.get("format")
if not format:
format = '<i>text</i>'
data_type = display_options.get("type")
table.add_cell(element_name)
table.add_cell(data_type)
table.add_cell(format)
td = table.add_cell()
button = IconButtonWdg(title="Edit Definition", icon=IconWdg.EDIT)
td.add(button)
button.add_behavior( {
'type': 'click_up',
'search_type': search_type,
'element_name': element_name,
'cbjs_action': '''
var class_name = 'tactic.ui.manager.ElementDefinitionWdg';
var kwargs = {
search_type: bvr.search_type,
view: 'definition',
element_name: bvr.element_name
};
spt.panel.load_popup("Element Definition", class_name, kwargs);
'''
} )
count += 1
if not count:
table.add_row()
td = table.add_cell()
td.add_style("height: 50px")
td.add("No existing columns found")
td.add_style("text-align: center")
td.add_border()
td.add_color("background", "background", -5)
if my.kwargs.get("is_refresh"):
return inner
else:
return top
class ColumnEditCbk(Command):
def execute(my):
search_type = my.kwargs.get("search_type")
column_info = SearchType.get_column_info(search_type)
values = my.kwargs.get("values")
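        # "values" is expected to be a list of per-column dicts collected from
        # the UI above, e.g. (hypothetical entry):
        #   [{'name': 'status', 'data_type': 'varchar', 'format': 'text', 'fps': ''}, ...]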
# get the definition config for this search_type
from pyasm.search import WidgetDbConfig
config = WidgetDbConfig.get_by_search_type(search_type, "definition")
if not config:
config = SearchType.create("config/widget_config")
config.set_value("search_type", search_type)
config.set_value("view", "definition")
config.commit()
config._init()
for data in values:
name = data.get("name")
name = name.strip()
if name == '':
continue
try:
name.encode('ascii')
except UnicodeEncodeError:
raise TacticException('Column name needs to be in English. Non-English characters can be used in Title when performing [Edit Column Definition] afterwards.')
if column_info.get(name):
raise CommandException("Column [%s] is already defined" % name)
format = data.get("format")
fps = data.get("fps")
data_type = data.get("data_type")
from pyasm.command import ColumnAddCmd
cmd = ColumnAddCmd(search_type, name, data_type)
cmd.execute()
#(my, search_type, attr_name, attr_type, nullable=True):
class_name = 'tactic.ui.table.FormatElementWdg'
options = {
'format': format,
'type': data_type,
'fps': fps
}
# add a new widget to the definition
config.append_display_element(name, class_name, options=options)
config.commit_config()
class NewTextWdg(TextWdg):
def init(my):
#color = my.get_color("border", -20)
color2 = my.get_color("border")
color = my.get_color("border", -20)
my.add_event("onfocus", "this.focused=true")
my.add_event("onblur", "this.focused=false;$(this).setStyle('border-color','%s')" % color2)
my.add_behavior( {
'type': 'mouseover',
'color': color,
'cbjs_action': '''
bvr.src_el.setStyle("border-color", bvr.color);
'''
} )
my.add_behavior( {
'type': 'mouseout',
'color': color2,
'cbjs_action': '''
if (!bvr.src_el.focused) {
bvr.src_el.setStyle("border-color", bvr.color);
}
'''
} )
super(NewTextWdg,my).init()
| epl-1.0 | -5,752,984,038,108,893,000 | 28.941011 | 173 | 0.535604 | false | 3.810869 | true | false | false |
polarise/python-bioclasses | BioClasses/FrameshiftTranscript.py | 1 | 3519 | # -*- encoding: utf-8 -*-
from __future__ import division
import sys
import scipy
from FrameshiftSite import *
class FrameshiftTranscript( object ):
def __init__( self, name, length ):
self.name = name
self.length = length
self.frameshift_sites = dict()
def add_frameshift_site( self, position, signal, radians_vector=( 2*scipy.pi/3, 2*scipy.pi/3, 2*scipy.pi/3 ), desig=None ):
def frameshift_position_score( x, L ):
"""
triangular function
P( frameshift ) is maximum in the middle and decreases to the edges
"""
if x < L/2:
return x/(L/2)
else:
return ( L - x )/(L/2)
position_score = frameshift_position_score( position, self.length )
self.frameshift_sites[position] = FrameshiftSite( ( 0, position ), \
( 0, 0 ), signal, self.length, position_score, radians_vector, desig )
def __repr__( self ):
output_str = "Transcript: %s of length %s\n" % ( self.name, self.length )
i = 1
for pos,FS in self.frameshift_sites.iteritems():
output_str += "Frameshift #%s: %s (desig: %s) at %s (pos-score = %s).\n" % ( i, \
FS.signal, FS.designation, FS.position, FS.position_score )
i += 1
return output_str
def filtered_print( self, p0=0, p1=1, theta0=scipy.pi ):
output_str = "Transcript: %s of length %s\n" % ( self.name, self.length )
i = 1
for pos,FS in self.frameshift_sites.iteritems():
if p0 <= FS.posscore2proportion( self.length ) <= p1 and FS.radians_vector_f[0] <= theta0:
output_str += "Frameshift #%s: %s (desig: %s) at %s (pos-score = %s).\n" % ( i, \
FS.signal, FS.designation, FS.position, FS.position_score )
i += 1
return output_str
def frameshifts( self, p0=0, p1=1, theta0=scipy.pi ):
for fss_i,fss in self.frameshift_sites.iteritems():
if p0 <= fss.posscore2proportion( self.length ) <= p1 and fss.radians_vector_f[0] <= theta0:
yield self.name, fss
def has_frameshift( self, p0=0, p1=1, theta0=scipy.pi ):
"""
beware!
"""
frameshift_count = 0
for fss_i,fss in self.frameshift_sites.iteritems():
if p0 <= fss.posscore2proportion( self.length ) <= p1 and fss.radians_vector_f[0] <= theta0:
frameshift_count += 1
if frameshift_count > 0:
return True
else:
return False
def has_exact_frameshift( self, other, p0=0, p1=1, theta0=scipy.pi, tol=3 ):
"""
beware!
"""
self_fsss = self.frameshift_sites.values()
other_fsss = other.frameshift_sites.values()
present = False
for fss in self_fsss:
for oss in other_fsss:
if p0 <= fss.posscore2proportion( self.length ) <= p1 and fss.radians_vector_f[0] <= theta0 and -tol <= fss.distance_from_5prime - oss.distance_from_5prime <= tol and fss.signal == oss.signal and fss.designation == oss.designation:
present = True
return present
def rough_equality( self, other ):
if len( self.frameshift_sites ) > 0 and len( other.frameshift_sites ) > 0:
return True
else:
return False
def is_equal( self, other, p0, p1, theta0 ):
# each FSTObject has one or more FSSObjects
# we look for equality on FSSObjects by comparing positions and signals
equal = False
number_equal = 0
		frameshift_sites_self = [ fss for fss in self.frameshift_sites.values() if p0 <= fss.posscore2proportion( self.length ) <= p1 and fss.radians_vector_f[0] <= theta0 ]
frameshift_sites_other = other.frameshift_sites.values()
for fsss in frameshift_sites_self:
for fsso in frameshift_sites_other:
if fsss == fsso:
equal = True
number_equal += 1
return equal, number_equal
| gpl-2.0 | -4,739,742,955,990,341,000 | 33.165049 | 236 | 0.658142 | false | 2.786223 | false | false | false |
tcpcloud/openvstorage | webapps/api/backend/views/users.py | 1 | 6661 | # Copyright 2014 Open vStorage NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for users
"""
import hashlib
import random
import string
from backend.serializers.user import PasswordSerializer
from backend.serializers.serializers import FullSerializer
from backend.decorators import required_roles, load, return_object, return_list, log
from backend.toolbox import Toolbox
from rest_framework import status, viewsets
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from ovs.dal.hybrids.user import User
from ovs.dal.hybrids.client import Client
from ovs.dal.hybrids.j_roleclient import RoleClient
from ovs.dal.lists.userlist import UserList
class UserViewSet(viewsets.ViewSet):
"""
Information about Users
"""
permission_classes = (IsAuthenticated,)
prefix = r'users'
base_name = 'users'
@log()
@required_roles(['read'])
@return_list(User)
@load()
def list(self, request):
"""
Lists all available Users where the logged in user has access to
"""
if Toolbox.is_client_in_roles(request.client, ['manage']):
return UserList.get_users()
else:
return [request.client.user]
@log()
@required_roles(['read'])
@return_object(User)
@load(User)
def retrieve(self, request, user):
"""
Load information about a given User
Only the currently logged in User is accessible, or all if the logged in User has a
system role
"""
if user.guid == request.client.user_guid or Toolbox.is_client_in_roles(request.client, ['manage']):
return user
raise PermissionDenied('Fetching user information not allowed')
@log()
@required_roles(['read', 'write', 'manage'])
@load()
def create(self, request):
"""
Creates a User
"""
serializer = FullSerializer(User, instance=User(), data=request.DATA, allow_passwords=True)
if serializer.is_valid():
user = serializer.object
if UserList.get_user_by_username(user.username) is not None:
return Response('User already exists', status=status.HTTP_303_SEE_OTHER)
user.save()
pw_client = Client()
pw_client.ovs_type = 'INTERNAL'
pw_client.grant_type = 'PASSWORD'
pw_client.user = user
pw_client.save()
cc_client = Client()
cc_client.ovs_type = 'INTERNAL'
cc_client.grant_type = 'CLIENT_CREDENTIALS'
cc_client.client_secret = ''.join(random.choice(string.ascii_letters +
string.digits +
'|_=+*#@!/-[]{}<>.?,\'";:~')
for _ in range(128))
cc_client.user = user
cc_client.save()
for junction in user.group.roles:
for client in [cc_client, pw_client]:
roleclient = RoleClient()
roleclient.client = client
roleclient.role = junction.role
roleclient.save()
serializer = FullSerializer(User, instance=user)
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@log()
@required_roles(['read', 'write', 'manage'])
@load(User)
def destroy(self, request, user):
"""
Deletes a user
"""
if request.client.user_guid == user.guid:
raise PermissionDenied('A user cannot delete itself')
for client in user.clients:
for token in client.tokens:
for junction in token.roles.itersafe():
junction.delete()
token.delete()
for junction in client.roles.itersafe():
junction.delete()
client.delete()
user.delete(abandon=['logs']) # Detach from the log entries
return Response(status=status.HTTP_204_NO_CONTENT)
@log()
@required_roles(['read', 'write', 'manage'])
@load(User)
def partial_update(self, contents, user, request):
"""
Update a User
"""
contents = None if contents is None else contents.split(',')
serializer = FullSerializer(User, contents=contents, instance=user, data=request.DATA)
if serializer.is_valid():
if user.guid == request.client.user_guid:
raise PermissionDenied('A user cannot update itself')
serializer.save()
return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@action()
@log()
@required_roles(['read', 'write'])
@load(User)
def set_password(self, request, user):
"""
        Sets the password of a given User. A logged in User can only change its own password,
or all passwords if the logged in User has a system role
"""
if user.guid == request.client.user_guid or Toolbox.is_client_in_roles(request.client, ['manage']):
serializer = PasswordSerializer(data=request.DATA)
if serializer.is_valid():
user.password = hashlib.sha256(str(serializer.data['new_password'])).hexdigest()
user.save()
# Now, invalidate all access tokens granted
for client in user.clients:
for token in client.tokens:
for junction in token.roles:
junction.delete()
token.delete()
return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
raise PermissionDenied('Updating password not allowed')
| apache-2.0 | 967,324,019,206,811,600 | 38.414201 | 107 | 0.605915 | false | 4.42004 | false | false | false |
Godricly/zhihu_cup | concise_data.py | 2 | 2604 | from read_embed import read_embed
char_raw = open('sorted_char_count.txt').readlines()
word_raw = open('sorted_word_count.txt').readlines()
char_raw = [v.strip().split(',') for v in char_raw]
word_raw = [v.strip().split(',') for v in word_raw]
char = [v[0] for v in char_raw]
char = {k:0 for k in char}
word = [v[0] for v in word_raw]
word = {k:0 for k in word}
char_embed_path='./char_embedding.txt'
word_embed_path='./word_embedding.txt'
word_dict,_,_ = read_embed(word_embed_path)
char_dict,_,_ = read_embed(char_embed_path)
word = {k:0 for k in word if word_dict.has_key(k)}
char = {k:0 for k in char if char_dict.has_key(k)}
f = open('question_topic_train_set.txt')
question_topic = f.readlines()
f = open('question_train_set.txt')
raw_questions = f.readlines()
f_tidy_question = open('tidy_question_train_set.txt','w')
f_tidy_topic = open('tidy_question_topic_train_set.txt','w')
tc_length = {i:0 for i in range(10000)}
cc_length = {i:0 for i in range(30000)}
tw_length = {i:0 for i in range(1000)}
cw_length = {i:0 for i in range(4000)}
for raw_value, raw_label in zip(raw_questions, question_topic):
value = raw_value.split()
if len(value) < 3:
continue
#f_tidy_question.write(value[0])
tc = value[1].split(',')
tc = [v for v in tc if char.has_key(v)]
tc_length[len(tc)] +=1
tc = ','.join(tc)
#f_tidy_question.write('\t'+tc)
tw = value[2].split(',')
tw = [v for v in tw if word.has_key(v)]
tw_length[len(tw)] +=1
tw = ','.join(tw)
#f_tidy_question.write('\t'+tw)
if len(tc)==0 or len(tw) ==0:
continue
write_line = '\t'.join([value[0], tc, tw])
if len(value)>3:
cc = value[3].split(',')
cc = [v for v in cc if char.has_key(v)]
cc_length[len(cc)] +=1
cc = ','.join(cc)
write_line += '\t'+cc
if len(value)>4:
cw = value[4].split(',')
cw = [v for v in cw if word.has_key(v)]
cw_length[len(cw)] +=1
cw = ','.join(cw)
write_line += '\t'+cw
write_line += '\n'
f_tidy_question.write(write_line)
f_tidy_topic.write(raw_label)
f_tidy_question.close()
f_tidy_topic.close()
with open('tc_length.txt','w') as f:
for k,v in tc_length.items():
f.write(str(k)+','+str(v)+'\n')
with open('cc_length.txt','w') as f:
for k,v in cc_length.items():
f.write(str(k)+','+str(v)+'\n')
with open('tw_length.txt','w') as f:
for k,v in tw_length.items():
f.write(str(k)+','+str(v)+'\n')
with open('cw_length.txt','w') as f:
for k,v in cw_length.items():
f.write(str(k)+','+str(v)+'\n')
| mit | -8,551,566,206,774,961,000 | 29.635294 | 63 | 0.580261 | false | 2.649034 | false | false | false |
yfried/ansible | lib/ansible/module_utils/facts/virtual/openbsd.py | 199 | 2319 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
from ansible.module_utils.facts.virtual.sysctl import VirtualSysctlDetectionMixin
from ansible.module_utils.facts.utils import get_file_content
class OpenBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
"""
This is a OpenBSD-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'OpenBSD'
DMESG_BOOT = '/var/run/dmesg.boot'
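    # Example results (hypothetical): a guest detected via hw.product/hw.vendor
    # might end up with {'virtualization_type': 'kvm', 'virtualization_role': 'guest'},
    # while a host with vmm(4) attached in dmesg reports type 'vmm', role 'host'.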
def get_virtual_facts(self):
virtual_facts = {}
# Set empty values as default
virtual_facts['virtualization_type'] = ''
virtual_facts['virtualization_role'] = ''
virtual_product_facts = self.detect_virt_product('hw.product')
virtual_facts.update(virtual_product_facts)
if virtual_facts['virtualization_type'] == '':
virtual_vendor_facts = self.detect_virt_vendor('hw.vendor')
virtual_facts.update(virtual_vendor_facts)
# Check the dmesg if vmm(4) attached, indicating the host is
# capable of virtualization.
        dmesg_boot = get_file_content(OpenBSDVirtual.DMESG_BOOT, default='')
for line in dmesg_boot.splitlines():
match = re.match('^vmm0 at mainbus0: (SVM/RVI|VMX/EPT)$', line)
if match:
virtual_facts['virtualization_type'] = 'vmm'
virtual_facts['virtualization_role'] = 'host'
return virtual_facts
class OpenBSDVirtualCollector(VirtualCollector):
_fact_class = OpenBSDVirtual
_platform = 'OpenBSD'
| gpl-3.0 | 5,320,738,837,033,867,000 | 35.234375 | 81 | 0.695558 | false | 3.964103 | false | false | false |
massimo-nocentini/on-python | beazley-metaprogramming/execly/execly.py | 1 | 3654 | # execly.py
#
# Example of generating code and executing it with exec()
# in the context of descriptors/metaclasses
from inspect import Parameter, Signature
import re
from collections import OrderedDict
# Utility functions
def _make_init(fields):
'''
Give a list of field names, make an __init__ method
'''
code = 'def __init__(self, %s):\n' % \
','.join(fields)
for name in fields:
code += ' self.%s = %s\n' % (name, name)
return code
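# For example, _make_init(['name', 'shares']) returns the source text:
#
#   def __init__(self, name,shares):
#       self.name = name
#       self.shares = shares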
def _make_setter(dcls):
code = 'def __set__(self, instance, value):\n'
for d in dcls.__mro__:
if 'set_code' in d.__dict__:
for line in d.set_code():
code += ' ' + line + '\n'
return code
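# For a composed descriptor such as PosInteger (defined below), the generated
# __set__ walks the MRO and concatenates each set_code() body, roughly:
#
#   def __set__(self, instance, value):
#       if not isinstance(value, self.ty):
#           raise TypeError("Expected %s" % self.ty)
#       if value < 0:
#           raise ValueError("Expected >= 0")
#       instance.__dict__[self.name] = value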
class DescriptorMeta(type):
def __init__(self, clsname, bases, clsdict):
if '__set__' not in clsdict:
code = _make_setter(self)
exec(code, globals(), clsdict)
setattr(self, '__set__', clsdict['__set__'])
else:
raise TypeError('Define set_code(), not __set__()')
class Descriptor(metaclass=DescriptorMeta):
def __init__(self, name=None):
self.name = name
@staticmethod
def set_code():
return [
'instance.__dict__[self.name] = value'
]
def __delete__(self, instance):
raise AttributeError("Can't delete")
class Typed(Descriptor):
ty = object
@staticmethod
def set_code():
return [
'if not isinstance(value, self.ty):',
' raise TypeError("Expected %s" % self.ty)'
]
# Specialized types
class Integer(Typed):
ty = int
class Float(Typed):
ty = float
class String(Typed):
ty = str
# Value checking
class Positive(Descriptor):
@staticmethod
def set_code():
return [
'if value < 0:',
' raise ValueError("Expected >= 0")',
]
# More specialized types
class PosInteger(Integer, Positive):
pass
class PosFloat(Float, Positive):
pass
# Length checking
class Sized(Descriptor):
def __init__(self, *args, maxlen, **kwargs):
self.maxlen = maxlen
super().__init__(*args, **kwargs)
@staticmethod
def set_code():
return [
'if len(value) > self.maxlen:',
' raise ValueError("Too big")',
]
class SizedString(String, Sized):
pass
# Pattern matching
class Regex(Descriptor):
def __init__(self, *args, pat, **kwargs):
self.pat = re.compile(pat)
super().__init__(*args, **kwargs)
@staticmethod
def set_code():
return [
'if not self.pat.match(value):',
' raise ValueError("Invalid string")',
]
class SizedRegexString(SizedString, Regex):
pass
# Structure definition code
class StructMeta(type):
@classmethod
def __prepare__(cls, name, bases):
return OrderedDict()
def __new__(cls, clsname, bases, clsdict):
fields = [key for key, val in clsdict.items()
if isinstance(val, Descriptor) ]
for name in fields:
clsdict[name].name = name
# Make the init function
if fields:
exec(_make_init(fields), globals(), clsdict)
clsobj = super().__new__(cls, clsname, bases, dict(clsdict))
setattr(clsobj, '_fields', fields)
return clsobj
class Structure(metaclass=StructMeta):
pass
if __name__ == '__main__':
class Stock(Structure):
name = SizedRegexString(maxlen=8, pat='[A-Z]+$')
shares = PosInteger()
price = PosFloat()
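    # Hypothetical demo of the generated checks:
    #
    #   s = Stock('GOOG', 100, 490.1)   # ok
    #   s.shares = -10                  # raises ValueError("Expected >= 0")
    #   s.name = 'TOOLONGNAME'          # raises ValueError("Too big")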
| mit | -5,169,347,624,705,959,000 | 23.689189 | 68 | 0.557471 | false | 3.95027 | false | false | false |
snakedragon/scrapy-hive | starlord/test/geetest-demo.py | 1 | 3901 | # -*- coding: utf-8 -*-
from starlord.ocr.api import *
import requests
import selenium
from selenium import webdriver
import json, urllib,urllib2
import hashlib
from urllib import urlencode
from selenium.webdriver.common import keys as KEYS
import bs4
import sys
import time
from selenium.webdriver.common.action_chains import ActionChains
from PIL import Image as PILImage
import cv2
from PIL import Image
import random
def extractEdges(image_file):
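    # Rough idea of this routine: run a Laplacian edge detector over the
    # captcha image, then look for columns containing a ~22px vertical run of
    # pure white pixels -- those columns bound the puzzle-piece gap, and the
    # returned pixel distance is how far the slider has to be dragged.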
edges = []
img = cv2.imread(image_file, 0)
gray_lap = cv2.Laplacian(img,cv2.CV_16S,ksize = 3)
dst = cv2.convertScaleAbs(gray_lap)
cv2.imwrite("verify2.png",dst)
#cv2.imshow("showimage", dst)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
image = Image.open("verify2.png")
image_rgb = image.convert("RGB")
for x in xrange(2, image_rgb.size[0] - 1):
for y in xrange(2, image_rgb.size[1] - 1):
color1 = image_rgb.getpixel((x,y))
            # pure white pixel
if color1==(255,255,255):
k = min(y+22,image.size[1] - 1)
allwhite = False
for j in xrange(y+1,k):
                    # remaining pixels of this vertical run must also be white
color2= image_rgb.getpixel((x,j))
if color2==color1:
allwhite = True
continue
else:
allwhite=False
break
if allwhite:
if edges.count(x)==0:
edges.append(x)
for i in xrange(0,len(edges)-1):
if edges[i]+1==edges[i+1]:
edges[i]=0
for x in edges:
if x==0:
edges.remove(x)
for z in edges:
print str(z)
    distance1 = 0
    if len(edges)==2:
        distance1 = edges[1]-edges[0]
    elif len(edges)>2:
        distance1 = edges[2]-edges[0]
    return distance1
headers0 = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0',
'Content-Type': 'application/x-www-form-urlencoded',
'Connection': 'keep-alive',
'Accept-Encoding': 'gzip,deflate',
'Accept-Language': 'zh-CN,zh;q=0.8',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Cache-Control': 'max-age=0'
}
driver = webdriver.Chrome()
driver.maximize_window()
page = driver.get("http://www.qixin.com/login")
#elem = driver.find_element_by_xpath("//script[6]")
time.sleep(2)
#elem = driver.find_element_by_xpath("//div[@class='behavior_verify_content']")
elem = driver.find_element_by_css_selector('.gt_slider_knob.gt_show')
ActionChains(driver).click_and_hold(elem).perform()
time.sleep(1)
driver.get_screenshot_as_file('web.png')
#print elem.location.values()
elem2 = driver.find_element_by_css_selector('.gt_cut_fullbg.gt_show')
#ActionChains(driver).move_to_element(elem).perform()
#driver.get_screenshot_as_file('2.png')
#print elem2.location.values()
#print elem2.size.values()
topx = elem2.location.values()[1]
topy = elem2.location.values()[0]
botx = topx + elem2.size.values()[0]
boty = topy + elem2.size.values()[1]
box=(topx, topy, botx, boty)
image1 = PILImage.open('web.png')
image1.crop(box).save('verify.png')
image1.close()
distance = extractEdges("verify.png")
ActionChains(driver).move_to_element(elem)
#ActionChains(driver).drag_and_drop_by_offset(elem,distance,0).perform()
road = 0
for seconds in xrange(0,20):
if seconds==19:
bias = distance-road
ActionChains(driver).move_by_offset(bias, 0).perform()
else:
ActionChains(driver).move_by_offset(0.05 * distance, 0).perform()
road = road + 0.05*distance
time.sleep(1*random.random())
#ActionChains(driver).move_to_element_with_offset(elem,distance, 0).perform()
driver.get_screenshot_as_file('web2.png')
ActionChains(driver).release(elem)
time.sleep(10)
| apache-2.0 | -1,942,913,534,372,948,200 | 20.692737 | 93 | 0.617564 | false | 3.055075 | false | false | false |
HackerTool/vivisect | vivisect/impemu/emulator.py | 2 | 19864 | import struct
import traceback
import itertools
import envi
import envi.bits as e_bits
import envi.memory as e_mem
import envi.registers as e_reg
import visgraph.pathcore as vg_path
from vivisect.const import *
# Pre-initialize the stack memory bytes
init_stack_map = ''
for i in xrange(8192/4):
init_stack_map += struct.pack("<I", 0xfefe0000+(i*4))
def imphook(impname):
def imptemp(f):
f.__imphook__ = impname
return f
return imptemp
class WorkspaceEmulator:
taintregs = []
def __init__(self, vw, logwrite=False, logread=False):
self.vw = vw
self.funcva = None # Set if using runFunction
self.emustop = False
self.hooks = {}
self.taints = {}
self.taintva = itertools.count(0x41560000, 8192)
self.uninit_use = {}
self.logwrite = logwrite
self.logread = logread
self.path = self.newCodePathNode()
self.curpath = self.path
self.op = None
self.opcache = {}
self.emumon = None
self.psize = self.getPointerSize()
# Possibly need an "options" API?
self._safe_mem = True # Should we be forgiving about memory accesses?
self._func_only = True # is this emulator meant to stay in one function scope?
        self.strictops = True # should we bail on emulation if an unsupported instruction is encountered
# Map in all the memory associated with the workspace
for va, size, perms, fname in vw.getMemoryMaps():
offset, bytes = vw.getByteDef(va)
self.addMemoryMap(va, perms, fname, bytes)
for regidx in self.taintregs:
rname = self.getRegisterName(regidx)
regval = self.setVivTaint( 'uninitreg', regidx )
self.setRegister(regidx, regval)
for name in dir(self):
val = getattr(self, name, None)
if val == None:
continue
impname = getattr(val, '__imphook__',None)
if impname == None:
continue
self.hooks[impname] = val
self.stack_map_mask = None
self.stack_map_base = None
self.stack_map_top = None
self.stack_pointer = None
self.initStackMemory()
def initStackMemory(self, stacksize=4096):
'''
Setup and initialize stack memory.
You may call this prior to emulating instructions.
'''
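        # Default layout (sketch): an 8192-byte map based at 0xbfb00000 (sign
        # extended to the pointer width); the stack pointer starts 4096 bytes
        # in, and "funcstack" taint values for positive stack offsets are
        # seeded just above it.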
if self.stack_map_base is None:
self.stack_map_mask = e_bits.sign_extend(0xfff00000, 4, self.vw.psize)
self.stack_map_base = e_bits.sign_extend(0xbfb00000, 4, self.vw.psize)
self.stack_map_top = self.stack_map_base + stacksize
self.stack_pointer = self.stack_map_top
# Map in a memory map for the stack
stack_map = init_stack_map
if stacksize != 4096:
stack_map = ''.join([struct.pack('<I', self.stack_map_base+(i*4))
for i in xrange(stacksize)])
self.addMemoryMap(self.stack_map_base, 6, "[stack]", stack_map)
self.setStackCounter(self.stack_pointer)
# Create some pre-made taints for positive stack indexes
# NOTE: This is *ugly* for speed....
taints = [ self.setVivTaint('funcstack', i * self.psize) for i in xrange(20) ]
taintbytes = ''.join([ e_bits.buildbytes(taint,self.psize) for taint in taints ])
self.writeMemory(self.stack_pointer, taintbytes)
else:
existing_map_size = self.stack_map_top - self.stack_map_base
new_map_size = stacksize - existing_map_size
if new_map_size < 0:
raise RuntimeError('cannot shrink stack')
new_map_top = self.stack_map_base
new_map_base = new_map_top - new_map_size
stack_map = ''.join([struct.pack('<I', new_map_base+(i*4))
for i in xrange(new_map_size)])
self.addMemoryMap(new_map_base, 6, "[stack]", stack_map)
self.stack_map_base = new_map_base
# no need to do tainting here, since SP will always be in the
# first map
def stopEmu(self):
'''
This is called by monitor to stop emulation
'''
self.emustop = True
def getPathProp(self, key):
'''
Retrieve a named value from the current code path context.
'''
return vg_path.getNodeProp(self.curpath, key)
def setPathProp(self, key, value):
"""
Set a named value which is only relevant for the current code path.
"""
return vg_path.setNodeProp(self.curpath, key, value)
def setEmulationMonitor(self, emumon):
"""
Snap in an emulation monitor. (see EmulationMonitor doc from vivisect.impemu)
"""
self.emumon = emumon
def parseOpcode(self, pc):
# We can make an opcode *faster* with the workspace because of
# getByteDef etc... use it.
op = self.opcache.get(pc)
if op == None:
op = envi.Emulator.parseOpcode(self, pc)
self.opcache[pc] = op
return op
def checkCall(self, starteip, endeip, op):
"""
Check if this was a call, and if so, do the required
import emulation and such...
"""
iscall = bool(op.iflags & envi.IF_CALL)
if iscall:
api = self.getCallApi(endeip)
rtype,rname,convname,callname,funcargs = api
callconv = self.getCallingConvention(convname)
argv = callconv.getCallArgs(self, len(funcargs))
ret = None
if self.emumon != None:
try:
ret = self.emumon.apicall(self, op, endeip, api, argv)
except Exception, e:
self.emumon.logAnomaly(self, endeip, "%s.apicall failed: %s" % (self.emumon.__class__.__name__, e))
hook = self.hooks.get(callname)
if ret == None and hook:
hook( self, callconv, api, argv )
else:
if ret == None:
ret = self.setVivTaint('apicall', (op,endeip,api,argv))
callconv.execCallReturn( self, ret, len(funcargs) )
# Either way, if it's a call PC goes to next instruction
if self._func_only:
self.setProgramCounter(starteip+len(op))
return iscall
def newCodePathNode(self, parent=None, bva=None):
'''
NOTE: Right now, this is only called from the actual branch state which
needs it. it must stay that way for now (register context is being copied
for symbolic emulator...)
'''
props = {
'bva':bva, # the entry virtual address for this branch
'valist':[], # the virtual addresses in this node in order
'calllog':[], # FIXME is this even used?
'readlog':[], # a log of all memory reads from this block
'writelog':[],# a log of all memory writes from this block
}
ret = vg_path.newPathNode(parent=parent, **props)
return ret
def getBranchNode(self, node, bva):
'''
If a node exists already for the specified branch, return it. Otherwise,
create a new one and return that...
'''
for knode in vg_path.getNodeKids(node):
if vg_path.getNodeProp(knode, 'bva') == bva:
return knode
return self.newCodePathNode(node, bva)
def checkBranches(self, starteip, endeip, op):
"""
This routine gets the current branch list for this opcode, adds branch
entries to the current path, and updates current path as needed
        (returns a list of (va, CodePath) tuples).
"""
ret = []
# Add all the known branches to the list
blist = op.getBranches(emu=self)
# FIXME this should actually check for conditional...
# If there is more than one branch target, we need a new code block
if len(blist) > 1:
for bva,bflags in blist:
if bva == None:
print "Unresolved branch even WITH an emulator?"
continue
bpath = self.getBranchNode(self.curpath, bva)
ret.append((bva, bpath))
return ret
def stepi(self):
# NOTE: when we step, we *always* want to be stepping over calls
# (and possibly import emulate them)
starteip = self.getProgramCounter()
# parse out an opcode
op = self.parseOpcode(starteip)
if self.emumon:
self.emumon.prehook(self, op, starteip)
# Execute the opcode
self.executeOpcode(op)
vg_path.getNodeProp(self.curpath, 'valist').append(starteip)
endeip = self.getProgramCounter()
if self.emumon:
self.emumon.posthook(self, op, endeip)
if not self.checkCall(starteip, endeip, op):
self.checkBranches(starteip, endeip, op)
def runFunction(self, funcva, stopva=None, maxhit=None, maxloop=None):
"""
This is a utility function specific to WorkspaceEmulation (and impemu) that
will emulate, but only inside the given function. You may specify a stopva
to return once that location is hit.
"""
self.funcva = funcva
# Let the current (should be base also) path know where we are starting
vg_path.setNodeProp(self.curpath, 'bva', funcva)
hits = {}
todo = [(funcva,self.getEmuSnap(),self.path),]
vw = self.vw # Save a dereference many many times
while len(todo):
va,esnap,self.curpath = todo.pop()
self.setEmuSnap(esnap)
self.setProgramCounter(va)
# Check if we are beyond our loop max...
if maxloop != None:
lcount = vg_path.getPathLoopCount(self.curpath, 'bva', va)
if lcount > maxloop:
continue
while True:
starteip = self.getProgramCounter()
if not vw.isValidPointer(starteip):
break
if starteip == stopva:
return
# Check straight hit count...
if maxhit != None:
h = hits.get(starteip, 0)
h += 1
if h > maxhit:
break
hits[starteip] = h
# If we ran out of path (branches that went
                # somewhere that we couldn't follow?)
if self.curpath == None:
break
try:
# FIXME unify with stepi code...
op = self.parseOpcode(starteip)
self.op = op
if self.emumon:
self.emumon.prehook(self, op, starteip)
if self.emustop:
return
# Execute the opcode
self.executeOpcode(op)
vg_path.getNodeProp(self.curpath, 'valist').append(starteip)
endeip = self.getProgramCounter()
if self.emumon:
self.emumon.posthook(self, op, endeip)
if self.emustop:
return
iscall = self.checkCall(starteip, endeip, op)
if self.emustop:
return
# If it wasn't a call, check for branches, if so, add them to
# the todo list and go around again...
if not iscall:
blist = self.checkBranches(starteip, endeip, op)
if len(blist):
# pc in the snap will be wrong, but over-ridden at restore
esnap = self.getEmuSnap()
for bva,bpath in blist:
todo.append((bva, esnap, bpath))
break
                    # If we encounter a procedure exit, it doesn't
# matter what EIP is, we're done here.
if op.iflags & envi.IF_RET:
vg_path.setNodeProp(self.curpath, 'cleanret', True)
break
except envi.UnsupportedInstruction, e:
if self.strictops:
break
else:
print 'runFunction continuing after unsupported instruction: 0x%08x %s' % (e.op.va, e.op.mnem)
self.setProgramCounter(e.op.va+ e.op.size)
except Exception, e:
#traceback.print_exc()
if self.emumon != None:
self.emumon.logAnomaly(self, starteip, str(e))
break # If we exc during execution, this branch is dead.
def getCallApi(self, va):
'''
Retrieve an API definition from either the vivisect workspace
( if the call target is a function within the workspace ) or
the impapi definition subsystem ( if the call target is a known
import definition )
'''
vw = self.vw
ret = None
if vw.isFunction(va):
ret = vw.getFunctionApi(va)
if ret != None:
return ret
else:
taint = self.getVivTaint(va)
if taint:
tva,ttype,tinfo = taint
if ttype == 'import':
lva,lsize,ltype,linfo = tinfo
ret = vw.getImpApi( linfo )
elif ttype == 'dynfunc':
libname,funcname = tinfo
ret = vw.getImpApi('%s.%s' % (libname,funcname))
if ret:
return ret
defcall = vw.getMeta("DefaultCall")
return ('int', None, defcall, 'UnknownApi', () )
def nextVivTaint(self):
# One page into the new taint range
return self.taintva.next() + 4096
def setVivTaint(self, typename, taint):
'''
Set a taint in the emulator. Returns the new value for
the created taint.
'''
va = self.nextVivTaint()
self.taints[ va & 0xffffe000 ] = (va,typename,taint)
return va
def getVivTaint(self, va):
'''
Retrieve a previously registered taint ( this will automagically
mask values down and allow you to retrieve "near taint" values.)
'''
return self.taints.get( va & 0xffffe000 )
def reprVivTaint(self, taint):
'''
        For the base "known" taint types, return a human readable string
to represent the value of the taint.
'''
va,ttype,tinfo = taint
if ttype == 'uninitreg':
return self.getRegisterName(tinfo)
if ttype == 'import':
lva,lsize,ltype,linfo = tinfo
return linfo
if ttype == 'dynlib':
libname = tinfo
return libname
if ttype == 'dynfunc':
libname,funcname = tinfo
return '%s.%s' % (libname,funcname)
if ttype == 'funcstack':
stackoff = tinfo
if self.funcva:
flocal = self.vw.getFunctionLocal(self.funcva, stackoff)
if flocal != None:
typename,argname = flocal
return argname
o = '+'
if stackoff < 0:
o = '-'
return 'sp%s%d' % (o, abs(stackoff))
if ttype == 'apicall':
op,pc,api,argv = tinfo
rettype,retname,callconv,callname,callargs = api
callstr = self.reprVivValue( pc )
argsstr = ','.join([ self.reprVivValue( x ) for x in argv])
return '%s(%s)' % (callstr,argsstr)
return 'taint: 0x%.8x %s %r' % (va, ttype, tinfo)
def reprVivValue(self, val):
'''
        Return a human readable string which is the best description for
the given value ( given knowledge of the workspace, emu,
and taint subsystems ).
'''
if self.vw.isFunction(val):
thunk = self.vw.getFunctionMeta(val,'Thunk')
if thunk:
return thunk
vivname = self.vw.getName(val)
if vivname:
return vivname
taint = self.getVivTaint(val)
if taint:
# NOTE we need to prevent infinite recursion due to args being
# tainted and then referencing the same api call
va,ttype,tinfo = taint
if ttype == 'apicall':
op,pc,api,argv = tinfo
rettype,retname,callconv,callname,callargs = api
if val not in argv:
return self.reprVivTaint(taint)
stackoff = self.getStackOffset(val)
if stackoff != None:
funclocal = self.vw.getFunctionLocal(self.funcva, stackoff)
if funclocal != None:
typename,varname = funclocal
return varname
if val < 4096:
return str(val)
return '0x%.8x' % val
def _useVirtAddr(self, va):
taint = self.getVivTaint(va)
if taint == None:
return
tva,ttype,tinfo = taint
if ttype == 'uninitreg':
self.logUninitRegUse(tinfo)
def writeMemory(self, va, bytes):
"""
        Try to write the bytes to the memory object, otherwise, don't
complain...
"""
if self.logwrite:
wlog = vg_path.getNodeProp(self.curpath, 'writelog')
wlog.append((self.getProgramCounter(),va,bytes))
self._useVirtAddr( va )
# It's totally ok to write to invalid memory during the
# emulation pass (as long as safe_mem is true...)
probeok = self.probeMemory(va, len(bytes), e_mem.MM_WRITE)
if self._safe_mem and not probeok:
return
return e_mem.MemoryObject.writeMemory(self, va, bytes)
def logUninitRegUse(self, regid):
self.uninit_use[regid] = True
def getUninitRegUse(self):
return self.uninit_use.keys()
def readMemory(self, va, size):
if self.logread:
rlog = vg_path.getNodeProp(self.curpath, 'readlog')
rlog.append((self.getProgramCounter(),va,size))
# If they read an import entry, start a taint...
loc = self.vw.getLocation(va)
if loc != None:
lva, lsize, ltype, ltinfo = loc
if ltype == LOC_IMPORT and lsize == size: # They just read an import.
ret = self.setVivTaint('import', loc)
return e_bits.buildbytes(ret, lsize)
self._useVirtAddr(va)
        # Read from the emulator's pages if we haven't resolved it yet
probeok = self.probeMemory(va, size, e_mem.MM_READ)
if self._safe_mem and not probeok:
return 'A' * size
return e_mem.MemoryObject.readMemory(self, va, size)
# Some APIs for telling if pointers are in runtime memory regions
def isUninitStack(self, val):
"""
If val is a numerical value in the same memory page
as the un-initialized stack values return True
"""
#NOTE: If uninit_stack_byte changes, so must this!
if (val & 0xfffff000) == 0xfefef000:
return True
return False
def isStackPointer(self, va):
return (va & self.stack_map_mask) == self.stack_map_base
def getStackOffset(self, va):
if (va & self.stack_map_mask) == self.stack_map_base:
return va - self.stack_pointer
| apache-2.0 | -5,625,522,150,030,716,000 | 32.724958 | 119 | 0.540727 | false | 3.967246 | false | false | false |
mday299/MAVProxy | setup.py | 2 | 2663 | from setuptools import setup
version = "1.5.1"
setup(name='MAVProxy',
version=version,
zip_safe=True,
description='MAVProxy MAVLink ground station',
long_description='''A MAVLink protocol proxy and ground station. MAVProxy
is oriented towards command line operation, and is suitable for embedding in
small autonomous vehicles or for using on ground control stations. It also
features a number of graphical tools such as a slipmap for satellite mapping
view of the vehicles location, and status console and several useful vehicle
control modules. MAVProxy is extensible via a modules system - see the modules
subdirectory for some example modules. MAVProxy was developed by CanberraUAV
for use in the 2012 Outback Challenge, and includes a module for the
CanberraUAV search and rescue system. See
http://Dronecode.github.io/MAVProxy/ for more information
on how to use MAVProxy.''',
url='https://github.com/Dronecode/MAVProxy',
author='Andrew Tridgell',
author_email='andrew@tridgell.net',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering'],
license='GPLv3',
packages=['MAVProxy',
'MAVProxy.modules',
'MAVProxy.modules.mavproxy_map',
'MAVProxy.modules.mavproxy_misseditor',
'MAVProxy.modules.mavproxy_smartcamera',
'MAVProxy.modules.lib',
'MAVProxy.modules.lib.ANUGA',
'MAVProxy.modules.lib.optparse_gui'],
# note that we do not include all the real dependencies here (like matplotlib etc)
# as that breaks the pip install. It seems that pip is not smart enough to
# use the system versions of these dependencies, so it tries to download and install
# large numbers of modules like numpy etc which may be already installed
install_requires=['pymavlink>=1.1.73',
'pyserial>=3.0'],
scripts=['MAVProxy/mavproxy.py',
'MAVProxy/tools/mavflightview.py',
'MAVProxy/tools/MAVExplorer.py',
'MAVProxy/modules/mavproxy_map/mp_slipmap.py',
'MAVProxy/modules/mavproxy_map/mp_tile.py'],
package_data={'MAVProxy':
['modules/mavproxy_map/data/*.jpg',
'modules/mavproxy_map/data/*.png',
'tools/graphs/*.xml']}
)
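# Example usage (sketch; not part of the original file): from a source checkout,
#   pip install .        # regular install
#   pip install -e .     # editable/development install
# The entries in scripts= (mavproxy.py, mavflightview.py, ...) are placed on PATH by setuptools.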
| gpl-3.0 | 8,074,820,753,599,540,000 | 47.418182 | 90 | 0.651145 | false | 4.206951 | false | false | false |
pydicom/sendit | sendit/logger.py | 1 | 8575 | # -*- coding: utf-8 -*-
'''
logger.py: Simple logger for sendit. Note that levels info and log are the
only two considered stdout, the rest are sent to stderr.
Copyright (c) 2017 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import os
import sys
ABRT = -4
ERROR = -3
WARNING = -2
LOG = -1
INFO = 1
QUIET = 0
VERBOSE = VERBOSE1 = 2
VERBOSE2 = 3
VERBOSE3 = 4
DEBUG = 5
class SenditMessage:
    def __init__(self, MESSAGELEVEL=None):
        # Honor an explicit level when given, otherwise fall back to the environment setting
        self.level = MESSAGELEVEL if MESSAGELEVEL is not None else get_logging_level()
self.history = []
self.errorStream = sys.stderr
self.outputStream = sys.stdout
self.colorize = self.useColor()
self.colors = {ABRT:"\033[31m", # dark red
ERROR: "\033[91m", # red
WARNING:"\033[93m", # dark yellow
LOG:"\033[95m", # purple
DEBUG:"\033[36m", # cyan
'OFF':"\033[0m"} # end sequence
# Colors --------------------------------------------
def useColor(self):
'''useColor will determine if color should be added
to a print. Will check if being run in a terminal, and
        if it has support for ascii'''
COLORIZE = get_user_color_preference()
if COLORIZE is not None:
return COLORIZE
streams = [self.errorStream,self.outputStream]
for stream in streams:
if not hasattr(stream, 'isatty'):
return False
if not stream.isatty():
return False
return True
def addColor(self,level,text):
        '''addColor to the prompt (usually prefix) if the terminal
        supports color and the user has enabled it'''
if self.colorize:
if level in self.colors:
text = "%s%s%s" %(self.colors[level],
text,
self.colors["OFF"])
return text
def emitError(self,level):
'''determine if a level should print to
stderr, includes all levels but INFO and QUIET'''
if level in [ABRT,
ERROR,
WARNING,
VERBOSE,
VERBOSE1,
VERBOSE2,
VERBOSE3,
DEBUG ]:
return True
return False
def emitOutput(self,level):
'''determine if a level should print to stdout
only includes INFO'''
if level in [LOG,
INFO]:
return True
return False
def isEnabledFor(self,messageLevel):
'''check if a messageLevel is enabled to emit a level
'''
if messageLevel <= self.level:
return True
return False
def emit(self,level,message,prefix=None):
'''emit is the main function to print the message
optionally with a prefix
:param level: the level of the message
:param message: the message to print
:param prefix: a prefix for the message
'''
if prefix is not None:
prefix = self.addColor(level,"%s " %(prefix))
else:
prefix = ""
message = self.addColor(level,message)
# Add the prefix
message = "%s%s" %(prefix,message)
if not message.endswith('\n'):
message = "%s\n" %message
# If the level is quiet, only print to error
if self.level == QUIET:
pass
# Otherwise if in range print to stdout and stderr
elif self.isEnabledFor(level):
if self.emitError(level):
self.write(self.errorStream,message)
else:
self.write(self.outputStream,message)
# Add all log messages to history
self.history.append(message)
def write(self,stream,message):
'''write will write a message to a stream,
first checking the encoding
'''
if isinstance(message,bytes):
message = message.decode('utf-8')
stream.write(message)
def get_logs(self,join_newline=True):
        '''get_logs will return the complete history, joined by newline
(default) or as is.
'''
if join_newline:
return '\n'.join(self.history)
return self.history
def show_progress(self,iteration,total,length=40,min_level=0,prefix=None,
carriage_return=True,suffix=None,symbol=None):
'''create a terminal progress bar, default bar shows for verbose+
:param iteration: current iteration (Int)
:param total: total iterations (Int)
:param length: character length of bar (Int)
'''
percent = 100 * (iteration / float(total))
progress = int(length * iteration // total)
if suffix is None:
suffix = ''
if prefix is None:
prefix = 'Progress'
# Download sizes can be imperfect, setting carriage_return to False
# and writing newline with caller cleans up the UI
if percent >= 100:
percent = 100
progress = length
if symbol is None:
symbol = "="
if progress < length:
bar = symbol * progress + '|' + '-' * (length - progress - 1)
else:
bar = symbol * progress + '-' * (length - progress)
# Only show progress bar for level > min_level
if self.level > min_level:
            percent = "%5s" % ("{0:.1f}".format(percent))
output = '\r' + prefix + " |%s| %s%s %s" % (bar, percent, '%', suffix)
sys.stdout.write(output),
if iteration == total and carriage_return:
sys.stdout.write('\n')
sys.stdout.flush()
def abort(self,message):
self.emit(ABRT,message,'ABRT')
def error(self,message):
self.emit(ERROR,message,'ERROR')
def warning(self,message):
self.emit(WARNING,message,'WARNING')
def log(self,message):
self.emit(LOG,message,'LOG')
def info(self,message):
self.emit(INFO,message)
def verbose(self,message):
self.emit(VERBOSE,message,"VERBOSE")
def verbose1(self,message):
self.emit(VERBOSE,message,"VERBOSE1")
def verbose2(self,message):
self.emit(VERBOSE2,message,'VERBOSE2')
def verbose3(self,message):
self.emit(VERBOSE3,message,'VERBOSE3')
def debug(self,message):
self.emit(DEBUG,message,'DEBUG')
def is_quiet(self):
'''is_quiet returns true if the level is under 1
'''
        if self.level < 1:
            return True
        return False
def get_logging_level():
'''get_logging_level will configure a logging to standard out based on the user's
selected level, which should be in an environment variable called
SENDIT_MESSAGELEVEL. if SENDIT_MESSAGELEVEL is not set, the maximum level
(5) is assumed (all messages).
'''
return int(os.environ.get("SENDIT_MESSAGELEVEL",5))
def get_user_color_preference():
COLORIZE = os.environ.get('SENDIT_COLORIZE',None)
if COLORIZE is not None:
COLORIZE = convert2boolean(COLORIZE)
return COLORIZE
def convert2boolean(arg):
'''convert2boolean is used for environmental variables that must be
returned as boolean'''
if not isinstance(arg,bool):
return arg.lower() in ("yes", "true", "t", "1","y")
return arg
bot = SenditMessage()
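# Example usage (sketch; the import path is an assumption based on this file's location):
#   from sendit.logger import bot
#   bot.info("starting import")            # goes to stdout at level INFO
#   bot.warning("queue is getting long")   # goes to stderr, colored when supported
#   bot.show_progress(5, 10, prefix="Upload")
# The SENDIT_MESSAGELEVEL and SENDIT_COLORIZE environment variables control verbosity and color.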
| mit | 2,299,047,214,545,847,600 | 29.734767 | 95 | 0.580175 | false | 4.293941 | false | false | false |
cyanfish/heltour | heltour/tournament/automod.py | 1 | 17435 | from heltour import settings
from heltour.tournament.models import *
from django.db.models.signals import post_save
from django.dispatch.dispatcher import receiver
from heltour.tournament.tasks import pairings_published
import reversion
import time
logger = logging.getLogger(__name__)
@receiver(post_save, sender=ModRequest, dispatch_uid='heltour.tournament.automod')
def mod_request_saved(instance, created, **kwargs):
if created:
signals.mod_request_created.send(sender=MOD_REQUEST_SENDER[instance.type],
instance=instance)
@receiver(signals.mod_request_created, sender=MOD_REQUEST_SENDER['appeal_late_response'],
dispatch_uid='heltour.tournament.automod')
def appeal_late_response_created(instance, **kwargs):
# Figure out which round to use
if not instance.round or instance.round.publish_pairings:
instance.round = instance.season.round_set.order_by('number').filter(publish_pairings=True,
is_completed=False).first()
instance.save()
@receiver(signals.mod_request_created, sender=MOD_REQUEST_SENDER['request_continuation'],
dispatch_uid='heltour.tournament.automod')
def request_continuation_created(instance, **kwargs):
# Figure out which round to use
if not instance.round or instance.round.publish_pairings:
instance.round = instance.season.round_set.order_by('number').filter(
publish_pairings=False).first()
instance.save()
@receiver(signals.mod_request_created, sender=MOD_REQUEST_SENDER['withdraw'],
dispatch_uid='heltour.tournament.automod')
def withdraw_created(instance, **kwargs):
# Figure out which round to add the withdrawal on
if not instance.round or instance.round.publish_pairings:
instance.round = instance.season.round_set.order_by('number').filter(
publish_pairings=False).first()
instance.save()
# Check that the requester is part of the season
sp = SeasonPlayer.objects.filter(player=instance.requester, season=instance.season).first()
if sp is None:
instance.reject(response='You aren\'t currently a participant in %s.' % instance.season)
return
if not instance.round:
instance.reject(response='You can\'t withdraw from the season at this time.')
return
instance.approve(response='You\'ve been withdrawn for round %d.' % instance.round.number)
@receiver(signals.mod_request_approved, sender=MOD_REQUEST_SENDER['withdraw'],
dispatch_uid='heltour.tournament.automod')
def withdraw_approved(instance, **kwargs):
if not instance.round:
return
# Add the withdrawal if it doesn't already exist
with reversion.create_revision():
reversion.set_comment('Withdraw request approved by %s' % instance.status_changed_by)
PlayerWithdrawal.objects.get_or_create(player=instance.requester, round=instance.round)
@receiver(signals.automod_unresponsive, dispatch_uid='heltour.tournament.automod')
def automod_unresponsive(round_, **kwargs):
groups = {'warning': [], 'yellow': [], 'red': []}
for p in round_.pairings.filter(game_link='', result='', scheduled_time=None).exclude(
white=None).exclude(black=None):
# verify that neither player is previously marked unavailable
if round_.season.league.competitor_type == 'team':
white_unavail = PlayerAvailability.objects.filter(round=round_, player=p.white,
is_available=False).exists()
black_unavail = PlayerAvailability.objects.filter(round=round_, player=p.black,
is_available=False).exists()
if white_unavail or black_unavail:
continue
# check who is not present
white_present = p.get_player_presence(p.white).first_msg_time is not None
black_present = p.get_player_presence(p.black).first_msg_time is not None
if not white_present:
player_unresponsive(round_, p, p.white, groups)
if black_present:
signals.notify_opponent_unresponsive.send(sender=automod_unresponsive,
round_=round_, player=p.black,
opponent=p.white, pairing=p)
time.sleep(1)
if not black_present:
player_unresponsive(round_, p, p.black, groups)
if white_present:
signals.notify_opponent_unresponsive.send(sender=automod_unresponsive,
round_=round_, player=p.white,
opponent=p.black, pairing=p)
time.sleep(1)
signals.notify_mods_unresponsive.send(sender=automod_unresponsive, round_=round_,
warnings=groups['warning'], yellows=groups['yellow'],
reds=groups['red'])
def player_unresponsive(round_, pairing, player, groups):
season = round_.season
league = season.league
has_warning = PlayerWarning.objects.filter(player=player, round__season=season,
type='unresponsive').exists()
if not has_warning and league.get_leaguesetting().warning_for_late_response:
with reversion.create_revision():
reversion.set_comment('Automatic warning for unresponsiveness')
PlayerWarning.objects.get_or_create(player=player, round=round_, type='unresponsive')
punishment = 'You may receive a yellow card.'
allow_continue = league.competitor_type != 'team'
groups['warning'].append(player)
else:
card_color = give_card(round_, player, 'card_unresponsive')
if not card_color:
return
punishment = 'You have been given a %s card.' % card_color
allow_continue = card_color != 'red' and league.competitor_type != 'team'
groups[card_color].append(player)
if league.competitor_type == 'team':
avail, _ = PlayerAvailability.objects.get_or_create(round=round_, player=player)
avail.is_available = False
avail.save()
signals.notify_unresponsive.send(sender=automod_unresponsive, round_=round_, player=player,
punishment=punishment, allow_continue=allow_continue,
pairing=pairing)
@receiver(signals.mod_request_approved, sender=MOD_REQUEST_SENDER['appeal_late_response'],
dispatch_uid='heltour.tournament.automod')
def appeal_late_response_approved(instance, **kwargs):
if not instance.pairing:
return
with reversion.create_revision():
reversion.set_comment('Late response appeal approved by %s' % instance.status_changed_by)
warning = PlayerWarning.objects.filter(player=instance.requester, round=instance.round,
type='unresponsive').first()
if warning:
warning.delete()
else:
revoke_card(instance.round, instance.requester, 'card_unresponsive')
@receiver(signals.automod_noshow, dispatch_uid='heltour.tournament.automod')
def automod_noshow(pairing, **kwargs):
if pairing.game_link:
# Game started, no action necessary
return
white_online = pairing.get_player_presence(pairing.white).online_for_game
black_online = pairing.get_player_presence(pairing.black).online_for_game
if white_online and not black_online:
player_noshow(pairing, pairing.white, pairing.black)
if black_online and not white_online:
player_noshow(pairing, pairing.black, pairing.white)
def player_noshow(pairing, player, opponent):
round_ = pairing.get_round()
signals.notify_noshow.send(sender=automod_unresponsive, round_=round_, player=player,
opponent=opponent)
@receiver(signals.mod_request_created, sender=MOD_REQUEST_SENDER['claim_win_noshow'],
dispatch_uid='heltour.tournament.automod')
def claim_win_noshow_created(instance, **kwargs):
# Figure out which round to add the claim on
if not instance.round:
instance.round = instance.season.round_set.order_by('number').filter(is_completed=False,
publish_pairings=True).first()
instance.save()
if not instance.pairing and instance.round:
instance.pairing = instance.round.pairing_for(instance.requester)
instance.save()
# Check that the requester is part of the season
sp = SeasonPlayer.objects.filter(player=instance.requester, season=instance.season).first()
if sp is None:
instance.reject(response='You aren\'t currently a participant in %s.' % instance.season)
return
if not instance.round:
instance.reject(response='You can\'t claim a win at this time.')
return
if not instance.pairing:
instance.reject(response='You don\'t currently have a pairing you can claim a win for.')
return
p = instance.pairing
opponent = p.white if p.white != instance.requester else p.black
if p.get_player_presence(instance.requester).online_for_game \
and not p.get_player_presence(opponent).online_for_game \
and timezone.now() > p.scheduled_time + timedelta(minutes=21):
instance.approve(response='You\'ve been given a win by forfeit.')
@receiver(signals.mod_request_approved, sender=MOD_REQUEST_SENDER['claim_win_noshow'],
dispatch_uid='heltour.tournament.automod')
def claim_win_noshow_approved(instance, **kwargs):
if not instance.pairing:
return
p = instance.pairing
opponent = p.white if p.white != instance.requester else p.black
with reversion.create_revision():
reversion.set_comment('Auto forfeit for no-show')
if p.white == instance.requester:
p.result = '1X-0F'
if p.black == instance.requester:
p.result = '0F-1X'
p.save()
add_system_comment(p, '%s no-show' % opponent.lichess_username)
sp = SeasonPlayer.objects.filter(player=opponent, season=instance.season).first()
add_system_comment(sp, 'Round %d no-show' % instance.round.number)
card_color = give_card(instance.round, opponent, 'card_noshow')
if not card_color:
return
punishment = 'You have been given a %s card.' % card_color
allow_continue = card_color != 'red' and instance.season.league.competitor_type != 'team'
signals.notify_noshow_claim.send(sender=claim_win_noshow_approved, round_=instance.round,
player=opponent, punishment=punishment,
allow_continue=allow_continue)
@receiver(signals.mod_request_created, sender=MOD_REQUEST_SENDER['appeal_noshow'],
dispatch_uid='heltour.tournament.automod')
def appeal_noshow_created(instance, **kwargs):
# Figure out which round to use
if not instance.round:
instance.round = instance.season.round_set.order_by('number').filter(publish_pairings=True,
is_completed=False).first()
instance.save()
if not instance.pairing and instance.round:
instance.pairing = instance.round.pairing_for(instance.requester)
instance.save()
@receiver(signals.mod_request_approved, sender=MOD_REQUEST_SENDER['appeal_noshow'],
dispatch_uid='heltour.tournament.automod')
def appeal_noshow_approved(instance, **kwargs):
if not instance.pairing:
return
with reversion.create_revision():
reversion.set_comment('No-show appeal approved by %s' % instance.status_changed_by)
revoke_card(instance.round, instance.requester, 'card_noshow')
with reversion.create_revision():
reversion.set_comment('No-show appeal approved by %s' % instance.status_changed_by)
instance.pairing.result = ''
instance.pairing.save()
@receiver(signals.mod_request_created, sender=MOD_REQUEST_SENDER['claim_draw_scheduling'],
dispatch_uid='heltour.tournament.automod')
def claim_draw_scheduling_created(instance, **kwargs):
# Figure out which round to add the claim on
if not instance.round:
instance.round = instance.season.round_set.order_by('number').filter(is_completed=False,
publish_pairings=True).first()
instance.save()
if not instance.pairing and instance.round:
instance.pairing = instance.round.pairing_for(instance.requester)
instance.save()
# Check that the requester is part of the season
sp = SeasonPlayer.objects.filter(player=instance.requester, season=instance.season).first()
if sp is None:
instance.reject(response='You aren\'t currently a participant in %s.' % instance.season)
return
if not instance.round:
instance.reject(response='You can\'t claim a scheduling draw at this time.')
return
if not instance.pairing:
instance.reject(
response='You don\'t currently have a pairing you can claim a scheduling draw for.')
return
if instance.pairing.result:
instance.reject(
response='You can\'t claim a scheduling draw for a game which already has a set result.')
return
add_system_comment(instance.pairing, 'Scheduling draw claim made by %s' % instance.requester)
@receiver(signals.mod_request_approved, sender=MOD_REQUEST_SENDER['claim_draw_scheduling'],
dispatch_uid='heltour.tournament.automod')
def claim_scheduling_draw_approved(instance, **kwargs):
if not instance.pairing:
return
p = instance.pairing
opponent = p.white if p.white != instance.requester else p.black
comment_ = 'Scheduling draw claim approved by %s' % instance.status_changed_by
with reversion.create_revision():
reversion.set_comment(comment_)
p.result = '1/2Z-1/2Z'
p.save()
add_system_comment(p, comment_)
signals.notify_scheduling_draw_claim.send(sender=claim_scheduling_draw_approved,
round_=instance.round, player=opponent)
@receiver(signals.mod_request_created, sender=MOD_REQUEST_SENDER['appeal_draw_scheduling'],
dispatch_uid='heltour.tournament.automod')
def appeal_scheduling_draw_created(instance, **kwargs):
# Figure out which round to use
if not instance.round:
instance.round = instance.season.round_set.order_by('number').filter(publish_pairings=True,
is_completed=False).first()
instance.save()
if not instance.pairing and instance.round:
instance.pairing = instance.round.pairing_for(instance.requester)
instance.save()
add_system_comment(instance.pairing, 'Scheduling draw appeal by %s' % instance.requester)
@receiver(signals.mod_request_approved, sender=MOD_REQUEST_SENDER['appeal_draw_scheduling'],
dispatch_uid='heltour.tournament.automod')
def appeal_scheduling_draw_approved(instance, **kwargs):
if not instance.pairing:
return
comment_ = 'Scheduling draw appeal approved by %s' % instance.status_changed_by
with reversion.create_revision():
reversion.set_comment(comment_)
instance.pairing.result = ''
instance.pairing.save()
add_system_comment(instance.pairing, comment_)
def give_card(round_, player, type_):
# TODO: Unit tests?
with transaction.atomic():
sp = SeasonPlayer.objects.filter(season=round_.season, player=player).first()
if not sp:
logger.error('Season player did not exist for %s %s' % (round_.season, player))
return None
already_has_card = PlayerWarning.objects.filter(player=player, round=round_,
type__startswith='card').exists()
card, _ = PlayerWarning.objects.get_or_create(player=player, round=round_, type=type_)
if not already_has_card:
sp.games_missed += 1
with reversion.create_revision():
reversion.set_comment('Automatic %s %s' % (sp.card_color, card.get_type_display()))
sp.save()
return sp.card_color
def revoke_card(round_, player, type_):
with transaction.atomic():
sp = SeasonPlayer.objects.filter(season=round_.season, player=player).first()
if not sp:
logger.error('Season player did not exist for %s %s' % (round_.season, player))
return
card = PlayerWarning.objects.filter(player=player, round=round_, type=type_).first()
if not card:
return
card.delete()
has_other_card = PlayerWarning.objects.filter(player=player, round=round_,
type__startswith='card').exists()
if not has_other_card and sp.games_missed > 0:
sp.games_missed -= 1
with reversion.create_revision():
reversion.set_comment('Card revocation')
sp.save()
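# Example flow (sketch; the field values are illustrative, not from the original file):
#   ModRequest.objects.create(type='withdraw', requester=player, season=season)
# post_save fires mod_request_created for the 'withdraw' sender; withdraw_created picks the
# next round without published pairings and approves the request, and withdraw_approved then
# records a PlayerWithdrawal for that round.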
| mit | -8,901,362,672,809,592,000 | 45.002639 | 107 | 0.638142 | false | 3.958002 | false | false | false |
kescobo/util_hutlab | workflows/strainphlan_workflow.py | 1 | 4981 | #!/usr/bin/env python
"""
bioBakery Workflows: strainphlan
Copyright (c) 2018 Harvard School of Public Health
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import os, fnmatch
# import the workflow class from anadama2
from anadama2 import Workflow
# import the library of biobakery_workflow tasks for shotgun sequences
from biobakery_workflows.tasks import shotgun, general
# import the utilities functions and config settings from biobakery_workflows
from biobakery_workflows import utilities, config
# create a workflow instance, providing the version number and description
# the version number will appear when running this script with the "--version" option
# the description will appear when running this script with the "--help" option
workflow = Workflow(version="0.1", description="A workflow to run strainphlan")
# add the custom arguments to the workflow
workflow_config = config.ShotGun()
workflow.add_argument("input-extension", desc="the input file extension", default="fastq.gz", choices=["fastq.gz","fastq","fq.gz","fq","fasta","fasta.gz"])
workflow.add_argument("threads", desc="number of threads/cores for each task to use", default=1)
workflow.add_argument("bypass-taxonomic-profiling", desc="do not run the taxonomic profiling tasks (a tsv profile for each sequence file must be included in the input folder using the same sample name)", action="store_true")
workflow.add_argument("strain-profiling-options", desc="additional options when running the strain profiling step", default="")
workflow.add_argument("max-strains", desc="the max number of strains to profile", default=20, type=int)
workflow.add_argument("bypass-strain-profiling", desc="do not run the strain profiling tasks", action="store_true")
# get the arguments from the command line
args = workflow.parse_args()
# get all input files with the input extension provided on the command line
# return an error if no files are found
input_files = utilities.find_files(args.input, extension=args.input_extension, exit_if_not_found=True)
### STEP #1: Run taxonomic profiling on all of the filtered files ###
if not args.bypass_taxonomic_profiling:
merged_taxonomic_profile, taxonomy_tsv_files, taxonomy_sam_files = shotgun.taxonomic_profile(workflow,
input_files,args.output,args.threads,args.input_extension)
else:
sample_names = utilities.sample_names(input_files,args.input_extension)
    tsv_profiles = utilities.name_files(sample_names, args.input, tag="taxonomic_profile", extension="tsv")
# check all of the expected profiles are found
if len(tsv_profiles) != len(list(filter(os.path.isfile,tsv_profiles))):
sys.exit("ERROR: Bypassing taxonomic profiling but all of the tsv taxonomy profile files are not found in the input folder. Expecting the following input files:\n"+"\n".join(tsv_profiles))
# run taxonomic profile steps bypassing metaphlan2
merged_taxonomic_profile, taxonomy_tsv_files, taxonomy_sam_files = shotgun.taxonomic_profile(workflow,
tsv_profiles,args.output,args.threads,"tsv",already_profiled=True)
# look for the sam profiles
    taxonomy_sam_files = utilities.name_files(sample_names, args.input, tag="bowtie2", extension="sam")
# if they do not all exist, then bypass strain profiling if not already set
if len(taxonomy_sam_files) != len(list(filter(os.path.isfile,taxonomy_sam_files))):
print("Warning: Bypassing taxonomic profiling but not all taxonomy sam files are present in the input folder. Strain profiling will be bypassed. Expecting the following input files:\n"+"\n".join(taxonomy_sam_files))
args.bypass_strain_profiling = True
### STEP #2: Run strain profiling
# Provide taxonomic profiling output so top strains by abundance will be selected
if not args.bypass_strain_profiling:
shotgun.strain_profile(workflow,taxonomy_sam_files,args.output,args.threads,
workflow_config.strainphlan_db_reference,workflow_config.strainphlan_db_markers,merged_taxonomic_profile,
args.strain_profiling_options,args.max_strains)
# start the workflow
workflow.go()
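# Example invocation (sketch; paths and thread count are assumptions):
#   python strainphlan_workflow.py --input /path/to/fastq_folder --output /path/to/results \
#       --input-extension fastq.gz --threads 8
# Add --bypass-taxonomic-profiling when per-sample taxonomic profile .tsv files (and optionally
# bowtie2 .sam files) already exist in the input folder.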
| mit | 1,441,946,323,739,177,000 | 56.918605 | 224 | 0.775146 | false | 3.846332 | true | false | false |
gudeg-united/mishapp-api | mishapp_api/views/__init__.py | 1 | 3263 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from flask import Blueprint
from flask import jsonify
from webargs import Arg
from webargs import ValidationError
from webargs.flaskparser import use_args
from mishapp_api.database import Disaster
disaster_api = Blueprint("disaster", __name__)
def radius_gte_zero(val):
if val < 0:
raise ValidationError("radius must greater than equal 0")
@disaster_api.errorhandler(400)
def handle_bad_request(err):
data = getattr(err, "data")
if data:
err_message = data["message"]
else:
err_message = "Bad request"
return jsonify({"message": err_message}), 400
@disaster_api.errorhandler(404)
def handle_not_found(err):
return jsonify({"message": "Not found"}), 404
@disaster_api.route("/disasters")
@use_args({
"page": Arg(int, default=1),
"per_page": Arg(int, default=20),
"category": Arg(str),
})
def index(args):
q = Disaster.objects
if args["category"]:
q = q(properties__type=args["category"])
docs = q.order_by("-modified_at").paginate(
args["page"],
min(args["per_page"], 20),
)
return jsonify({
"meta": {
"total": docs.total,
"page": docs.page,
"per_page": docs.per_page,
},
"items": [doc.asdict() for doc in docs.items]
})
@disaster_api.route("/disasters/nearby")
@use_args({
"lat": Arg(float, required=True),
"lon": Arg(float, required=True),
"radius": Arg(float, validate=radius_gte_zero, required=True),
"page": Arg(int, default=1),
"per_page": Arg(int, default=20),
"category": Arg(str),
})
def nearby(args):
q = Disaster.objects(
geometry__near={
"$geometry": {
"type": "Point",
"coordinates": [args["lon"], args["lat"]],
},
"$maxDistance": args["radius"],
},
)
if args["category"]:
q = q(properties__type=args["category"])
docs = q.order_by("-modified_at").paginate(
args["page"],
min(args["per_page"], 20),
)
return jsonify({
"meta": {
"total": docs.total,
"page": docs.page,
"per_page": docs.per_page,
},
"items": [doc.asdict() for doc in docs.items]
})
@disaster_api.route("/disasters/verify")
@use_args({
"lat": Arg(float, required=True),
"lon": Arg(float, required=True),
"radius": Arg(float, validate=radius_gte_zero, required=True),
"category": Arg(str),
})
def verify(args):
q = Disaster.objects(
geometry__near={
"$geometry": {
"type": "Point",
"coordinates": [args["lon"], args["lat"]],
},
"$maxDistance": args["radius"],
},
)
if args["category"]:
q = q(properties__type=args["category"])
counter = q.count()
if counter > 0:
return jsonify({"message": "OK"})
return jsonify({"message": "Not found"}), 404
@disaster_api.route("/disasters/<id>")
def get(id):
disaster = Disaster.objects.get_or_404(id=id)
return jsonify(disaster.asdict())
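# Example requests (sketch; host, coordinates, and category are illustrative):
#   GET /disasters?category=flood&page=1&per_page=10
#   GET /disasters/nearby?lat=-7.797&lon=110.37&radius=5000
#   GET /disasters/verify?lat=-7.797&lon=110.37&radius=1000&category=earthquake
# Each endpoint returns JSON; /verify answers 200 {"message": "OK"} or 404 {"message": "Not found"}.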
| bsd-3-clause | 3,217,415,846,938,317,300 | 24.692913 | 66 | 0.569108 | false | 3.550598 | false | false | false |
ehliang/myo-unlock | myo/lowlevel/enums.py | 1 | 3312 | # Copyright (c) 2015 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__all__ = [
'Result', 'VibrationType', 'StreamEmg', 'Pose', 'EventType',
'VersionComponent', 'OrientationIndex', 'HandlerResult', 'LockingPolicy',
'Arm', 'XDirection',
# Backwards compatibility
'result_t', 'vibration_type_t', 'stream_emg', 'pose_t', 'event_type_t',
'version_component_t', 'orientation_index_t', 'handler_result_t',
'locking_policy_t', 'arm_t', 'x_direction_t']
from ..utils.enum import Enumeration
class Result(Enumeration):
success = 0
error = 1
error_invalid_argument = 2
error_runtime = 3
__fallback__ = -1
class VibrationType(Enumeration):
short = 0
medium = 1
long = 2
__fallback__ = -1
class StreamEmg(Enumeration):
disabled = 0
enabled = 1
__fallback__ = -1
class Pose(Enumeration):
rest = 0
fist = 1
wave_in = 2
wave_out = 3
fingers_spread = 4
double_tap = 5
__fallback__ = -1
num_poses = Enumeration.Data(6)
class EventType(Enumeration):
paired = 0
unpaired = 1
connected = 2
disconnected = 3
arm_synced = 4
arm_unsynced = 5
orientation = 6
pose = 7
rssi = 8
unlocked = 9
locked = 10
emg = 11
__fallback__ = -1
class VersionComponent(Enumeration):
major = 0
minor = 1
patch = 2
__fallback__ = -1
class OrientationIndex(Enumeration):
x = 0
y = 1
z = 2
w = 3
__fallback__ = -1
class HandlerResult(Enumeration):
continue_ = 0
stop = 1
__fallback__ = -1
class LockingPolicy(Enumeration):
none = 0 # Pose events are always sent.
standard = 1 # (default) Pose events are not sent while a Myo is locked.
__fallback__ = -1
class Arm(Enumeration):
right = 0
left = 1
unknown = 2
__fallback__ = -1
class XDirection(Enumeration):
toward_wrist = 0
toward_elbow = 1
unknown = 2
__fallback__ = -1
# Backwards compatibility
result_t = Result
vibration_type_t = VibrationType
stream_emg = StreamEmg
pose_t = Pose
event_type_t = EventType
version_component_t = VersionComponent
orientation_index_t = OrientationIndex
handler_result_t = HandlerResult
locking_policy_t = LockingPolicy
arm_t = Arm
x_direction_t = XDirection
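# Example usage (sketch; assumes the Enumeration base class supports lookup of members by value):
#   Pose.fist, Pose.wave_in      # members used when decoding pose events
#   EventType.orientation        # value 6, matched against libmyo event types
# Unknown raw values map to the __fallback__ value (-1) instead of raising.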
| mit | 4,962,799,579,743,529,000 | 23.533333 | 79 | 0.670592 | false | 3.603917 | false | false | false |
ClimateImpactLab/open-estimate | openest/generate/smart_curve.py | 1 | 19082 | """Curve classes that apply to xarray Datasets.
Curves are mathematical functions on one or more independent
variables. The basic form of the curves classes is in
`models/curve.py`. The curve classes defined here, derived from
`SmartCurve`, take Datasets as arguments.
Smart Curves fall back on Curve logic, but take xarray DataSets and
know which variables they want.
"""
import numpy as np
from . import juliatools, latextools, formatting, diagnostic, formattools
from statsmodels.distributions.empirical_distribution import StepFunction
from openest.models import curve as curve_module
class SmartCurve(object):
def __init__(self):
self.xx = [-np.inf, np.inf] # Backwards compatibility to functions expecting curves
self.deltamethod = False
def __call__(self, ds):
raise NotImplementedError("call not implemented")
@property
def univariate(self):
raise NotImplementedError("univariate not implemented")
def format(self, lang):
raise NotImplementedError()
@staticmethod
def format_call(lang, curve, *args):
if isinstance(curve, SmartCurve):
return curve.format(lang)
if lang == 'latex':
return latextools.call(curve, None, *args)
elif lang == 'julia':
return juliatools.call(curve, None, *args)
class CurveCurve(SmartCurve):
def __init__(self, curve, variable):
super(CurveCurve, self).__init__()
self.curve = curve
self.variable = variable
def __call__(self, ds):
return self.curve(ds[self.variable])
def format(self, lang):
        return SmartCurve.format_call(lang, self.curve, self.variable)
class ConstantCurve(SmartCurve):
def __init__(self, constant, dimension):
super(ConstantCurve, self).__init__()
self.constant = constant
self.dimension = dimension
def __call__(self, ds):
return np.repeat(self.constant, len(ds[self.dimension]))
def format(self, lang):
return {'main': formatting.FormatElement(str(self.constant))}
class LinearCurve(CurveCurve):
def __init__(self, slope, variable):
super(LinearCurve, self).__init__(lambda x: slope * x, variable)
class StepCurve(CurveCurve):
def __init__(self, xxlimits, levels, variable):
step_function = StepFunction(xxlimits[1:-1], levels[1:], ival=levels[0])
super(StepCurve, self).__init__(step_function, variable)
self.xxlimits = xxlimits
self.levels = levels
class CoefficientsCurve(SmartCurve):
def __init__(self, coeffs, variables):
super(CoefficientsCurve, self).__init__()
self.coeffs = coeffs
self.variables = variables
assert isinstance(variables, list) and len(variables) == len(coeffs), "Variables do not match coefficients: %s <> %s" % (variables, coeffs)
def __call__(self, ds):
result = np.zeros(ds[self.variables[0]].shape)
for ii in range(len(self.variables)):
#result += self.coeffs[ii] * ds[self.variables[ii]].values # TOO SLOW
result += self.coeffs[ii] * ds._variables[self.variables[ii]]._data
return result
def format(self, lang):
coeffvar = formatting.get_variable()
if lang == 'latex':
return {'main': formatting.FormatElement(r"(%s) \cdot \vec{%s}" % (', '.join([varname for varname in self.variables]), coeffvar))}
elif lang == 'julia':
return {'main': formatting.FormatElement(' + '.join(["%s * %s_%d" % (self.variables[ii], coeffvar, ii + 1) for ii in range(len(self.variables))]))}
class ZeroInterceptPolynomialCurve(CoefficientsCurve):
def __init__(self, coeffs, variables, allow_raising=False, descriptions=None):
super(ZeroInterceptPolynomialCurve, self).__init__(coeffs, variables)
if descriptions is None:
descriptions = {}
self.allow_raising = allow_raising
self.descriptions = descriptions
self.getters = [((lambda ds, var=variable: ds._variables[var]) if isinstance(variable, str) else variable)
for variable in self.variables]
def __call__(self, ds):
result = self.coeffs[0] * self.getters[0](ds)._data
for ii in range(1, len(self.variables)):
if not self.allow_raising or self.variables[ii] in ds._variables:
#result += self.coeffs[ii] * ds[self.variables[ii]].values # TOO SLOW
result += self.coeffs[ii] * self.getters[ii](ds)._data
else:
result += self.coeffs[ii] * (self.getters[0](ds)._data ** (ii + 1))
return result
@property
def univariate(self):
return curve_module.ZeroInterceptPolynomialCurve([-np.inf, np.inf], self.coeffs)
def format(self, lang):
coeffvar = formatting.get_variable()
variable = formatting.get_variable()
funcvars = {}
repterms = []
if lang == 'latex':
if isinstance(self.variables[0], str):
repterms.append(r"%s_1 %s" % (coeffvar, variable))
else:
funcvar = formatting.get_function()
funcvars[self.variables[0]] = funcvar
repterms.append(r"%s_1 %s(%s)" % (coeffvar, funcvar, variable))
elif lang == 'julia':
if isinstance(self.variables[0], str):
repterms.append(r"%s[1] * %s" % (coeffvar, variable))
else:
funcvar = formatting.get_function()
funcvars[self.variables[0]] = funcvar
repterms.append(r"%s[1] * %s(%s)" % (coeffvar, funcvar, variable))
        for ii in range(1, len(self.variables)):
            if lang == 'latex':
                if isinstance(self.variables[0], str):
                    repterms.append(r"%s_%d %s^%d" % (coeffvar, ii + 1, variable, ii + 1))
                else:
                    funcvar = formatting.get_function()
                    funcvars[self.variables[ii]] = funcvar
                    repterms.append(r"%s_%d %s(%s)^%d" % (coeffvar, ii + 1, funcvar, variable, ii + 1))
            elif lang == 'julia':
                if isinstance(self.variables[0], str):
                    repterms.append(r"%s[%d] * %s^%d" % (coeffvar, ii + 1, variable, ii + 1))
                else:
                    funcvar = formatting.get_function()
                    funcvars[self.variables[ii]] = funcvar
                    repterms.append(r"%s[%d] * %s(%s)^%d" % (coeffvar, ii + 1, funcvar, variable, ii + 1))
result = {'main': formatting.FormatElement(' + '.join(repterms))}
for variable in funcvars:
result[funcvars[variable]] = formatting.FormatElement(self.descriptions.get(variable, "Unknown"))
return result
class SumByTimePolynomialCurve(SmartCurve):
"""Equivalent to `ZeroInterceptPolynomialCurve`, but with a different coefficient per timestep.
Parameters
----------
coeffmat : array_like
Matrix of K (order) x T (timesteps)
variables : list of str or function
Name of variable in DataSet or getter function for each exponent term
allow_raising : bool, optional
        Can we just raise the linear term to an exponent, or should each be in the ds (default)
descriptions : dict of str => str
Description of each getter function
"""
def __init__(self, coeffmat, variables, allow_raising=False, descriptions=None):
super(SumByTimePolynomialCurve, self).__init__()
self.coeffmat = coeffmat # K x T
assert len(self.coeffmat.shape) == 2
self.variables = variables
self.allow_raising = allow_raising
if descriptions is None:
descriptions = {}
self.descriptions = descriptions
        # Bind each variable name at definition time; a bare closure would make every getter use the last name
        self.getters = [((lambda ds, var=variable: ds._variables[var]) if isinstance(variable, str) else variable)
                        for variable in self.variables]  # functions return vector of length T
def __call__(self, ds):
maxtime = self.coeffmat.shape[1]
lindata = self.getters[0](ds)._data[:maxtime]
result = np.sum(self.coeffmat[0, :len(lindata)] * lindata)
for ii in range(1, len(self.variables)):
if not self.allow_raising or self.variables[ii] in ds._variables:
termdata = self.getters[ii](ds)._data[:maxtime]
result += np.sum(self.coeffmat[ii, :len(lindata)] * termdata) # throws error if length mismatch
else:
result += np.sum(self.coeffmat[ii, :len(lindata)] * (lindata ** (ii + 1)))
return result
@property
def univariate(self):
raise NotImplementedError("Probably want to define a matrix-taking curve before this.")
def format(self, lang):
coeffvar = formatting.get_variable()
variable = formatting.get_variable()
funcvars = {}
repterms = []
if lang == 'latex':
if isinstance(self.variables[0], str):
repterms.append(r"%s_1 \cdot %s" % (coeffvar, variable))
else:
funcvar = formatting.get_function()
funcvars[self.variables[0]] = funcvar
repterms.append(r"%s_1 \cdot %s(%s)" % (coeffvar, funcvar, variable))
elif lang == 'julia':
if isinstance(self.variables[0], str):
repterms.append(r"sum(%s[1,:] * %s)" % (coeffvar, variable))
else:
funcvar = formatting.get_function()
funcvars[self.variables[0]] = funcvar
repterms.append(r"sum(%s[1,:] * %s(%s))" % (coeffvar, funcvar, variable))
        for ii in range(1, len(self.variables)):
            if lang == 'latex':
                if isinstance(self.variables[0], str):
                    repterms.append(r"%s_%d \cdot %s^%d" % (coeffvar, ii + 1, variable, ii + 1))
                else:
                    funcvar = formatting.get_function()
                    funcvars[self.variables[ii]] = funcvar
                    repterms.append(r"%s_%d \cdot %s(%s)^%d" % (coeffvar, ii + 1, funcvar, variable, ii + 1))
            elif lang == 'julia':
                if isinstance(self.variables[0], str):
                    repterms.append(r"sum(%s[%d,:] * %s^%d)" % (coeffvar, ii + 1, variable, ii + 1))
                else:
                    funcvar = formatting.get_function()
                    funcvars[self.variables[ii]] = funcvar
                    repterms.append(r"sum(%s[%d,:] * %s(%s)^%d)" % (coeffvar, ii + 1, funcvar, variable, ii + 1))
result = {'main': formatting.FormatElement(' + '.join(repterms))}
for variable in funcvars:
result[funcvars[variable]] = formatting.FormatElement(self.descriptions.get(variable, "Unknown"))
return result
class SumByTimeCoefficientsCurve(SmartCurve):
"""Equivalent to `TransformCoefficientsCurve`, but with a different coefficient per timestep.
Parameters
----------
coeffmat : array_like
Matrix of K (#predictors) x T (timesteps)
transforms : list of functions
Functions of DataSet to return each predictor
descriptions : list of str
Descriptions of each transformation/predictor
diagnames : list of str
Keys to be used for each predictor in the diagnostic files, or None for no-recording
"""
def __init__(self, coeffmat, transforms, descriptions, diagnames=None):
super(SumByTimeCoefficientsCurve, self).__init__()
self.coeffmat = coeffmat # K x T
assert len(coeffmat.shape) == 2 or np.all(coeffmat == 0)
self.transforms = transforms
self.descriptions = descriptions
self.diagnames = diagnames
assert isinstance(transforms, list) and len(transforms) == coeffmat.shape[0], "Transforms do not match coefficients: %s <> %s" % (transforms, coeffmat.shape)
assert diagnames is None or isinstance(diagnames, list) and len(diagnames) == len(transforms)
def __call__(self, ds):
if np.all(self.coeffmat == 0):
# Happens with edge case of conditional suffixes
return 0
maxtime = self.coeffmat.shape[1]
result = None
for ii in range(len(self.transforms)):
predictor = self.transforms[ii](ds)._data.ravel()[:maxtime]
if self.diagnames:
diagnostic.record(ds.region, ds.year, self.diagnames[ii], np.sum(predictor))
if result is None:
result = np.sum(self.coeffmat[ii, :] * predictor)
else:
result += np.sum(self.coeffmat[ii, :] * predictor)
return result
@property
def univariate(self):
raise NotImplementedError("Probably want to define a matrix-taking curve before this.")
def format(self, lang):
raise NotImplementedError()
class CubicSplineCurve(CoefficientsCurve):
def __init__(self, coeffs, knots, variables, allow_raising=False):
super(CubicSplineCurve, self).__init__(coeffs, variables)
self.allow_raising = allow_raising
self.knots = knots
def __call__(self, ds):
result = np.zeros(ds[self.variables[0]].shape)
try:
for ii in range(len(self.variables)):
result += self.coeffs[ii] * ds._variables[self.variables[ii]]._data
return result
except KeyError as ex:
# This should only catch KeyErrors coming from coming from
# ds._variables[x].
if self.allow_raising:
return curve_module.CubicSplineCurve(self.knots, self.coeffs)(ds._variables[self.variables[0]]._data)
raise ex
@property
def univariate(self):
return curve_module.CubicSplineCurve(self.knots, self.coeffs)
class TransformCoefficientsCurve(SmartCurve):
"""Use a transformation of ds to produce each predictor.
Parameters
----------
coeffs : array_like
Vector of coefficients on each [transformed] predictor
transforms : list of functions
Functions of DataSet to return each predictor
descriptions : list of str
Descriptions of each transformation/predictor
diagnames : list of str (optional)
Keys to be used for each predictor in the diagnostic files, or None for no-recording
univariate_curve : UnivariateCurve (optional)
If a univariate function is requested, can we produce one?
"""
def __init__(self, coeffs, transforms, descriptions, diagnames=None, univariate_curve=None):
super(TransformCoefficientsCurve, self).__init__()
self.coeffs = coeffs
self.transforms = transforms
self.descriptions = descriptions
self.diagnames = diagnames
self._univariate_curve = univariate_curve
assert isinstance(transforms, list) and len(transforms) == len(coeffs), "Transforms do not match coefficients: %s <> %s" % (transforms, coeffs)
assert diagnames is None or isinstance(diagnames, list) and len(diagnames) == len(transforms)
def __call__(self, ds):
result = None
for ii in range(len(self.transforms)):
predictor = self.transforms[ii](ds)
if self.diagnames:
diagnostic.record(ds.region, ds.year, self.diagnames[ii], np.sum(predictor._data))
if result is None:
result = self.coeffs[ii] * predictor._data
else:
result += self.coeffs[ii] * predictor._data
return result
def format(self, lang):
coeffvar = formatting.get_variable()
funcvars = [formatting.get_function() for transform in self.transforms]
if lang == 'latex':
result = {'main': formatting.FormatElement(r"(%s) \cdot \vec{%s}" % (', '.join(["%s" % funcvars[ii] for ii in range(len(funcvars))]), coeffvar))}
elif lang == 'julia':
result = {'main': formatting.FormatElement(' + '.join(["%s() * %s_%d" % (funcvars[ii], coeffvar, ii + 1) for ii in range(len(funcvars))]))}
for ii in range(len(funcvars)):
result[funcvars[ii]] = formatting.FormatElement(self.descriptions[ii])
return result
@property
def univariate(self):
if self._univariate_curve is not None:
return self._univariate_curve
raise NotImplementedError("univariate transform not specified")
class SelectiveInputCurve(SmartCurve):
"""Assumes input is a matrix, and only pass selected input columns to child curve."""
def __init__(self, curve, variable):
super(SelectiveInputCurve, self).__init__()
self.curve = curve
self.variable = variable
def __call__(self, ds):
return self.curve(ds[self.variable]._data)
def format(self, lang, dsname):
return SmartCurve.format_call(self.curve, lang, self.variable)
class SumCurve(SmartCurve):
def __init__(self, curves):
super(SmartCurve, self).__init__()
self.curves = curves
def __call__(self, ds):
total = 0
for curve in self.curves:
total += curve(ds)
return total
def format(self, lang):
formatteds = [SmartCurve.format_call(self.curves[ii], lang, self.variable) for ii in range(len(self.curves))]
return formattools.join(' + ', formatteds)
class ProductCurve(SmartCurve):
def __init__(self, curve1, curve2):
super(ProductCurve, self).__init__()
self.curve1 = curve1
self.curve2 = curve2
def __call__(self, ds):
return self.curve1(ds) * self.curve2(ds)
def format(self, lang):
return formatting.build_recursive({'latex': r"(%s) (%s)",
'julia': r"(%s) .* (%s)"}, lang,
self.curve1, self.curve2)
class ShiftedCurve(SmartCurve):
def __init__(self, curve, offset):
super(ShiftedCurve, self).__init__()
self.curve = curve
self.offset = offset
def __call__(self, ds):
return self.curve(ds) + self.offset
@property
def univariate(self):
return curve_module.ShiftedCurve(self.curve.univariate, self.offset)
def format(self, lang):
return formatting.build_recursive({'latex': r"(%s + " + str(self.offset) + ")",
'julia': r"(%s + " + str(self.offset) + ")"},
lang, self.curve)
class ClippedCurve(curve_module.ClippedCurve, SmartCurve):
@property
def univariate(self):
return curve_module.ClippedCurve(self.curve.univariate, self.cliplow)
class OtherClippedCurve(curve_module.OtherClippedCurve, SmartCurve):
@property
def univariate(self):
return curve_module.OtherClippedCurve(self.clipping_curve.univariate, self.curve.univariate, self.clipy)
class MinimumCurve(curve_module.MinimumCurve, SmartCurve):
@property
def univariate(self):
return curve_module.MinimumCurve(self.curve1.univariate, self.curve2.univariate)
| gpl-3.0 | 1,786,271,144,499,775,000 | 39.427966 | 173 | 0.600933 | false | 3.987879 | false | false | false |
leopoul/ncclient | ncclient/devices/nexus.py | 15 | 3378 | """
Handler for Cisco Nexus device specific information.
Note that for proper import, the classname has to be:
"<Devicename>DeviceHandler"
...where <Devicename> is something like "Default", "Nexus", etc.
All device-specific handlers derive from the DefaultDeviceHandler, which implements the
generic information needed for interaction with a Netconf server.
"""
from ncclient.xml_ import BASE_NS_1_0
from ncclient.operations.third_party.nexus.rpc import ExecCommand
from .default import DefaultDeviceHandler
class NexusDeviceHandler(DefaultDeviceHandler):
"""
Cisco Nexus handler for device specific information.
In the device_params dictionary, which is passed to __init__, you can specify
the parameter "ssh_subsystem_name". That allows you to configure the preferred
SSH subsystem name that should be tried on your Nexus switch. If connecting with
that name fails, or you didn't specify that name, the other known subsystem names
will be tried. However, if you specify it then this name will be tried first.
"""
_EXEMPT_ERRORS = [
"*VLAN with the same name exists*", # returned even if VLAN was created, but
# name was already in use (switch will
# automatically choose different, unique
# name for VLAN)
]
def __init__(self, device_params):
super(NexusDeviceHandler, self).__init__(device_params)
def add_additional_operations(self):
dict = {}
dict['exec_command'] = ExecCommand
return dict
def get_capabilities(self):
# Just need to replace a single value in the default capabilities
c = super(NexusDeviceHandler, self).get_capabilities()
c[0] = "urn:ietf:params:xml:ns:netconf:base:1.0"
return c
def get_xml_base_namespace_dict(self):
"""
Base namespace needs a None key.
See 'nsmap' argument for lxml's Element().
"""
return { None : BASE_NS_1_0 }
def get_xml_extra_prefix_kwargs(self):
"""
Return keyword arguments per request, which are applied to Element().
Mostly, this is a dictionary containing the "nsmap" key.
See 'nsmap' argument for lxml's Element().
"""
d = {
"nxos":"http://www.cisco.com/nxos:1.0",
"if":"http://www.cisco.com/nxos:1.0:if_manager",
"nfcli": "http://www.cisco.com/nxos:1.0:nfcli",
"vlan_mgr_cli": "http://www.cisco.com/nxos:1.0:vlan_mgr_cli"
}
d.update(self.get_xml_base_namespace_dict())
return { "nsmap" : d }
def get_ssh_subsystem_names(self):
"""
Return a list of possible SSH subsystem names.
Different NXOS versions use different SSH subsystem names for netconf.
Therefore, we return a list so that several can be tried, if necessary.
The Nexus device handler also accepts
"""
preferred_ssh_subsystem = self.device_params.get("ssh_subsystem_name")
name_list = [ "netconf", "xmlagent" ]
if preferred_ssh_subsystem:
return [ preferred_ssh_subsystem ] + \
[ n for n in name_list if n != preferred_ssh_subsystem ]
else:
return name_list
| apache-2.0 | 8,493,074,875,138,781,000 | 34.1875 | 87 | 0.618413 | false | 4.134639 | false | false | false |
em-2/em2 | em2/utils/network.py | 1 | 2025 | import asyncio
import logging
from aiohttp import ClientSession
from async_timeout import timeout
from em2 import Settings
from em2.exceptions import StartupException
logger = logging.getLogger('em2.utils')
async def _wait_port_open(host, port, delay, loop):
step_size = 0.05
steps = int(delay / step_size)
start = loop.time()
for i in range(steps):
try:
with timeout(step_size, loop=loop):
transport, proto = await loop.create_connection(lambda: asyncio.Protocol(), host=host, port=port)
except asyncio.TimeoutError:
pass
except OSError:
await asyncio.sleep(step_size, loop=loop)
else:
transport.close()
logger.debug('Connected successfully to %s:%s after %0.2fs', host, port, loop.time() - start)
return
raise StartupException(f'Unable to connect to {host}:{port} after {loop.time() - start:0.2f}s')
def wait_for_services(settings, *, delay=5):
"""
Wait for up to `delay` seconds for postgres and redis ports to be open
"""
loop = asyncio.get_event_loop()
coros = [
_wait_port_open(settings.pg_host, settings.pg_port, delay, loop),
_wait_port_open(settings.R_HOST, settings.R_PORT, delay, loop),
]
logger.debug('waiting for postgres and redis to come up...')
loop.run_until_complete(asyncio.gather(*coros, loop=loop))
async def check_server(settings: Settings, path='/', expected_status=200):
url = f'http://127.0.0.1:{settings.web_port}' + path
try:
async with ClientSession() as session:
async with session.get(url) as r:
assert r.status == expected_status, f'response error {r.status} != {expected_status}'
except (ValueError, AssertionError, OSError) as e:
logger.error('web check error: %s: %s, url: "%s"', e.__class__.__name__, e, url)
return 1
else:
logger.info('web check successful "%s", response %d', url, expected_status)
return 0
| mit | -8,350,600,370,374,822,000 | 35.160714 | 113 | 0.633086 | false | 3.715596 | false | false | false |
ushahidi/riverid-python | api/riversite.py | 1 | 1288 | # RiverID Site Class
# ==================
#
# This file is part of RiverID.
#
# RiverID is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RiverID is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with RiverID. If not, see <http://www.gnu.org/licenses/>.
from riverexception import RiverException
class RiverSite(object):
def __init__(self, db):
self.db = db
def add_site(self, url):
self.db.site.insert({'url': url})
def add_user(self, url, user_id):
self.db.site.update({'url': url}, {'$push': {'user_id': user_id}})
def exists(self, url):
return self.db.site.find_one({'url': url}) != None
def get_user_urls(self, user_id):
urls = []
for site in self.db.site.find({'user_id': user_id}):
urls.append(site['url'])
return urls
| agpl-3.0 | -7,584,865,214,060,750,000 | 32.894737 | 77 | 0.659161 | false | 3.638418 | false | false | false |
mendrugory/monkey-note-bot | app/model/notelist.py | 1 | 2987 | def find(db, user):
"""
find the notelist
:param db:
:param user:
:return:
"""
document = db.notelist.find_one({"_id": user})
return document
def find_all_lists(db, user):
"""
It finds all lists
:param db:
:param user:
:return:
"""
document = db.notelist.find_one({"_id": user}, {"lists": 1})
return document.get("lists", [])
def find_list(db, user, list_name):
"""
It finds the list
:param db:
:param user:
:param list_name:
:return:
"""
document = db.notelist.find_one({"_id": user}, {"lists.{}".format(list_name): 1})
if not document:
return []
return document["lists"].get(list_name, [])
def find_all_lists_names(db, user):
"""
It finds all the lists names
:param db:
:param user:
:return:
"""
document = db.notelist.find_one({"_id": user}, {"lists": 1})
return [name for name in document["lists"].keys()]
def find_notes(db, user, list_name):
"""
It returns all the notes of a list
:param db:
:param user:
:param list_name:
:return:
"""
document = db.notelist.find_one({"_id": user}, {"lists": 1})
return document["lists"][list_name]
def insert_new_notelist(db, user):
"""
It inserts a new notelist
:param db:
:param user:
:return:
"""
db.notelist.insert({"_id": user, "lists": {}})
def add_new_list(db, user, list_name):
"""
It adds a new list
:param db:
:param user:
:param list_name:
:return:
"""
notelist = find(db, user)
if not notelist:
insert_new_notelist(db, user)
db.notelist.update({"_id": user}, {"$set": {"lists.{}".format(list_name): []}})
def remove_list(db, user, list_name):
"""
It removes the given list
:param db:
:param user:
:param list_name:
:return:
"""
db.notelist.update({"_id": user}, {"$unset": {"lists.{}".format(list_name): 1}})
def add_note(db, user, list_name, note):
"""
It adds a note
:param db:
:param user:
:param list_name:
:param note:
:return:
"""
the_list = find_list(db, user, list_name)
if not the_list:
add_new_list(db, user, list_name)
db.notelist.update({"_id": user}, {"$addToSet": {"lists.{}".format(list_name): note}})
return True
def remove_note(db, user, list_name, note):
"""
It removes a note
:param db:
:param user:
:param list_name:
:param note:
:return:
"""
result = False
the_list = find_list(db, user, list_name)
if the_list:
try:
index = int(note) - 1
db.notelist.update({"_id": user}, {"$unset": {"lists.{}.{}".format(list_name, index): 1}})
db.notelist.update({"_id": user}, {"$pull": {"lists.{}".format(list_name): None}})
except:
db.notelist.update({"_id": user}, {"$pull": {"lists.{}".format(list_name): note}})
result = True
return result
| mit | 5,042,442,036,290,355,000 | 21.976923 | 102 | 0.544024 | false | 3.239696 | false | false | false |
LTD-Beget/sprutio-rpc | lib/FileManager/workers/local/extractArchive.py | 1 | 12194 | import gzip
import os
import pprint
import threading
import time
import traceback
import libarchive
import pyinotify
import rarfile
from lib.FileManager.FM import REQUEST_DELAY
from lib.FileManager.LibArchiveEntry import Entry
from lib.FileManager.SevenZFile import SevenZFile
from lib.FileManager.ZipFile import ZipFile, is_zipfile
from lib.FileManager.workers.baseWorkerCustomer import BaseWorkerCustomer
class ExtractArchive(BaseWorkerCustomer):
def __init__(self, params, *args, **kwargs):
super(ExtractArchive, self).__init__(*args, **kwargs)
self.file = params.get('file')
self.extract_path = params.get('extract_path')
self.params = params
self.NUM_WORKING_THREADS = 48
self.extracted_files = {
"count": 0,
"done": False
}
def run(self):
try:
self.preload()
abs_extract_path = self.get_abs_path(self.extract_path)
if not os.path.exists(abs_extract_path):
try:
os.makedirs(abs_extract_path)
except Exception as e:
self.logger.error("Cannot create extract path %s. %s" % (str(e), traceback.format_exc()))
raise Exception("Cannot create extract path")
elif os.path.isfile(abs_extract_path):
raise Exception("Extract path incorrect - file exists")
abs_archive_path = self.get_abs_path(self.file.get("path"))
if not os.path.exists(abs_archive_path):
raise Exception("Archive file is not exist")
self.on_running(self.status_id, pid=self.pid, pname=self.name)
self.logger.debug("Start extracting %s", abs_archive_path)
# for rar and zip same algorithm
if is_zipfile(abs_archive_path) or rarfile.is_rarfile(abs_archive_path) or SevenZFile.is_7zfile(
abs_archive_path):
if is_zipfile(abs_archive_path):
self.logger.info("Archive ZIP type, using zipfile (beget)")
a = ZipFile(abs_archive_path)
elif rarfile.is_rarfile(abs_archive_path):
self.logger.info("Archive RAR type, using rarfile")
a = rarfile.RarFile(abs_archive_path)
else:
self.logger.info("Archive 7Zip type, using py7zlib")
a = SevenZFile(abs_archive_path)
# extract Empty Files first
for fileinfo in a.archive.header.files.files:
if not fileinfo['emptystream']:
continue
name = fileinfo['filename']
try:
unicode_name = name.encode('UTF-8').decode('UTF-8')
except UnicodeDecodeError:
unicode_name = name.encode('cp866').decode('UTF-8')
unicode_name = unicode_name.replace('\\', '/') # For windows name in rar etc.
file_name = os.path.join(abs_extract_path, unicode_name)
dir_name = os.path.dirname(file_name)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
if os.path.exists(dir_name) and not os.path.isdir(dir_name):
os.remove(dir_name)
os.makedirs(dir_name)
if os.path.isdir(file_name):
continue
f = open(file_name, 'w')
f.close()
infolist = a.infolist()
not_ascii = False
# checking ascii names
try:
abs_extract_path.encode('utf-8').decode('ascii')
for name in a.namelist():
name.encode('utf-8').decode('ascii')
except UnicodeDecodeError:
not_ascii = True
except UnicodeEncodeError:
not_ascii = True
t = threading.Thread(target=self.progress, args=(infolist, self.extracted_files, abs_extract_path))
t.daemon = True
t.start()
try:
if not_ascii:
for name in a.namelist():
try:
unicode_name = name.encode('UTF-8').decode('UTF-8')
except UnicodeDecodeError:
unicode_name = name.encode('cp866').decode('UTF-8')
unicode_name = unicode_name.replace('\\', '/') # For windows name in rar etc.
file_name = os.path.join(abs_extract_path, unicode_name)
dir_name = os.path.dirname(file_name)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
if os.path.exists(dir_name) and not os.path.isdir(dir_name):
os.remove(dir_name)
os.makedirs(dir_name)
if os.path.isdir(file_name):
continue
f = open(file_name, 'wb')
try:
data = a.read(name)
f.write(data)
f.close()
except TypeError:
# pass for directories its make recursively for files
f.close()
os.remove(file_name)
else:
self.logger.info("EXTRACT ALL to %s , encoded = %s" % (
pprint.pformat(abs_extract_path), pprint.pformat(abs_extract_path)))
a.extractall(abs_extract_path) # Not working with non-ascii windows folders
except Exception as e:
self.logger.error("Error extract path %s. %s" % (str(e), traceback.format_exc()))
raise e
finally:
self.extracted_files["done"] = True
t.join()
elif libarchive.is_archive(abs_archive_path):
self.logger.info("Archive other type, using libarchive")
next_tick = time.time() + REQUEST_DELAY
print(pprint.pformat("Clock = %s , tick = %s" % (str(time.time()), str(next_tick))))
infolist = []
with libarchive.Archive(abs_archive_path, entry_class=Entry) as a:
for entry in a:
infolist.append(entry)
with libarchive.Archive(abs_archive_path, entry_class=Entry) as a:
for entry in a:
entry_path = os.path.join(abs_extract_path, entry.pathname)
self.logger.debug("Entry pathname %s - %s", entry.pathname, entry.size)
if time.time() > next_tick:
progress = {
'percent': round(float(self.extracted_files["count"]) / float(len(infolist)), 2),
'text': str(int(
round(float(self.extracted_files["count"]) / float(len(infolist)), 2) * 100)) + '%'
}
self.on_running(self.status_id, progress=progress, pid=self.pid, pname=self.name)
next_tick = time.time() + REQUEST_DELAY
self.extracted_files["count"] += 1
dir_name = os.path.dirname(entry_path)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
if os.path.exists(dir_name) and not os.path.isdir(dir_name):
os.remove(dir_name)
os.makedirs(dir_name)
if os.path.isdir(entry_path):
continue
f = open(entry_path, 'w')
a.readpath(f)
elif abs_archive_path[-3:] == ".gz":
self.logger.info("gz file type, using gzip")
try:
# if its just a gz file
a = gzip.open(abs_archive_path)
file_content = a.read()
a.close()
file_name = os.path.splitext(os.path.basename(abs_archive_path))[0]
file_path = os.path.join(abs_extract_path, file_name)
infolist = [file_name]
dir_name = os.path.dirname(file_path)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
extracted = open(file_path, 'wb')
extracted.write(file_content)
extracted.close()
except Exception as e:
raise e
finally:
self.extracted_files["done"] = True
else:
raise Exception("Archive file has unkown format")
progress = {
'percent': round(float(self.extracted_files["count"]) / float(len(infolist)), 2),
'text': str(int(round(float(self.extracted_files["count"]) / float(len(infolist)), 2) * 100)) + '%'
}
result = {}
time.sleep(REQUEST_DELAY)
self.on_success(self.status_id, progress=progress, data=result, pid=self.pid, pname=self.name)
except Exception as e:
self.extracted_files["done"] = True
result = {
"error": True,
"message": str(e),
"traceback": traceback.format_exc()
}
self.on_error(self.status_id, result, pid=self.pid, pname=self.name)
def progress(self, infolist, progress, extract_path):
self.logger.debug("extract thread progress() start")
next_tick = time.time() + REQUEST_DELAY
# print pprint.pformat("Clock = %s , tick = %s" % (str(time.time()), str(next_tick)))
progress["count"] = 0
class Identity(pyinotify.ProcessEvent):
def process_default(self, event):
progress["count"] += 1
# print("Has event %s progress %s" % (repr(event), pprint.pformat(progress)))
wm1 = pyinotify.WatchManager()
wm1.add_watch(extract_path, pyinotify.IN_CREATE, rec=True, auto_add=True)
s1 = pyinotify.Stats() # Stats is a subclass of ProcessEvent
notifier1 = pyinotify.ThreadedNotifier(wm1, default_proc_fun=Identity(s1))
notifier1.start()
total = float(len(infolist))
while not progress["done"]:
if time.time() > next_tick:
# print("Tick progress %s / %s" % (pprint.pformat(progress), str(total)))
count = float(progress["count"]) * 1.5
if count <= total:
op_progress = {
'percent': round(count / total, 2),
'text': str(int(round(count / total, 2) * 100)) + '%'
}
else:
op_progress = {
'percent': round(99, 2),
'text': '99%'
}
self.on_running(self.status_id, progress=op_progress, pid=self.pid, pname=self.name)
next_tick = time.time() + REQUEST_DELAY
time.sleep(REQUEST_DELAY)
# иначе пользователям кажется что распаковалось не полностью
op_progress = {
'percent': round(99, 2),
'text': '99%'
}
self.on_running(self.status_id, progress=op_progress, pid=self.pid, pname=self.name)
time.sleep(REQUEST_DELAY)
notifier1.stop()
| gpl-3.0 | -8,980,587,258,137,486,000 | 40.868966 | 119 | 0.480152 | false | 4.439488 | false | false | false |
gkc1000/pyscf | pyscf/nao/test/test_0044_h2_scf_gto_vs_nao_nao.py | 1 | 1518 | # Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import unittest, numpy as np
from pyscf import gto, scf
from pyscf.nao import nao, scf as scf_nao
#from pyscf.nao.hf import RHF
mol = gto.M( verbose = 1,
atom = '''
H 0 0 0
H 0 0.757 0.587''', basis = 'cc-pvdz',)
class KnowValues(unittest.TestCase):
def test_scf_gto_vs_nao(self):
""" Test computation of overlaps between NAOs against overlaps computed between GTOs"""
gto_hf = scf.RHF(mol)
gto_hf.kernel()
nao_hf = scf_nao(mf=gto_hf, gto=mol)
nao_hf.dump_chkfile=False
e_tot = nao_hf.kernel_scf()
self.assertAlmostEqual(gto_hf.e_tot, e_tot, 4)
for e1,e2 in zip(nao_hf.mo_energy[0,0],gto_hf.mo_energy): self.assertAlmostEqual(e1, e2, 3)
for o1,o2 in zip(nao_hf.mo_occ[0,0],gto_hf.mo_occ): self.assertAlmostEqual(o1, o2)
if __name__ == "__main__": unittest.main()
| apache-2.0 | 1,189,539,529,152,935,200 | 36.95 | 95 | 0.685771 | false | 3.079108 | true | false | false |
wangheda/youtube-8m | youtube-8m-wangheda/all_frame_models/cnn_lstm_memory_model.py | 1 | 3248 | import sys
import models
import model_utils
import math
import numpy as np
import video_level_models
import tensorflow as tf
import utils
import tensorflow.contrib.slim as slim
from tensorflow import flags
FLAGS = flags.FLAGS
class CnnLstmMemoryModel(models.BaseModel):
def cnn(self,
model_input,
l2_penalty=1e-8,
num_filters = [1024, 1024, 1024],
filter_sizes = [1,2,3],
**unused_params):
max_frames = model_input.get_shape().as_list()[1]
num_features = model_input.get_shape().as_list()[2]
shift_inputs = []
for i in xrange(max(filter_sizes)):
if i == 0:
shift_inputs.append(model_input)
else:
shift_inputs.append(tf.pad(model_input, paddings=[[0,0],[i,0],[0,0]])[:,:max_frames,:])
cnn_outputs = []
for nf, fs in zip(num_filters, filter_sizes):
sub_input = tf.concat(shift_inputs[:fs], axis=2)
sub_filter = tf.get_variable("cnn-filter-len%d"%fs, shape=[num_features*fs, nf], dtype=tf.float32,
initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1),
regularizer=tf.contrib.layers.l2_regularizer(l2_penalty))
cnn_outputs.append(tf.einsum("ijk,kl->ijl", sub_input, sub_filter))
cnn_output = tf.concat(cnn_outputs, axis=2)
return cnn_output
def create_model(self, model_input, vocab_size, num_frames, **unused_params):
"""Creates a model which uses a stack of LSTMs to represent the video.
Args:
model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
input features.
vocab_size: The number of classes in the dataset.
num_frames: A vector of length 'batch' which indicates the number of
frames for each video (before padding).
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
'batch_size' x 'num_classes'.
"""
lstm_size = int(FLAGS.lstm_cells)
number_of_layers = FLAGS.lstm_layers
cnn_output = self.cnn(model_input, num_filters=[1024,1024,1024], filter_sizes=[1,2,3])
normalized_cnn_output = tf.nn.l2_normalize(cnn_output, dim=2)
## Batch normalize the input
stacked_lstm = tf.contrib.rnn.MultiRNNCell(
[
tf.contrib.rnn.BasicLSTMCell(
lstm_size, forget_bias=1.0, state_is_tuple=True)
for _ in range(number_of_layers)
],
state_is_tuple=True)
loss = 0.0
with tf.variable_scope("RNN"):
outputs, state = tf.nn.dynamic_rnn(stacked_lstm, normalized_cnn_output,
sequence_length=num_frames,
swap_memory=FLAGS.rnn_swap_memory,
dtype=tf.float32)
final_state = tf.concat(map(lambda x: x.c, state), axis = 1)
aggregated_model = getattr(video_level_models,
FLAGS.video_level_classifier_model)
return aggregated_model().create_model(
model_input=final_state,
original_input=model_input,
vocab_size=vocab_size,
**unused_params)
| apache-2.0 | 2,656,356,261,795,894,000 | 36.333333 | 105 | 0.607759 | false | 3.608889 | false | false | false |
edips/GeoEndaze | QT_DESIGNER/calculatorUi.py | 1 | 2079 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'calculatorUi.ui'
#
# Created: Mon Jan 6 00:27:51 2014
# by: pyside-uic 0.2.14 running on PySide 1.1.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_hesap(object):
def setupUi(self, hesap):
hesap.setObjectName("hesap")
hesap.resize(157, 135)
self.label = QtGui.QLabel(hesap)
self.label.setGeometry(QtCore.QRect(10, 10, 21, 16))
self.label.setObjectName("label")
self.label_2 = QtGui.QLabel(hesap)
self.label_2.setGeometry(QtCore.QRect(10, 40, 21, 16))
self.label_2.setObjectName("label_2")
self.label_3 = QtGui.QLabel(hesap)
self.label_3.setGeometry(QtCore.QRect(10, 100, 21, 16))
self.label_3.setObjectName("label_3")
self.sum = QtGui.QPushButton(hesap)
self.sum.setGeometry(QtCore.QRect(30, 70, 111, 24))
self.sum.setObjectName("sum")
self.a = QtGui.QLineEdit(hesap)
self.a.setGeometry(QtCore.QRect(30, 10, 113, 23))
self.a.setObjectName("a")
self.b = QtGui.QLineEdit(hesap)
self.b.setGeometry(QtCore.QRect(30, 40, 113, 23))
self.b.setObjectName("b")
self.c = QtGui.QLineEdit(hesap)
self.c.setGeometry(QtCore.QRect(30, 100, 113, 23))
self.c.setObjectName("c")
self.retranslateUi(hesap)
QtCore.QMetaObject.connectSlotsByName(hesap)
def retranslateUi(self, hesap):
hesap.setWindowTitle(QtGui.QApplication.translate("hesap", "addition", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("hesap", "a", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("hesap", "b", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("hesap", "c", None, QtGui.QApplication.UnicodeUTF8))
self.sum.setText(QtGui.QApplication.translate("hesap", "calculate", None, QtGui.QApplication.UnicodeUTF8))
| gpl-2.0 | -3,659,458,863,494,783,000 | 43.234043 | 117 | 0.665705 | false | 3.315789 | false | false | false |
constantinius/YaaGame | engine/gui.py | 1 | 1804 | from engine.service import (
AbstractService, GraphicsService,
ServiceManager
)
import pyglet
import kytten
class GuiService(AbstractService):
""" Service to manage GUI screens """
def __init__(self, window, group_index = 1):
self.guis = {}
self.window = window
self.batch = ServiceManager.instance[GraphicsService].batch
self.group = pyglet.graphics.OrderedGroup(group_index)
def add_gui(self, gui):
"""Add a gui to the manager."""
assert(isinstance(gui, AbstractGui))
self.guis[gui.name] = gui
def show_gui(self, name):
self.guis[name].show(self.window, self.batch,
self.group)
def hide_gui(self, name):
self.guis[name].hide()
def on_draw(self):
self.batch.draw()
class AbstractGui(object):
def __init__(self, name):
self.name = name
self.root = None
import os.path
pth = os.path.abspath(os.path.join('graphics', 'theme'))
self.theme = kytten.Theme(pth,
override={
"gui_color": [64, 128, 255, 255],
"font_size": 14
})
self.visible = False
def _build_gui(self, window, batch, group):
return kytten.Dialog(
kytten.TitleFrame("AbstractGui",
width=200, height=150
),
window=window, batch=batch,
group=group, theme=self.theme
)
def show(self, window, batch, group):
if not self.visible:
self.root = self._build_gui(window, batch, group)
self.visible = True
def hide(self):
if self.visible:
self.root.teardown()
self.visible = False
self.root = None | mit | 2,763,050,527,148,521,500 | 27.650794 | 67 | 0.547672 | false | 4.008889 | false | false | false |
justinchuby/cmu-courseapi-flask | common/search.py | 1 | 20717 | import re
import copy
import json
import arrow
import datetime
# Elasticsearch libraries, certifi required by Elasticsearch
import elasticsearch
from elasticsearch_dsl import Search
from elasticsearch_dsl.query import Q
from elasticsearch_dsl.connections import connections
import certifi
from common import Message, utils
import config
from config.es_config import ES_COURSE_INDEX_PREFIX, ES_FCE_INDEX
##
# @brief The Searcher object that parses input and generates queries.
##
class Searcher(object):
_doc_type = None
_default_size = 5
#
# @brief init
#
# @param self The object
# @param raw_query The raw query
# @param index The index
# @param size The size
# @param sort sort is either None or a list
#
def __init__(self, raw_query, index=None, size=_default_size, sort=None):
self.raw_query = copy.deepcopy(raw_query)
self.index = index
self.size = size
self.doc_type = self._doc_type
self.sort = sort
def __repr__(self):
return "<Searcher Object: raw_query={}>".format(repr(self.raw_query))
@property
def index(self):
return self._index
@index.setter
def index(self, value):
self._index = value
def execute(self):
response = self.fetch(self.generate_query(), self.index,
size=self.size, doc_type=self.doc_type,
sort=self.sort)
# if config.settings.DEBUG:
# print("[DEBUG] ES response:")
# print(json.dumps(response.to_dict(), indent=2))
return response
@staticmethod
def fetch(query, index, size=5, doc_type=None, sort=None):
s = Search(index=index, doc_type=doc_type).query(query).extra(size=size)
if sort:
s = s.sort(*sort)
try:
response = s.execute()
except elasticsearch.exceptions.NotFoundError as e:
# print(formatErrMsg(e, "ES"))
response = e.info
except elasticsearch.exceptions.RequestError as e:
# print(formatErrMsg(e, "ES"))
response = e.info
except elasticsearch.exceptions.TransportError as e:
# print(formatErrMsg(e, "ES"))
response = e.info
return response
##
# @brief Generate the query for the database.
##
# @return (dict) The query for querying the database.
##
def generate_query(self):
query = Q()
return query
class FCESearcher(Searcher):
_doc_type = 'fce'
_default_size = 5
def __init__(self, raw_query, index=None, size=_default_size, sort=None):
super().__init__(raw_query, index=index, size=size, sort=sort)
@property
def index(self):
return self._index
@index.setter
def index(self, value):
self._index = value
def generate_query(self):
raw_query = self.raw_query
query = Q()
if 'courseid' in raw_query:
courseid = raw_query['courseid'][0]
query &= Q('term', courseid=courseid)
if 'instructor' in raw_query:
instructor = raw_query['instructor'][0]
query &= Q('match', instructor={'query': instructor, 'operator': 'and'})
if config.settings.DEBUG:
print(json.dumps(query.to_dict(), indent=2))
print("[DEBUG] max size: {}, index: {}".format(self.size, self.index))
return query
class CourseSearcher(Searcher):
_doc_type = 'course'
_default_size = 5
def __init__(self, raw_query, index=None, size=_default_size):
super().__init__(raw_query, index, size)
@property
def index(self):
return self._index
# @brief Sets the index from short representation of a term. e.g. f17
# To the ES index
@index.setter
def index(self, value):
if value is None:
# Everything
self._index = ES_COURSE_INDEX_PREFIX + '*'
elif value == 'current':
# Current semester
self._index = utils.get_current_course_index()
elif re.match('^(f|s|m1|m2)\d{2}$', value):
# Match a semester, e.g. f17 or m217
self._index = ES_COURSE_INDEX_PREFIX + value
else:
# Unknown index, use as is
self._index = value
def generate_query(self):
raw_query = self.raw_query
query = Q()
# TODO: use the English analyser.
# TODO BUG: text and courseid presented in the same time would cause
# empty return value
if 'text' in raw_query:
text = raw_query['text'][0]
text_query = Q('bool',
should=[
Q('match', name=text),
Q('match', desc=text)
]
)
query &= text_query
else:
if 'name' in raw_query:
name = raw_query['name'][0]
name_query = Q('bool',
must=Q('match', name=name)
)
query &= name_query
if 'desc' in raw_query:
desc = raw_query['desc'][0]
desc_query = Q('bool',
must=Q('match', desc=desc)
)
query &= desc_query
if 'courseid' in raw_query:
courseid = raw_query['courseid'][0]
if self.index is None:
current_semester = utils.get_semester_from_date(
datetime.datetime.today())
id_query = Q('bool',
must=Q('term', id=courseid),
should=Q('match', semester=current_semester)
)
else:
id_query = Q('term', id=courseid)
query &= id_query
# Declare the variables to store the temporary nested queries
lec_nested_queries = {}
sec_nested_queries = {}
lec_name_query = Q()
sec_name_query = Q()
if 'instructor' in raw_query:
instructor = " ".join(raw_query['instructor'])
_query_obj = {'query': instructor,
'operator': 'and'}
if 'instructor_fuzzy' in raw_query:
_query_obj['fuzziness'] = 'AUTO'
lec_name_query = Q('match',
lectures__instructors=_query_obj)
sec_name_query = Q('match',
sections__instructors=_query_obj)
# TODO: check if DH 100 would give DH 2135 and PH 100
# see if multilevel nesting is needed
if 'building' in raw_query:
building = raw_query['building'][0].upper()
lec_building_query = Q('match', lectures__times__building=building)
sec_building_query = Q('match', sections__times__building=building)
lec_nested_queries['lec_building_query'] = lec_building_query
sec_nested_queries['sec_building_query'] = sec_building_query
if 'room' in raw_query:
room = raw_query['room'][0].upper()
lec_room_query = Q('match', lectures__times__room=room)
sec_room_query = Q('match', sections__times__room=room)
lec_nested_queries['lec_room_query'] = lec_room_query
sec_nested_queries['sec_room_query'] = sec_room_query
if 'datetime' in raw_query:
# Get day and time from the datetime object
# raw_query['datetime'] is of type [arrow.arrow.Arrow]
date_time = raw_query['datetime'][0].to('America/New_York')
day = date_time.isoweekday() % 7
time = date_time.time().strftime("%I:%M%p")
delta_time = datetime.timedelta(minutes=raw_query['timespan'][0])
shifted_time = (date_time + delta_time).time().strftime("%I:%M%p")
# NOTE: Known bug: if the time spans across two days, it would
# give a wrong result because day is calculated based
# on the begin time
# Construct the query based on day and time
_times_begin_query = {'lte': shifted_time, 'format': 'hh:mma'}
_times_end_query = {'gt': time, 'format': 'hh:mma'}
lec_time_query = Q('bool', must=[Q('match', lectures__times__days=day),
Q('range', lectures__times__begin=_times_begin_query),
Q('range', lectures__times__end=_times_end_query)])
sec_time_query = Q('bool', must=[Q('match', sections__times__days=day),
Q('range', sections__times__begin=_times_begin_query),
Q('range', sections__times__end=_times_end_query)])
lec_nested_queries['lec_time_query'] = lec_time_query
sec_nested_queries['sec_time_query'] = sec_time_query
# Combine all the nested queries
_lec_temp = Q()
_sec_temp = Q()
for key, value in lec_nested_queries.items():
if _lec_temp is None:
_lec_temp = value
else:
_lec_temp &= value
for key, value in sec_nested_queries.items():
if _sec_temp is None:
_sec_temp = value
else:
_sec_temp &= value
combined_lec_query = Q('nested',
query=(
Q('nested',
query=(_lec_temp),
path='lectures.times') &
lec_name_query
),
path='lectures',
inner_hits={}
)
combined_sec_query = Q('nested',
query=(
Q('nested',
query=(_sec_temp),
path='sections.times') &
sec_name_query),
path='sections',
inner_hits={}
)
# And finally combine the lecture query and section query with "or"
query &= Q('bool', must=[combined_lec_query | combined_sec_query])
if config.settings.DEBUG:
print(json.dumps(query.to_dict(), indent=2))
print("[DEBUG] max size: {}".format(self.size))
return query
# @brief Initializes connection to the Elasticsearch server
# The settings are in config/es_config.py
def init_es_connection():
if config.es_config.SERVICE == 'AWS':
from elasticsearch import RequestsHttpConnection
from requests_aws4auth import AWS4Auth
from config.es_config import AWS_ES_HOSTS, AWS_ACCESS_KEY,\
AWS_SECRET_KEY, AWS_REGION
awsauth = AWS4Auth(AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, 'es')
connections.create_connection(
hosts=AWS_ES_HOSTS,
http_auth=awsauth,
use_ssl=True,
verify_certs=True,
connection_class=RequestsHttpConnection
)
else:
from config.es_config import ES_HOSTS, ES_HTTP_AUTH
connections.create_connection(
hosts=ES_HOSTS,
timeout=20,
use_ssl=True,
verify_certs=True,
http_auth=ES_HTTP_AUTH
)
# @brief Initializes an output dictionary for "courses" endpoint
def init_courses_output():
output = {'response': {},
'courses': []}
return output
# @brief Formats the output for the courses endpoint
def format_courses_output(response):
output = init_courses_output()
output['response'] = response_to_dict(response)
if has_error(response):
return output
for hit in response:
output['courses'].append(hit.to_dict())
return output
def init_fces_output():
output = {'response': {},
'fces': []}
return output
def format_fces_output(response):
output = init_fces_output()
output['response'] = response_to_dict(response)
if has_error(response):
return output
for hit in response:
output['fces'].append(hit.to_dict())
return output
def has_error(response):
if isinstance(response, dict) and response.get('status') is not None:
return True
return False
def response_to_dict(response):
if isinstance(response, dict):
return response
else:
if config.settings.DEBUG:
print("[DEBUG] hits count: {}".format(response.hits.total))
return response.to_dict()
#
#
# @brief Get the course by courseid.
#
# @param courseid (str) The courseid
# @param term (str) The elasticsearch index
#
# @return A dictionary {course: [<dictionary containing the course info>],
# response: <response from the server> }
#
def get_course_by_id(courseid, term=None):
output = {'response': {},
'course': None}
index = term
if re.search("^\d\d-\d\d\d$", courseid):
searcher = CourseSearcher({'courseid': [courseid]}, index=index)
response = searcher.execute()
output['response'] = response_to_dict(response)
if has_error(response):
return output
if response.hits.total != 0:
# Got some hits
output['course'] = response[0].to_dict()
return output
def get_courses_by_id(courseid):
output = init_courses_output()
if re.search("^\d\d-\d\d\d$", courseid):
searcher = CourseSearcher({'courseid': [courseid]}, index=None)
response = searcher.execute()
output = format_courses_output(response)
if len(output['courses']) == 0:
output['response']['status'] = 404
return output
#
#
# @brief Get the course by instructor name.
#
# @param name (str) The instructor name
# @param index (str) The elasticsearch index
#
# @return A dictionary {courses: [<dictionary containing the course info>],
# response: <response from the server> }
#
def get_courses_by_instructor(name, fuzzy=False, index=None, size=100):
raw_query = {'instructor': [name]}
if fuzzy:
raw_query['instructor_fuzzy'] = [name]
searcher = CourseSearcher(raw_query, index=index, size=size)
response = searcher.execute()
output = format_courses_output(response)
return output
def get_courses_by_building_room(building, room, index=None, size=100):
assert(building is not None or room is not None)
raw_query = dict()
if building is not None:
raw_query['building'] = [building]
if room is not None:
raw_query['room'] = [room]
searcher = CourseSearcher(raw_query, index=index, size=size)
response = searcher.execute()
output = format_courses_output(response)
return output
def get_courses_by_datetime(datetime_str, span_str=None, size=200):
span_minutes = 0
if span_str is not None:
try:
span_minutes = int(span_str)
if not (config.course.SPAN_LOWER_LIMIT <= span_minutes <=
config.course.SPAN_UPPER_LIMIT):
raise(Exception(Message.SPAN_PARSE_FAIL))
except:
output = init_courses_output()
output['response'] = {
'status': 400,
'error': {
'message': Message.SPAN_PARSE_FAIL
}
}
return output
try:
# Try to convert the input string into arrow datetime format
# if the string is 'now', then set time to current time
if datetime_str == 'now':
date_time = arrow.now()
else:
date_time = arrow.get(datetime_str)
except:
output = init_courses_output()
output['response'] = {
'status': 400,
'error': {
'message': Message.DATETIME_PARSE_FAIL
}
}
return output
index = utils.get_course_index_from_date(date_time.datetime)
searcher = CourseSearcher(
{'datetime': [date_time],
'timespan': [span_minutes]},
index=index, size=size
)
response = searcher.execute()
output = format_courses_output(response)
return output
def get_courses_by_searching(args, size=100):
# valid_args = ('text', 'name', 'desc', 'instructor', 'courseid',
# 'building', 'room', 'datetime_str', 'span_str', 'term')
if len(args) == 0:
output = init_courses_output()
output['response'] = {
'status': 400,
'error': {
'message': Message.EMPTY_SEARCH
}
}
return output
raw_query = {}
if 'text' in args:
raw_query['text'] = [args['text']]
else:
if 'name' in args:
raw_query['name'] = [args['name']]
# TODO: fix here
if 'desc' in args:
raw_query['desc'] = [args['desc']]
if 'instructor' in args:
raw_query['instructor'] = [args['instructor']]
if 'courseid' in args:
raw_query['courseid'] = [args['courseid']]
if 'building' in args:
raw_query['building'] = [args['building']]
if 'room' in args:
raw_query['room'] = [args['room']]
# if 'datetime_str' in args:
# # Duplicated from get_courses_by_datetime()
# # TODO: combine code
# span_minutes = 0
# datetime_str = args['datetime_str']
# span_str = args.get('span_str')
# if span_str is not None:
# try:
# span_minutes = int(span_str)
# if not (config.course.SPAN_LOWER_LIMIT <= span_minutes <=
# config.course.SPAN_UPPER_LIMIT):
# raise(Exception(Message.SPAN_PARSE_FAIL))
# raw_query['timespan'] = [span_minutes]
# except:
# output = init_courses_output()
# output['response'] = {
# 'status': 400,
# 'error': {
# 'message': Message.SPAN_PARSE_FAIL
# }
# }
# return output
# try:
# date_time = arrow.get(datetime_str)
# raw_query['datetime'] = [date_time]
# except:
# output = init_courses_output()
# output['response'] = {
# 'status': 400,
# 'error': {
# 'message': Message.DATETIME_PARSE_FAIL
# }
# }
# return output
# index = utils.get_course_index_from_date(date_time.datetime)
#
index = None
if 'term' in args:
# TODO: this is a quick hack to support the term arg
index = 'current'
searcher = CourseSearcher(raw_query, index=index, size=size)
response = searcher.execute()
output = format_courses_output(response)
return output
def get_fce_by_id(courseid, size=100):
searcher = FCESearcher({'courseid': [courseid]},
index=ES_FCE_INDEX,
size=size,
sort=['-year'])
response = searcher.execute()
output = format_fces_output(response)
return output
def get_fce_by_instructor(instructor, size=100):
searcher = FCESearcher({'instructor': [instructor]},
index=ES_FCE_INDEX,
size=size,
sort=['-year'])
response = searcher.execute()
output = format_fces_output(response)
return output
def list_all_courses(term):
if term == 'current':
index = utils.get_current_course_index()
else:
index = ES_COURSE_INDEX_PREFIX + term
print(index)
query = Q()
# Use ES api to search
s = Search(index=index).query(query).extra(
size=7000).source(False)
try:
response = s.execute().to_dict()
if "hits" in response:
for elem in response['hits']['hits']:
print(elem['_id'])
courseids = [elem['_id'] for elem in response['hits']['hits']]
return courseids
except:
pass
return []
if __name__ == '__main__':
config.settings.DEBUG = True
init_es_connection()
| mit | -2,254,356,826,381,168,400 | 32.041467 | 99 | 0.526572 | false | 4.052621 | true | false | false |
amrishparmar/mal_cl_interface | nl_interface/ui.py | 1 | 1055 | from time import sleep
from multiprocessing.pool import ThreadPool
import click
def loading_animation(msg):
"""Print out one rotation of a spinning bar loading animation
:param msg: A string, the message to display
"""
for c in "|/-\\":
click.echo("\r{}...{}".format(msg, c), nl=False)
sleep(0.07)
def threaded_action(action, msg="Loading", *args, **kwds):
"""Perform a potentially long-running action while displaying a loading animation
:param action: A function to perform
:param msg: A string, the message to display while action is running
:param args: A tuple, arguments to pass to the action function
:param kwds: A dictionary, keyword arguments to pass to the action function
:return: The return value of action function
"""
tp = ThreadPool(processes=1)
action_result = tp.apply_async(action, args=args, kwds=kwds)
while not action_result.ready():
loading_animation(msg)
click.echo("\r{}...Finished".format(msg))
return action_result.get()
| mit | -6,162,271,973,474,892,000 | 29.142857 | 85 | 0.675829 | false | 4.105058 | false | false | false |
xuru/restler | tests/test_modelstrategy.py | 2 | 1362 | import unittest
from restler.serializers import ModelStrategy
from tests.models import Model1
class ModelStrategyTest(unittest.TestCase):
def test_empty_strategy(self):
ms = ModelStrategy(Model1)
self.assertEqual(len(ms._ModelStrategy__name_map()), 0)
def test_all_strategy(self):
ms = ModelStrategy(Model1, include_all_fields=True)
self.assertEqual(len(ms.fields), 24)
def test_one_field_strategy(self):
ms = ModelStrategy(Model1) + ["string"]
self.assertEqual(len(ms.fields), 1)
def test_remove_noexistant_field(self):
def non_existant_field():
ModelStrategy(Model1) - ["new_field"]
self.assertRaises(ValueError, non_existant_field)
def test_new_instance(self):
m1 = ModelStrategy(Model1)
self.assertNotEqual(m1 + ["string"], m1 + ["string"])
def test_remove_field(self):
self.assertEqual(
len(ModelStrategy(Model1, True).fields) - 1,
len((ModelStrategy(Model1, True) - ["rating"]).fields))
def test_add_remove_property(self):
self.assertEqual(len(((ModelStrategy(Model1) + [{"prop": lambda o: o.rating}]) - ["prop"]).fields), 0)
def test_overridine_field(self):
self.assertTrue(callable(((ModelStrategy(Model1) + ["rating"]) << [{"rating": lambda o: o.rating}]).fields[0][1]))
| mit | 1,966,132,408,643,636,200 | 34.842105 | 122 | 0.64464 | false | 3.651475 | true | false | false |
arcyfelix/Courses | 18-05-05-Apache-Spark-with-Python-Big-Data-with-PySpark-and-Spark/6_Spark_SQL/3_UkMakerSpacesSparkSQL.py | 1 | 1041 | from pyspark.sql import SparkSession, functions as fs
if __name__ == "__main__":
session = SparkSession.builder.appName("UkMakerSpaces").master("local[*]").getOrCreate()
makerSpace = session.read.option("header", "true") \
.csv("data/uk-makerspaces-identifiable-data.csv")
# Using pyspark's functions class to pre-process the postcodes
# fs.lit creates a column of literal value.
# In this case it will be a white space.
postCode = session.read.option("header", "true").csv("data/uk-postcode.csv") \
.withColumn("PostCode", fs.concat_ws("", fs.col("PostCode"), fs.lit(" ")))
print("=== Print 20 records of makerspace table ===")
makerSpace.select("Name of makerspace", "Postcode").show()
print("=== Print 20 records of postcode table ===")
postCode.select("PostCode", "Region").show()
joined = makerSpace.join(postCode, makerSpace["Postcode"].startswith(postCode["Postcode"]), "left_outer")
print("=== Group by Region ===")
joined.groupBy("Region").count().show(200) | apache-2.0 | 8,371,738,106,421,133,000 | 42.416667 | 109 | 0.661864 | false | 3.589655 | false | false | false |
bowen0701/algorithms_data_structures | lc0204_count_primes.py | 1 | 1610 | """Leetcode 204. Count Primes
Easy
URL: https://leetcode.com/problems/count-primes/
Count the number of prime numbers less than a non-negative number, n.
Example:
Input: 10
Output: 4
Explanation: There are 4 prime numbers less than 10, they are 2, 3, 5, 7.
"""
class SolutionSqrt(object):
def _is_prime(self, i):
if i <= 1:
return 0
# Only check sqrt(i) for prime, since i = p*q <= p^2 for small p.
for p in range(2, int(i ** 0.5) + 1):
if i % p == 0:
return False
return True
def countPrimes(self, n):
"""
:type n: int
:rtype: int
Time complexity: O(n^1.5).
Space complexity: O(1).
"""
count = 0
for i in range(2, n):
if self._is_prime(i):
count += 1
return count
class SolutionSieve(object):
def countPrimes(self, n):
"""
:type n: int
:rtype: int
Time complexity: O(n*loglogn).
Space complexity: O(n).
"""
primes = [0] * n
# First set numbers, 2, ..., n - 1, as primes.
for i in range(2, n):
primes[i] = 1
# Sieve method: Flip i*i, i*i+i, ..., to non-primes, i <= sqrt(n).
for i in range(2, int(n ** 0.5) + 1):
if not primes[i]:
continue
for j in range(i * i, n, i):
primes[j] = 0
return sum(primes)
def main():
n = 10
print SolutionSqrt().countPrimes(n)
print SolutionSieve().countPrimes(n)
if __name__ == '__main__':
main()
| bsd-2-clause | 859,565,256,147,556,600 | 21.054795 | 74 | 0.496273 | false | 3.292434 | false | false | false |
markushutzler/pygeda | pygeda/commands/stat.py | 1 | 2610 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pygeda - Support tool for Electonic Design Automation
# Copyright (C) 2017 Markus Hutzler
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, absolute_import, division
from cmdparse import Command
import pygeda.lib.schem
import pygeda.lib.pcb
from pygeda.lib.log import message
class Stat(Command):
__cmd__ = "stat"
__help__ = "display project statistics"
def pcb_stat(self, path):
message('File {}'.format(path))
pcb = pygeda.lib.pcb.PCBFile(path)
pcb.open()
pcb.parse()
pcb.close()
# TODO: Read statitics
def sch_stat(self, path):
message('File {}'.format(path))
sch = pygeda.lib.schem.Schematic(path)
sch.open()
sch.parse()
sch.close()
stat = {'unique': 0, 'rerdes':0}
uids = []
for component in sch.components:
if component.refdes.is_set:
stat['refdes'] = stat.get('refdes', 0) + 1
uuid = component.uuid
if uuid and uuid not in uids:
stat['unique'] = stat.get('unique', 0) + 1
uids.append(uuid)
elif uuid:
stat['duplicate'] = stat.get('duplicate', 0) + 1
message(" Object Count : {}".format(len(sch.objects)))
message(" Components : {}".format(len(sch.components)))
message(" with refdes: {}".format(stat.get('refdes', 0)))
message(" unique : {}".format(stat.get('unique', 0)))
message(" duplicate : {}".format(stat.get('duplicate', 0)))
message(" Net Fragments : {}".format(len(sch.get_by_type('N'))))
def print_stat(self, env):
message("Statistics:")
message("===========\n")
for path in env.schematic_files:
self.sch_stat(path)
self.pcb_stat(env.pcb_file)
def run(self, env=None):
"""Run command."""
self.print_stat(env)
| gpl-3.0 | 3,676,940,689,704,679,000 | 33.342105 | 76 | 0.598467 | false | 3.681241 | false | false | false |
mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/tests/webgl/test_local_gemm.py | 2 | 1217 | import tvm
import numpy as np
def test_local_gemm():
if not tvm.module.enabled("opengl"):
return
if not tvm.module.enabled("llvm"):
return
nn = 1024
n = tvm.var('n')
n = tvm.convert(nn)
m = n
l = n
A = tvm.placeholder((n, l), name='A', dtype='int32')
B = tvm.placeholder((m, l), name='B', dtype='int32')
k = tvm.reduce_axis((0, l), name='k')
C = tvm.compute((n, m), lambda ii, jj: tvm.sum(A[ii, k] * B[jj, k], axis=k),
name='CC')
s = tvm.create_schedule(C.op)
s[C].opengl()
print(tvm.lower(s, [A, B, C], simple_mode=True))
f = tvm.build(s, [A, B, C], "opengl", name="gemm")
print("------opengl code------")
print(f.imported_modules[0].get_source(fmt="gl"))
ctx = tvm.opengl()
n, m, l = nn, nn, nn
a_np = np.random.uniform(low=0, high=10, size=(n, l)).astype(A.dtype)
b_np = np.random.uniform(low=0, high=10, size=(m, l)).astype(B.dtype)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(np.zeros((n, m), dtype=C.dtype), ctx)
f(a, b, c)
np.testing.assert_allclose(c.asnumpy(), np.dot(a_np, b_np.T))
if __name__ == "__main__":
test_local_gemm()
| apache-2.0 | -773,018,145,437,749,000 | 28.682927 | 80 | 0.539852 | false | 2.657205 | false | false | false |
prculley/gramps | gramps/gui/plug/report/_textreportdialog.py | 11 | 3750 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2001-2006 Donald N. Allingham
# Copyright (C) 2008-2009 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# GTK modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import GObject
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ...pluginmanager import GuiPluginManager
from gramps.gen.plug.report._constants import CATEGORY_TEXT
from ._docreportdialog import DocReportDialog
#-------------------------------------------------------------------------
#
# _TextFormatComboBox
#
#-------------------------------------------------------------------------
class _TextFormatComboBox(Gtk.ComboBox):
"""
This class is a combo box that allows the selection of a docgen plugin
from all textdoc plugins.
"""
def __init__(self, active):
Gtk.ComboBox.__init__(self)
pmgr = GuiPluginManager.get_instance()
self.__textdoc_plugins = []
for plugin in pmgr.get_docgen_plugins():
if plugin.get_text_support():
self.__textdoc_plugins.append(plugin)
self.store = Gtk.ListStore(GObject.TYPE_STRING)
self.set_model(self.store)
cell = Gtk.CellRendererText()
self.pack_start(cell, True)
self.add_attribute(cell, 'text', 0)
index = 0
active_index = 0
for plugin in self.__textdoc_plugins:
name = plugin.get_name()
self.store.append(row=[name])
if plugin.get_extension() == active:
active_index = index
index += 1
self.set_active(active_index)
def get_active_plugin(self):
"""
Get the plugin represented by the currently active selection.
"""
return self.__textdoc_plugins[self.get_active()]
#-----------------------------------------------------------------------
#
# TextReportDialog
#
#-----------------------------------------------------------------------
class TextReportDialog(DocReportDialog):
"""
A class of ReportDialog customized for text based reports.
"""
def __init__(self, dbstate, uistate, options, name, translated_name):
"""
Initialize a dialog to request that the user select options
for a basic text report. See the ReportDialog class for more
information.
"""
self.format_menu = None
self.category = CATEGORY_TEXT
DocReportDialog.__init__(self, dbstate, uistate, options,
name, translated_name)
def make_doc_menu(self, active=None):
"""
Build a menu of document types that are appropriate for
this text report.
"""
self.format_menu = _TextFormatComboBox( active )
| gpl-2.0 | 959,766,771,137,691,500 | 34.046729 | 79 | 0.549867 | false | 4.6875 | false | false | false |
yaukwankiu/armor | tests/colourbartest.py | 1 | 5176 | '''
Make a colorbar as a separate figure.
'''
from matplotlib import pyplot
import matplotlib as mpl
# Make a figure and axes with dimensions as desired.
fig = pyplot.figure(figsize=(8,3))
ax1 = fig.add_axes([0.05, 0.80, 0.9, 0.15])
ax2 = fig.add_axes([0.05, 0.475, 0.9, 0.15])
ax3 = fig.add_axes([0.05, 0.15, 0.9, 0.15])
# Set the colormap and norm to correspond to the data for which
# the colorbar will be used.
cmap = mpl.cm.cool
norm = mpl.colors.Normalize(vmin=5, vmax=10)
# ColorbarBase derives from ScalarMappable and puts a colorbar
# in a specified axes, so it has everything needed for a
# standalone colorbar. There are many more kwargs, but the
# following gives a basic continuous colorbar with ticks
# and labels.
cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=cmap,
norm=norm,
orientation='horizontal')
cb1.set_label('Some Units')
# The second example illustrates the use of a ListedColormap, a
# BoundaryNorm, and extended ends to show the "over" and "under"
# value colors.
cmap = mpl.colors.ListedColormap(['r', 'g', 'b', 'c'])
cmap.set_over('0.25')
cmap.set_under('0.75')
# If a ListedColormap is used, the length of the bounds array must be
# one greater than the length of the color list. The bounds must be
# monotonically increasing.
bounds = [1, 2, 4, 7, 8]
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
cb2 = mpl.colorbar.ColorbarBase(ax2, cmap=cmap,
norm=norm,
# to use 'extend', you must
# specify two extra boundaries:
boundaries=[0]+bounds+[13],
extend='both',
ticks=bounds, # optional
spacing='proportional',
orientation='horizontal')
cb2.set_label('Discrete intervals, some other units')
# The third example illustrates the use of custom length colorbar
# extensions, used on a colorbar with discrete intervals.
colourbar = {65 : [255 ,255,255],
60 : [159 , 49 , 206],
55 : [255 , 0 ,255],
50 : [206 , 0 , 0],
45 : [255 , 0 , 0],
40 : [255 , 99 , 99],
35 : [255 , 148 , 0],
30 : [231 , 198 , 0],
25 : [255 , 255, 0],
20 : [ 0 , 148, 0 ],
15 : [ 0 , 173 , 0 ],
10 : [ 0 , 206 , 0 ],
5 : [ 0, 0, 255], # VV i made these up: VV
0 : [ 0, 99, 255],
-5 : [ 0, 198, 255],
-10 : [156 ,156 , 156],
}
# http://stackoverflow.com/questions/3373256/set-colorbar-range-in-matplotlib
# http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps
# http://stackoverflow.com/questions/12073306/customize-colorbar-in-matplotlib
# http://stackoverflow.com/questions/7875688/how-can-i-create-a-standard-colorbar-for-a-series-of-plots-in-python
#* http://matplotlib.org/examples/api/colorbar_only.html
# http://stackoverflow.com/questions/4801366/convert-rgb-values-into-integer-pixel
"""
cdict = { 'red' : ( (0.0, 0.25, .25), (0.02, .59, .59), (1., 1., 1.)),
'green': ( (0.0, 0.0, 0.0), (0.02, .45, .45), (1., .97, .97)),
'blue' : ( (0.0, 1.0, 1.0), (0.02, .75, .75), (1., 0.45, 0.45))
}
"""
colourbarlen = 70 - (-10)
cdict = {
'red' : [],
'green': [],
'blue' : [],
}
##################################################################################
bounds = range(-10, 75, 5)
lowers = sorted(colourbar.keys())
cmap = mpl.colors.ListedColormap([[1.*colourbar[v][0]/255,
1.*colourbar[v][1]/255,
1.*colourbar[v][2]/255
] for v in lowers
]) # [[0., .4, 1.], [0., .8, 1.], [1., .8, 0.], [1., .4, 0.]]
cmap.set_over((1.*colourbar[65][0]/255,
1.*colourbar[65][1]/255,
1.*colourbar[65][2]/255))
cmap.set_under((1.*colourbar[-10][0]/255,
1.*colourbar[-10][1]/255,
1.*colourbar[-10][2]/255))
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
#fig = pyplot.figure()
#ax3 = fig.add_axes()
cb3 = mpl.colorbar.ColorbarBase(ax3, cmap=cmap,
norm=norm,
                                boundaries=[-15]+bounds+[75],
extend='both',
# Make the length of each extension
# the same as the length of the
# interior colors:
#extendfrac='auto',
ticks=bounds,
spacing='uniform',
orientation='horizontal'
)
cb3.set_label('Custom extension lengths, some other units')
pyplot.show()
| cc0-1.0 | -155,821,429,262,094,200 | 37.058824 | 113 | 0.482998 | false | 3.61958 | false | false | false |
wummel/wok | woklib/renderers.py | 1 | 3367 | # -*- coding: iso-8859-1 -*-
from __future__ import print_function
import logging
from .util import has_module
if not has_module('pygments'):
logging.warn('Pygments not enabled.')
# List of available renderers
all = []
class Renderer(object):
"""Base renderer class."""
extensions = []
@classmethod
def render(cls, plain):
"""Render text."""
return plain
all.append(Renderer)
class Plain(Renderer):
"""Plain text renderer. Replaces new lines with html </br>s"""
extensions = ['txt']
@classmethod
def render(cls, plain):
"""Render plain text."""
return plain.replace('\n', '<br>')
all.append(Plain)
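# Example (illustration only, not part of the original module):
#   Plain.render("first\nsecond")  ->  "first<br>second"
# The Markdown/reStructuredText/Textile renderers registered below return
# HTML fragments when their libraries are installed.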
# Include markdown, if it is available.
if has_module('markdown'):
from markdown import markdown
class Markdown(Renderer):
"""Markdown renderer."""
extensions = ['markdown', 'mkd', 'md']
plugins = ['def_list', 'footnotes']
if has_module('pygments'):
plugins.extend(['codehilite(css_class=codehilite)', 'fenced_code'])
@classmethod
def render(cls, plain):
"""Render markdown text."""
return markdown(plain, cls.plugins)
all.append(Markdown)
else:
logging.warn("markdown isn't available, trying markdown2")
# Try Markdown2
if has_module('markdown2'):
import markdown2
class Markdown2(Renderer):
"""Markdown2 renderer."""
extensions = ['markdown', 'mkd', 'md']
extras = ['def_list', 'footnotes']
if has_module('pygments'):
extras.append('fenced-code-blocks')
@classmethod
def render(cls, plain):
"""Render markdown text."""
return markdown2.markdown(plain, extras=cls.extras)
all.append(Markdown2)
else:
logging.warn('Markdown not enabled.')
# Include ReStructuredText Parser, if we have docutils
if has_module('docutils'):
import docutils.core
from docutils.writers.html4css1 import Writer as rst_html_writer
from docutils.parsers.rst import directives
if has_module('pygments'):
from .rst_pygments import Pygments as RST_Pygments
directives.register_directive('Pygments', RST_Pygments)
class ReStructuredText(Renderer):
"""reStructuredText renderer."""
extensions = ['rst']
@classmethod
def render(cls, plain):
"""Render reStructuredText text."""
w = rst_html_writer()
return docutils.core.publish_parts(plain, writer=w)['body']
all.append(ReStructuredText)
else:
logging.warn('reStructuredText not enabled.')
# Try Textile
if has_module('textile'):
import textile
class Textile(Renderer):
"""Textile renderer."""
extensions = ['textile']
@classmethod
def render(cls, plain):
"""Render textile text."""
return textile.textile(plain)
all.append(Textile)
else:
logging.warn('Textile not enabled.')
if len(all) <= 2:
logging.error("You probably want to install either a Markdown library (one of "
"'Markdown', or 'markdown2'), 'docutils' (for reStructuredText), or "
"'textile'. Otherwise only plain text input will be supported. You "
"can install any of these with 'sudo pip install PACKAGE'.")
| mit | 2,064,293,836,571,738,400 | 27.533898 | 83 | 0.6139 | false | 4.23522 | false | false | false |
brahle/fitmarket-python-api | fitmarket_api/models/status.py | 1 | 5711 | # coding: utf-8
"""
Fitmarket
    A small number of people - donors - share daily measurements of their weight. From one donor's daily weight we derive the values of two stocks:  - stock X has a value equal to the donor's weight on that day.  - the inverse stock ~X has the value (150 kg - X).  Note that:  - as X rises, ~X falls.  - X + ~X = 150 kg  Every player starts the game with 10,000 kg of available money. The player uses that money to trade stocks. A player's total value is the sum of the available money and the current value of all the stocks they own.  The goal of the game is to maximize total value by predicting stock price movements well.  For example, on the first day a player buys 125 shares of \"X\" at 80 kg. On the second day, the stock rises to 82 kg. If the player sells all \"X\" shares, they have earned 2 kg * 125 = 250 kg!  The game does not allow a donor to trade their own stock.
OpenAPI spec version: 1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class Status(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, total_money=None, free_money=None, shares=None):
"""
Status - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'total_money': 'float',
'free_money': 'float',
'shares': 'list[StockWithCount]'
}
self.attribute_map = {
'total_money': 'total_money',
'free_money': 'free_money',
'shares': 'shares'
}
self._total_money = total_money
self._free_money = free_money
self._shares = shares
@property
def total_money(self):
"""
Gets the total_money of this Status.
:return: The total_money of this Status.
:rtype: float
"""
return self._total_money
@total_money.setter
def total_money(self, total_money):
"""
Sets the total_money of this Status.
:param total_money: The total_money of this Status.
:type: float
"""
if total_money is None:
raise ValueError("Invalid value for `total_money`, must not be `None`")
self._total_money = total_money
@property
def free_money(self):
"""
Gets the free_money of this Status.
:return: The free_money of this Status.
:rtype: float
"""
return self._free_money
@free_money.setter
def free_money(self, free_money):
"""
Sets the free_money of this Status.
:param free_money: The free_money of this Status.
:type: float
"""
if free_money is None:
raise ValueError("Invalid value for `free_money`, must not be `None`")
self._free_money = free_money
@property
def shares(self):
"""
Gets the shares of this Status.
:return: The shares of this Status.
:rtype: list[StockWithCount]
"""
return self._shares
@shares.setter
def shares(self, shares):
"""
Sets the shares of this Status.
:param shares: The shares of this Status.
:type: list[StockWithCount]
"""
if shares is None:
raise ValueError("Invalid value for `shares`, must not be `None`")
self._shares = shares
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| apache-2.0 | -3,221,127,743,901,845,000 | 29.972826 | 850 | 0.578523 | false | 3.81204 | false | false | false |
slosar/april | py/MCMCAnalyzer.py | 1 | 7508 | #
# This is the MCMC module.
# it spits out chains that are compatible with CosmoMC
# it calculates cov matrix during burn-in.
# chain_num tells it to spit out multi-node chains.
# optional temperature makes it sample at a higher temperature but note that
# this guy, as opposed to cosmomc, reweights the weights on the fly.
#
from random import *
from math import *
from scipy import *
import scipy.linalg as la
import copy
import random
import sys
import os.path as path
class MCMCAnalyzer:
def __init__(self, like, outfile, skip=5000, nsamp=100000, temp=1.0, cov=None, chain_num=None):
self.like = like
self.outfile = outfile
self.nsamp = nsamp
self.skip = skip
self.temp = float(temp) # temperature
self.chain_num = chain_num
self.cpars = like.freeParameters()
minvals, maxvals = [], []
for lb, hb in [p.bounds for p in self.cpars]:
minvals.append(lb)
maxvals.append(hb)
self.minvals = array(minvals)
self.maxvals = array(maxvals)
print("Bounds:", self.minvals, self.maxvals)
self.N = len(self.cpars)
if (like.name() == "Composite"):
self.sublikenames = like.compositeNames()
self.composite = True
else:
self.composite = False
        if cov is None:
# make initial cov matrix from diagonal "errors"
errs = [0.01*p.error**2 for p in self.cpars]
self.init_pcov(diag(errs))
else:
self.init_pcov(cov)
self.RunChain()
def RunChain(self):
self.openFiles()
self.cloglike, self.cloglikes = self. getLikes()
# set up logofs based on the first log like which should be
# the same for all chains. Better than nothing.
# self.logofs=self.cloglike
# Actually, above doesn't seem to work very well. Instead, use zero, as our likelihoods never became very large
self.logofs = 0
# current weight
self.cw = 0
# current counter
self.co = 0
# mean for burin
self.swx = 0
self.meanx = zeros(self.N)
self.meanxx = zeros((self.N, self.N))
# max loglike
self.maxloglike = -1e30
# are we done
self.done = False
print("Starting chain...")
while not (self.done):
ppars, numout = self.GetProposal()
self.cw += numout ## things hitting outside the prior are formally rejected samples
self.like.updateParams(ppars)
ploglike, ploglikes = self.getLikes()
if (isnan(ploglike)):
print("Something bad has happened, nan in loglike, assuming zero log")
ploglike = -1e50
# print cloglike, ploglike, [p.value for p in like.freeParameters()], [p.value for p in self.cpars]
if (ploglike > self.cloglike):
accept = True
else:
accept = (exp((ploglike-self.cloglike)/self.temp)
> uniform(0., 1.))
# print [p.value for p in ppars], accept, ploglike
# stop
if (accept):
self.ProcessAccepted(ppars, ploglike, ploglikes)
else:
self.cw += 1
self.closeFiles()
def GetProposal(self):
vec = zeros(self.N)
numreject=0
while True:
ppars = copy.deepcopy(self.cpars)
step = self.draw_pcov()
# print step# [p.value for p in step]
for i, p in enumerate(ppars):
p.value += step[i]
vec[i] = p.value
if all(vec > self.minvals) and all(vec < self.maxvals):
return ppars, numreject
numreject+=1
def init_pcov(self, mat):
self.chol = la.cholesky(mat)
def draw_pcov(self):
a = array([random.gauss(0., 1,) for i in range(self.N)])
return dot(a, self.chol)
def openFiles(self):
outfile = self.outfile
if self.chain_num in [None, 1]:
fpar = open(outfile+".paramnames", 'w')
for p in self.cpars:
fpar.write(p.name+"\t\t\t"+p.Ltxname+"\n")
if self.composite:
for name in self.sublikenames:
fpar.write(name+"_like \t\t\t"+name+"\n")
fpar.write("theory_prior \t\t\t None \n")
fpar.close()
formstr = '%g '+'%g '*(self.N+1)
if (self.composite):
formstr += '%g '*(len(self.sublikenames)+1)
formstr += '\n'
        if self.chain_num is None:
cfname = outfile+".txt"
mlfname = outfile+".maxlike"
else:
cfname = outfile+"_%i.txt" % (self.chain_num)
mlfname = outfile+"_%i.maxlike" % (self.chain_num)
if (path.isfile(cfname)):
print("Due to bad habits in the past, won't open existing file.", cfname)
sys.exit(1)
self.fout = open(cfname, 'w')
self.mlfout = open(mlfname, 'w')
self.formstr = formstr
def closeFiles(self):
self.fout.close()
self.mlfout.close()
def getLikes(self):
if (self.composite):
cloglikes = self.like.compositeLogLikes_wprior()
cloglike = cloglikes.sum()
else:
cloglikes = []
cloglike = self.like.loglike_wprior()
return cloglike, cloglikes
def ProcessAccepted(self, ppars, ploglike, ploglikes):
self.co += 1
if (self.co % 1000 == 0):
print("Accepted samples", self.co, self.cw)
vec = [p.value for p in self.cpars]
if (self.co > self.skip):
# weight rescaled
wers = self.cw*exp((self.cloglike-self.logofs)
* (self.temp-1.0)/self.temp)
if (self.composite):
outstr = self.formstr % tuple(
[wers, -self.cloglike]+vec + self.cloglikes.tolist())
else:
outstr = self.formstr % tuple([wers, -self.cloglike]+vec)
self.fout.write(outstr)
# Flush file on regular basis
if (self.co % 1000 == 0):
self.fout.flush()
if (self.cloglike > self.maxloglike):
self.maxloglike = self.cloglike
print("New maxloglike", self.maxloglike)
self.mlfout.seek(0)
self.mlfout.write(outstr)
self.mlfout.flush()
if self.co > self.nsamp:
self.done = True
elif (self.co < self.skip):
self.swx += self.cw
v = array(vec)
self.meanx += v*self.cw
self.meanxx += outer(v, v)*self.cw
if (self.cw > 30):
print("Still burning in, weight too large")
self.chol *= 0.9
print(self.cw)
else: # co==skip
self.meanx /= self.swx
self.meanxx /= self.swx
self.meanxx -= outer(self.meanx, self.meanx)
print("Re-initializing covariance matrix after burn-in")
print(self.meanxx)
for i, p in enumerate(self.cpars):
print(p.name, p.value, sqrt(self.meanxx[i, i]))
self.init_pcov(self.meanxx)
self.cw = 1
self.cpars = ppars
self.cloglike = ploglike
if self.composite:
self.cloglikes = ploglikes
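# ---------------------------------------------------------------------------
# Usage sketch (illustration only, not part of the original module).
# MCMCAnalyzer only relies on the small "likelihood" interface used above:
# freeParameters() returning parameter objects with .name, .Ltxname, .value,
# .error and .bounds, plus updateParams(), name() and loglike_wprior()
# (composite likelihoods additionally provide compositeNames() and
# compositeLogLikes_wprior()).  The mock names below are hypothetical:
#
#   class MockParam(object):
#       def __init__(self, name, value, error, bounds):
#           self.name = self.Ltxname = name
#           self.value, self.error, self.bounds = value, error, bounds
#
#   class MockLike(object):
#       def __init__(self):
#           self.pars = [MockParam("x", 0.0, 0.5, (-5.0, 5.0))]
#       def name(self): return "Mock"
#       def freeParameters(self): return self.pars
#       def updateParams(self, pars): self.pars = pars
#       def loglike_wprior(self): return -0.5*sum(p.value**2 for p in self.pars)
#
#   MCMCAnalyzer(MockLike(), "chains/mock", skip=1000, nsamp=20000)
# ---------------------------------------------------------------------------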
| gpl-2.0 | -819,347,348,433,912,800 | 32.368889 | 119 | 0.532898 | false | 3.635835 | false | false | false |
drJfunk/gbmgeometry | gbmgeometry/gbm_frame.py | 1 | 4791 | import astropy.coordinates as coord
import astropy.units as u
import numpy as np
from astropy.coordinates import BaseCoordinateFrame, Attribute, RepresentationMapping
from astropy.coordinates import frame_transform_graph
class GBMFrame(BaseCoordinateFrame):
"""
Fermi GBM Frame
Parameters
----------
representation : `BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
"""
default_representation = coord.SphericalRepresentation
frame_specific_representation_info = {
'spherical': [
RepresentationMapping(
reprname='lon', framename='lon', defaultunit=u.degree),
RepresentationMapping(
reprname='lat', framename='lat', defaultunit=u.degree),
RepresentationMapping(
reprname='distance', framename='DIST', defaultunit=None)
],
'unitspherical': [
RepresentationMapping(
reprname='lon', framename='lon', defaultunit=u.degree),
RepresentationMapping(
reprname='lat', framename='lat', defaultunit=u.degree)
],
'cartesian': [
RepresentationMapping(
reprname='x', framename='SCX'), RepresentationMapping(
reprname='y', framename='SCY'), RepresentationMapping(
reprname='z', framename='SCZ')
]
}
# Specify frame attributes required to fully specify the frame
sc_pos_X = Attribute(default=None)
sc_pos_Y = Attribute(default=None)
sc_pos_Z = Attribute(default=None)
quaternion_1 = Attribute(default=None)
quaternion_2 = Attribute(default=None)
quaternion_3 = Attribute(default=None)
quaternion_4 = Attribute(default=None)
# equinox = TimeFrameAttribute(default='J2000')
@staticmethod
def _set_quaternion(q1, q2, q3, q4):
        sc_matrix = np.zeros((3, 3))
        sc_matrix[0, 0] = q1 ** 2 - q2 ** 2 - q3 ** 2 + q4 ** 2
        sc_matrix[0, 1] = 2.0 * (q1 * q2 + q4 * q3)
        sc_matrix[0, 2] = 2.0 * (q1 * q3 - q4 * q2)
        sc_matrix[1, 0] = 2.0 * (q1 * q2 - q4 * q3)
        sc_matrix[1, 1] = -q1 ** 2 + q2 ** 2 - q3 ** 2 + q4 ** 2
        sc_matrix[1, 2] = 2.0 * (q2 * q3 + q4 * q1)
        sc_matrix[2, 0] = 2.0 * (q1 * q3 + q4 * q2)
        sc_matrix[2, 1] = 2.0 * (q2 * q3 - q4 * q1)
        sc_matrix[2, 2] = -q1 ** 2 - q2 ** 2 + q3 ** 2 + q4 ** 2
        return sc_matrix
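# Usage sketch (illustration only, not part of the original module): once the
# frame transforms defined below are registered, an ICRS position can be
# rotated into spacecraft coordinates for a given attitude quaternion.  The
# quaternion values here are made up purely for illustration:
#
#   sky = coord.SkyCoord(ra=10. * u.deg, dec=-30. * u.deg, frame='icrs')
#   gbm = GBMFrame(quaternion_1=0., quaternion_2=0.,
#                  quaternion_3=0., quaternion_4=1.)  # identity attitude
#   sky.transform_to(gbm)  # -> lon/lat in the GBM spacecraft frame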
@frame_transform_graph.transform(coord.FunctionTransform, GBMFrame, coord.ICRS)
def gbm_to_j2000(gbm_coord, j2000_frame):
""" Compute the transformation from heliocentric Sgr coordinates to
spherical Galactic.
"""
sc_matrix = gbm_coord._set_quaternion(gbm_coord.quaternion_1,
gbm_coord.quaternion_2,
gbm_coord.quaternion_3,
gbm_coord.quaternion_4)
# X,Y,Z = gbm_coord.cartesian
pos = gbm_coord.cartesian.xyz.value
X0 = np.dot(sc_matrix[:, 0], pos)
X1 = np.dot(sc_matrix[:, 1], pos)
X2 = np.clip(np.dot(sc_matrix[:, 2], pos), -1., 1.)
#dec = np.arcsin(X2)
dec = np.pi/2. - np.arccos(X2)
idx = np.logical_and(np.abs(X0) < 1E-6, np.abs(X1) < 1E-6)
ra = np.zeros_like(dec)
ra[~idx] = np.arctan2(X1, X0) % (2 * np.pi)
return coord.ICRS(ra=ra * u.radian, dec=dec * u.radian)
@frame_transform_graph.transform(coord.FunctionTransform, coord.ICRS, GBMFrame)
def j2000_to_gbm(j2000_frame, gbm_coord):
""" Compute the transformation from heliocentric Sgr coordinates to
spherical Galactic.
"""
sc_matrix = gbm_coord._set_quaternion(gbm_coord.quaternion_1,
gbm_coord.quaternion_2,
gbm_coord.quaternion_3,
gbm_coord.quaternion_4)
pos = j2000_frame.cartesian.xyz.value
X0 = np.dot(sc_matrix[0, :], pos)
X1 = np.dot(sc_matrix[1, :], pos)
X2 = np.clip(np.dot(sc_matrix[2, :], pos), -1., 1.)
el = np.pi / 2. - np.arccos(X2) # convert to proper frame
idx = np.logical_and(np.abs(X0) < 1E-6, np.abs(X1) < 1E-6)
az = np.zeros_like(el)
az[~idx] = np.arctan2(X1, X0) % (2 * np.pi)
az[np.rad2deg(el) == 90.] = 0.
return GBMFrame(
lon=az * u.radian, lat=el * u.radian,
quaternion_1=gbm_coord.quaternion_1,
quaternion_2=gbm_coord.quaternion_2,
quaternion_3=gbm_coord.quaternion_3,
quaternion_4=gbm_coord.quaternion_4)
| mit | 2,118,053,221,106,580,000 | 31.371622 | 85 | 0.548111 | false | 3.265849 | false | false | false |
I-sektionen/i-portalen | wsgi/iportalen_django/exchange_portal/migrations/0008_auto_20180129_1747.py | 1 | 3257 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.utils.timezone import utc
import datetime
class Migration(migrations.Migration):
dependencies = [
('exchange_portal', '0007_auto_20171121_2041'),
]
operations = [
migrations.RemoveField(
model_name='travel_story',
name='body',
),
migrations.AddField(
model_name='travel_story',
name='living_text',
field=models.TextField(verbose_name='boende', default=1, help_text='Hur bodde du?\u2028 Hur hittade du ditt boende? Tips på eventuell mäklare eller liknande? Vilka alternativ finns?\u2028 Priser och standard?\u2028'),
preserve_default=False,
),
migrations.AddField(
model_name='travel_story',
name='location_text',
field=models.TextField(verbose_name='landet och staden', default=1, help_text='Hur upplevdes landet? Staden? Kultur? Billigt eller dyrt?'),
preserve_default=False,
),
migrations.AddField(
model_name='travel_story',
name='other_text',
field=models.TextField(verbose_name='övrigt', default=datetime.datetime(2018, 1, 29, 16, 47, 31, 204380, tzinfo=utc), help_text='Brödtext syns när en reseberättelse visas enskilt.'),
preserve_default=False,
),
migrations.AddField(
model_name='travel_story',
name='prep_text',
field=models.TextField(verbose_name='förberedelser', default=datetime.datetime(2018, 1, 29, 16, 47, 43, 578495, tzinfo=utc), help_text='Var det några särskilda förberedelser som krävdes?\u2028 Har du några generella tips gällande ansökan? Visum?'),
preserve_default=False,
),
migrations.AddField(
model_name='travel_story',
name='school_text',
field=models.TextField(verbose_name='skolan', default=datetime.datetime(2018, 1, 29, 16, 47, 49, 523930, tzinfo=utc), help_text='Geografisk placering i staden?\u2028 Hur var campus?\u2028 Var det lätt att träffa lokalbefolkning?\u2028 Hur var studentlivet? Kurser: var det lätt/svårt att få kurser? Var de lätta/svåra att få tillgodoräknade?'),
preserve_default=False,
),
migrations.AddField(
model_name='travel_story',
name='sparetime_text',
field=models.TextField(verbose_name='fritid', default=datetime.datetime(2018, 1, 29, 16, 47, 54, 168192, tzinfo=utc), help_text='Vad gör man på fritiden?\u2028 Resor?\u2028 Tips på saker man inte får missa'),
preserve_default=False,
),
migrations.AddField(
model_name='travel_story',
name='studies_text',
field=models.TextField(verbose_name='studier', default=datetime.datetime(2018, 1, 29, 16, 47, 58, 966304, tzinfo=utc), help_text='Hur var nivån på kurserna?\u2028 Råd angående att välja kurser på plats?\u2028 Svårt att hitta kurser på engelska?\u2028 Hur var språket? (framförallt för de som läser ii eller som inte läste på engelska)'),
preserve_default=False,
),
]
| mit | 2,449,549,004,156,473,000 | 50.870968 | 356 | 0.64148 | false | 3.258359 | false | false | false |
Tiloon/Object-Recognition | src/desc.py | 1 | 2184 | import numpy
import math
class descriptor:
def __init__(self):
self.size_sub_squares = 8
self.eps = 0.00001
    def create_descriptors(self, features, img):
        """Build a descriptor for every feature point that lies far enough from
        the image border to fit the surrounding grid of 8x8-pixel cells."""
descriptors = {}
floatImg = img.astype(numpy.float64)
desNum = len(features)
for i in range(desNum):
x, y = features[i][0], features[i][1]
w, h = img.shape[0], img.shape[1]
if self.size_sub_squares < x < w - 2 * self.size_sub_squares \
and self.size_sub_squares < y < h - 2 * self.size_sub_squares:
descriptors[(x, y)] = self.create_descriptor(x, y, floatImg)
return descriptors
    def create_descriptor(self, x, y, img):
        """Concatenate the 18-bin orientation histograms of the 16 8x8-pixel
        cells around (x, y) into a single 288-value, SIFT-like descriptor."""
hists = [self.gradHist(x - 8, y - 8, img),
self.gradHist(x - 8, y, img),
self.gradHist(x - 8, y + 8, img),
self.gradHist(x - 8, y + 16, img),
self.gradHist(x, y - 8, img),
self.gradHist(x, y, img),
self.gradHist(x, y + 8, img),
self.gradHist(x, y + 16, img),
self.gradHist(x + 8, y - 8, img),
self.gradHist(x + 8, y, img),
self.gradHist(x + 8, y + 8, img),
self.gradHist(x + 8, y + 16, img),
self.gradHist(x + 16, y - 8, img),
self.gradHist(x + 16, y, img),
self.gradHist(x + 16, y + 8, img),
self.gradHist(x + 16, y + 16, img)]
return [col for hist in hists for col in hist] # group hists by values
    def gradHist(self, x, y, img):
        """18-bin histogram of gradient orientations over the 8x8-pixel cell
        ending at (x, y); each pixel votes with its gradient magnitude."""
P = math.pi
localDir = [0] * 18
for b in range(x - 8, x):
for c in range(y - 8, y):
m, t = self.gradient_properties(b, c, img)
localDir[int(round((18 * t) / P, 0)) + 8] += m
return localDir
    def gradient_properties(self, x, y, img):
        """Return the gradient magnitude and orientation at (x, y), computed
        with central differences; eps guards against division by zero."""
norm = math.sqrt((img[x + 1, y] - img[x - 1, y]) ** 2 + (img[x, y + 1] - img[x, y - 1]) ** 2)
orientation = math.atan((img[x, y + 1] - img[x, y - 1]) / (img[x + 1, y] - img[x - 1, y] + self.eps))
return norm, orientation | mit | 7,932,203,387,093,352,000 | 38.017857 | 109 | 0.473443 | false | 3.230769 | false | false | false |
nish10z/CONCUSS | lib/coloring/basic/optimization_interval.py | 3 | 3058 | #!/usr/bin/python
#
# This file is part of CONCUSS, https://github.com/theoryinpractice/concuss/,
# and is Copyright (C) North Carolina State University, 2015. It is licensed
# under the three-clause BSD license; see LICENSE.
#
from lib.util.memorized import memorized
import sys
import copy
import random
# @memorized(['g', 'trans', 'frat', 'col', 'i'])
def optimization_interval(orig, g, trans, frat, col, i, treeDepth, mobj):
    """Try to reduce the number of colours by re-adding only random subsets of
    the transitive/fraternal edges created in step i, keeping the smallest
    colouring that still passes the treedepth check."""
# print " remove transitive and fraternal edges"
# remove all transitive and fraternal edges of the last step
edges = {}
optcols = copy.deepcopy(col) # avoid side effects
col = copy.deepcopy(col) # avoid side effects
for (s, t) in trans.keys():
step = trans[(s, t)]
if (step == i):
g.remove_arc(s, t)
edges[(s, t)] = (True, trans[(s, t)])
del trans[(s, t)]
for (s, t) in frat.keys():
step = frat[(s, t)]
if (step == i):
g.remove_arc(s, t)
edges[(s, t)] = (False, frat[(s, t)])
del frat[(s, t)]
numbAdded = 0
numbAdd = len(edges) / 2
attempts = 0
resColors = 0
MAX_ATTEMPTS = 2
while True:
mod = len(edges)
ra = numbAdd
addedEdges = {}
for (s, t) in edges.keys():
isTrans, value = edges[(s, t)]
# add randomly 'numbAdd' edges from the list 'restEdges'
rand = random.randint(0, mod-1)
if (rand < ra):
g.add_arc(s, t, 0)
if isTrans:
trans[(s, t)] = value
else:
frat[(s, t)] = value
addedEdges[(s, t)] = isTrans
del edges[(s, t)]
ra -= 1
if (ra == 0):
break
mod -= 1
# end for
# sys.stdout.write(" check with " + str(numbAdded+numbAdd) + " edges")
newcol = mobj.col(orig, g, trans, frat, col)
correct, nodes = mobj.ctd(orig, g, newcol, treeDepth)
# sys.stdout.write(" -> " + str(correct))
if correct:
if len(newcol) < len(optcols):
optcols = copy.deepcopy(newcol)
numColors = len(newcol)
# sys.stdout.write(", colors: " + str(numColors) + '\n')
# else:
# sys.stdout.write('\n')
attempts += 1
if (correct or (attempts < MAX_ATTEMPTS)):
for ((s, t), isTrans) in addedEdges.iteritems():
if isTrans:
edges[(s, t)] = (True, trans[(s, t)])
del trans[(s, t)]
else:
edges[(s, t)] = (False, frat[(s, t)])
del frat[(s, t)]
g.remove_arc(s, t)
# end for
else:
numbAdded += numbAdd
if (correct or (attempts == MAX_ATTEMPTS)):
attempts = 0
numbAdd = numbAdd / 2
if (numbAdd == 0):
break
# end while
return optcols
# end def
| bsd-3-clause | -8,832,448,500,417,851,000 | 25.824561 | 79 | 0.478417 | false | 3.555814 | false | false | false |
pedroallenrevez/MemLinguo | memlingo/dictionary.py | 1 | 4495 | #!/usr/bin/env python
# future
# standard lib
# third-party
# memlingo
import memlingo.yandex as yandex
# local
# try/except
class WordDictionary:
'''WordDictionary contains all your known words from a given language.
It is a simple dictionary that maps <word, card>.
Attributes
----------
lang: str
Language of the current dictionary.
String must include the target translation, due to Yandex API in form:
"ru-en"
"jp-en"
ID: int
This unique identifier is used by the genanki library. It is needed
because Anki decks, have a unique identifier that can be referred to,
and is important when updating the deck.
words: dict <str, Card>
Contains all the words and relevant information
'''
class Card:
'''Card holds the relevant information on a word.
        Cards hold the required information to build an AnkiCard.
Attributes
----------
word: str
The word itself. How it is written. Equal to the dictionary key.
word_class: str
The class of the word. Can be noun, adjective, adverb, etc.
translations: [str]
A list of all possible translations of the word.
examples: [(str,str)]
A list of tuples, that contain pairs of translated sentences.
dirty_bit: int (0,1)
Used to discern if it's needed to export again.
'''
def __init__(self, word, wclass, translations, examples, bit=0):
self.word = word
self.word_class = wclass
self.translations = translations
self.examples = examples
self.dirty_bit = bit
def __init__(self, language, uniqueID):
self.lang = language
self.ID = uniqueID
self.words = {}
# TODO add_card shouldn't receive an api key
def add_card(self, api_key, word):
'''Adds a new word to the dictionary.
All information on a word is fetched - translations, examples, etc.
In the future, IPA transcribing, and sound files will be available.
Parameters
----------
api_key: str
The Yandex Api Key required to lookup a word.
word: str
The word that the user wants to add to the collection.
Returns
-------
Nil
Side-Effects
------------
Searches for word, gathers relevant information, and then adds the card
to the WordDictionary.
'''
# Word must be encoded to be stored in a valid format (e.g.: Russian,
# Japanese)
utf8word = word.encode('utf-8')
if utf8word in self.words:
# Panic
print("That word as already been added, skipping...\n")
return
jsonie = yandex.lookup(api_key, self.lang, word)
if jsonie is not None:
word = yandex.get_word(jsonie)
word_class = yandex.get_word_class(jsonie)
translations = yandex.get_translations(jsonie)
examples = yandex.get_examples(jsonie)
new_card = self.Card(word, word_class, translations, examples)
self.words[utf8word] = new_card
return
# TODO this shouldn't use api_key as well
def update(self, api_key, words):
'''Update the WordDictionary with given list of words.
Parameters
----------
api_key: str
The Yandex Api Key required to lookup a word.
words: [str]
A list of words that the user wants to add to the collection.
Returns
-------
Nil
Side-Effects
------------
Adds every word that is on Yandex, to the user collection.
'''
word_counter = 0
for word in words:
self.add_card(api_key, word)
word_counter += 1
print(str(word_counter) + " words added to collection\n")
return
def to_print(self):
        '''Prints the dictionary in a pretty manner.
        Prints the queried word, word class, translations and examples
'''
for key in self.words:
print(key.decode('utf-8'))
print(self.words[key].word_class)
print(self.words[key].translations)
print(self.words[key].examples)
print(self.words[key].dirty_bit)
print('\n')
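# Usage sketch (illustration only; the API key and words are placeholders):
#
#   d = WordDictionary("ru-en", 12345)
#   d.update("YOUR_YANDEX_API_KEY", ["some", "words"])
#   d.to_print()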
if __name__ == "__main__":
newDick = WordDictionary("ru", 12345)
| mit | 8,505,630,829,982,515,000 | 30 | 79 | 0.575083 | false | 4.240566 | false | false | false |
sebastien17/MAVlink_plug | example/Test_GroundControl.py | 1 | 1814 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# This file is part of MAVlinkplug.
# MAVlinkplug is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# MAVlinkplug is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with MAVlinkplug. If not, see <http://www.gnu.org/licenses/>.
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if(__name__ == '__main__'):
from mavlinkplug import set_mavlink_dialect
import mavlinkplug.Modules.MavConnection
import mavlinkplug.Modules.FileWriter
import mavlinkplug.Modules.TcpConnection
import mavlinkplug.Plug
set_mavlink_dialect('pixhawk')
#Creating plug
plug = mavlinkplug.Plug.Plug()
plug.start()
#Set a mavlink connection with MAVlink ready devices
mav_con_01 = mavlinkplug.Modules.MavConnection.MavConnection(plug.plug_info(), 'COM3', baud=115200)
#Set a output file
#file_output = mavlinkplug.Modules.FileWriter.FileWriter(plug.plug_info(), 'Test_GroundControl.log')
#Set a connection for GC
gc_connection = mavlinkplug.Modules.TcpConnection.TcpConnection(plug.plug_info(), ('127.0.0.1', 17562), mav_con_01.ident())
#Start all modules
#file_output.start()
gc_connection.start()
mav_con_01.start()
#Server forever
plug.server_forever() | bsd-3-clause | 4,902,761,369,695,845,000 | 35.3 | 127 | 0.661521 | false | 3.35305 | false | false | false |
kovidgoyal/kitty | kittens/diff/options/types.py | 1 | 6324 | # generated by gen-config.py DO NOT edit
# vim:fileencoding=utf-8
import typing
from kitty.conf.utils import KeyAction, KittensKeyMap
import kitty.conf.utils
from kitty.rgb import Color
import kitty.rgb
from kitty.types import ParsedShortcut
import kitty.types
option_names = ( # {{{
'added_bg',
'added_margin_bg',
'background',
'diff_cmd',
'filler_bg',
'foreground',
'highlight_added_bg',
'highlight_removed_bg',
'hunk_bg',
'hunk_margin_bg',
'map',
'margin_bg',
'margin_fg',
'margin_filler_bg',
'num_context_lines',
'pygments_style',
'removed_bg',
'removed_margin_bg',
'replace_tab_by',
'search_bg',
'search_fg',
'select_bg',
'select_fg',
'syntax_aliases',
'title_bg',
'title_fg') # }}}
class Options:
added_bg: Color = Color(red=230, green=255, blue=237)
added_margin_bg: Color = Color(red=205, green=255, blue=216)
background: Color = Color(red=255, green=255, blue=255)
diff_cmd: str = 'auto'
filler_bg: Color = Color(red=250, green=251, blue=252)
foreground: Color = Color(red=0, green=0, blue=0)
highlight_added_bg: Color = Color(red=172, green=242, blue=189)
highlight_removed_bg: Color = Color(red=253, green=184, blue=192)
hunk_bg: Color = Color(red=241, green=248, blue=255)
hunk_margin_bg: Color = Color(red=219, green=237, blue=255)
margin_bg: Color = Color(red=250, green=251, blue=252)
margin_fg: Color = Color(red=170, green=170, blue=170)
margin_filler_bg: typing.Optional[kitty.rgb.Color] = None
num_context_lines: int = 3
pygments_style: str = 'default'
removed_bg: Color = Color(red=255, green=238, blue=240)
removed_margin_bg: Color = Color(red=255, green=220, blue=224)
replace_tab_by: str = ' '
search_bg: Color = Color(red=68, green=68, blue=68)
search_fg: Color = Color(red=255, green=255, blue=255)
select_bg: Color = Color(red=180, green=213, blue=254)
select_fg: typing.Optional[kitty.rgb.Color] = Color(red=0, green=0, blue=0)
syntax_aliases: typing.Dict[str, str] = {'pyj': 'py', 'pyi': 'py', 'recipe': 'py'}
title_bg: Color = Color(red=255, green=255, blue=255)
title_fg: Color = Color(red=0, green=0, blue=0)
map: typing.List[typing.Tuple[kitty.types.ParsedShortcut, kitty.conf.utils.KeyAction]] = []
key_definitions: KittensKeyMap = {}
config_paths: typing.Tuple[str, ...] = ()
config_overrides: typing.Tuple[str, ...] = ()
def __init__(self, options_dict: typing.Optional[typing.Dict[str, typing.Any]] = None) -> None:
if options_dict is not None:
for key in option_names:
setattr(self, key, options_dict[key])
@property
def _fields(self) -> typing.Tuple[str, ...]:
return option_names
def __iter__(self) -> typing.Iterator[str]:
return iter(self._fields)
def __len__(self) -> int:
return len(self._fields)
def _copy_of_val(self, name: str) -> typing.Any:
ans = getattr(self, name)
if isinstance(ans, dict):
ans = ans.copy()
elif isinstance(ans, list):
ans = ans[:]
return ans
def _asdict(self) -> typing.Dict[str, typing.Any]:
return {k: self._copy_of_val(k) for k in self}
def _replace(self, **kw: typing.Any) -> "Options":
ans = Options()
for name in self:
setattr(ans, name, self._copy_of_val(name))
for name, val in kw.items():
setattr(ans, name, val)
return ans
def __getitem__(self, key: typing.Union[int, str]) -> typing.Any:
k = option_names[key] if isinstance(key, int) else key
try:
return getattr(self, k)
except AttributeError:
pass
raise KeyError(f"No option named: {k}")
defaults = Options()
defaults.map = [
# quit
(ParsedShortcut(mods=0, key_name='q'), KeyAction('quit')),
# quit
(ParsedShortcut(mods=0, key_name='ESCAPE'), KeyAction('quit')),
# scroll_down
(ParsedShortcut(mods=0, key_name='j'), KeyAction('scroll_by', (1,))),
# scroll_down
(ParsedShortcut(mods=0, key_name='DOWN'), KeyAction('scroll_by', (1,))),
# scroll_up
(ParsedShortcut(mods=0, key_name='k'), KeyAction('scroll_by', (-1,))),
# scroll_up
(ParsedShortcut(mods=0, key_name='UP'), KeyAction('scroll_by', (-1,))),
# scroll_top
(ParsedShortcut(mods=0, key_name='HOME'), KeyAction('scroll_to', ('start',))),
# scroll_bottom
(ParsedShortcut(mods=0, key_name='END'), KeyAction('scroll_to', ('end',))),
# scroll_page_down
(ParsedShortcut(mods=0, key_name='PAGE_DOWN'), KeyAction('scroll_to', ('next-page',))),
# scroll_page_down
(ParsedShortcut(mods=0, key_name=' '), KeyAction('scroll_to', ('next-page',))),
# scroll_page_up
(ParsedShortcut(mods=0, key_name='PAGE_UP'), KeyAction('scroll_to', ('prev-page',))),
# next_change
(ParsedShortcut(mods=0, key_name='n'), KeyAction('scroll_to', ('next-change',))),
# prev_change
(ParsedShortcut(mods=0, key_name='p'), KeyAction('scroll_to', ('prev-change',))),
# all_context
(ParsedShortcut(mods=0, key_name='a'), KeyAction('change_context', ('all',))),
# default_context
(ParsedShortcut(mods=0, key_name='='), KeyAction('change_context', ('default',))),
# increase_context
(ParsedShortcut(mods=0, key_name='+'), KeyAction('change_context', (5,))),
# decrease_context
(ParsedShortcut(mods=0, key_name='-'), KeyAction('change_context', (-5,))),
# search_forward
(ParsedShortcut(mods=0, key_name='/'), KeyAction('start_search', (True, False))),
# search_backward
(ParsedShortcut(mods=0, key_name='?'), KeyAction('start_search', (True, True))),
# next_match
(ParsedShortcut(mods=0, key_name='.'), KeyAction('scroll_to', ('next-match',))),
# next_match
(ParsedShortcut(mods=0, key_name='>'), KeyAction('scroll_to', ('next-match',))),
# prev_match
(ParsedShortcut(mods=0, key_name=','), KeyAction('scroll_to', ('prev-match',))),
# prev_match
(ParsedShortcut(mods=0, key_name='<'), KeyAction('scroll_to', ('prev-match',))),
# search_forward_simple
(ParsedShortcut(mods=0, key_name='f'), KeyAction('start_search', (False, False))),
# search_backward_simple
(ParsedShortcut(mods=0, key_name='b'), KeyAction('start_search', (False, True))),
]
| gpl-3.0 | 1,632,852,033,682,323,000 | 36.642857 | 99 | 0.618912 | false | 3.084878 | false | false | false |
KnoxMakers/KM-Laser | extensions/km_deps/libfuturize/fixes/fix_raise.py | 3 | 3884 | """Fixer for 'raise E, V'
From Armin Ronacher's ``python-modernize``.
raise -> raise
raise E -> raise E
raise E, 5 -> raise E(5)
raise E, 5, T -> raise E(5).with_traceback(T)
raise E, None, T -> raise E.with_traceback(T)
raise (((E, E'), E''), E'''), 5 -> raise E(5)
raise "foo", V, T -> warns about string exceptions
raise E, (V1, V2) -> raise E(V1, V2)
raise E, (V1, V2), T -> raise E(V1, V2).with_traceback(T)
CAVEATS:
1) "raise E, V, T" cannot be translated safely in general. If V
is not a tuple or a (number, string, None) literal, then:
raise E, V, T -> from future.utils import raise_
raise_(E, V, T)
"""
# Author: Collin Winter, Armin Ronacher, Mark Huang
# Local imports
from lib2to3 import pytree, fixer_base
from lib2to3.pgen2 import token
from lib2to3.fixer_util import Name, Call, is_tuple, Comma, Attr, ArgList
from libfuturize.fixer_util import touch_import_top
class FixRaise(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] >
"""
def transform(self, node, results):
syms = self.syms
exc = results["exc"].clone()
if exc.type == token.STRING:
msg = "Python 3 does not support string exceptions"
self.cannot_convert(node, msg)
return
# Python 2 supports
# raise ((((E1, E2), E3), E4), E5), V
# as a synonym for
# raise E1, V
# Since Python 3 will not support this, we recurse down any tuple
# literals, always taking the first element.
if is_tuple(exc):
while is_tuple(exc):
# exc.children[1:-1] is the unparenthesized tuple
# exc.children[1].children[0] is the first element of the tuple
exc = exc.children[1].children[0].clone()
exc.prefix = u" "
if "tb" in results:
tb = results["tb"].clone()
else:
tb = None
if "val" in results:
val = results["val"].clone()
if is_tuple(val):
# Assume that exc is a subclass of Exception and call exc(*val).
args = [c.clone() for c in val.children[1:-1]]
exc = Call(exc, args)
elif val.type in (token.NUMBER, token.STRING):
# Handle numeric and string literals specially, e.g.
# "raise Exception, 5" -> "raise Exception(5)".
val.prefix = u""
exc = Call(exc, [val])
elif val.type == token.NAME and val.value == u"None":
# Handle None specially, e.g.
# "raise Exception, None" -> "raise Exception".
pass
else:
# val is some other expression. If val evaluates to an instance
# of exc, it should just be raised. If val evaluates to None,
# a default instance of exc should be raised (as above). If val
# evaluates to a tuple, exc(*val) should be called (as
# above). Otherwise, exc(val) should be called. We can only
# tell what to do at runtime, so defer to future.utils.raise_(),
# which handles all of these cases.
touch_import_top(u"future.utils", u"raise_", node)
exc.prefix = u""
args = [exc, Comma(), val]
if tb is not None:
args += [Comma(), tb]
return Call(Name(u"raise_"), args)
if tb is not None:
tb.prefix = ""
exc_list = Attr(exc, Name('with_traceback')) + [ArgList([tb])]
else:
exc_list = [exc]
return pytree.Node(syms.raise_stmt,
[Name(u"raise")] + exc_list,
prefix=node.prefix)
| gpl-3.0 | 1,069,032,594,874,147,500 | 35.299065 | 80 | 0.530896 | false | 3.73821 | false | false | false |
Catrodigious/OctoPrint-TAM | src/octoprint/server/__init__.py | 1 | 9654 | # coding=utf-8
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
import flask
import tornado.wsgi
from sockjs.tornado import SockJSRouter
from flask import Flask, render_template, send_from_directory, make_response
from flask.ext.login import LoginManager
from flask.ext.principal import Principal, Permission, RoleNeed, identity_loaded, UserNeed
import os
import logging
import logging.config
SUCCESS = {}
NO_CONTENT = ("", 204)
app = Flask("octoprint")
debug = False
printer = None
gcodeManager = None
userManager = None
eventManager = None
loginManager = None
wifiManager = None
wifiInterface = "wlan0"
principals = Principal(app)
admin_permission = Permission(RoleNeed("admin"))
user_permission = Permission(RoleNeed("user"))
# only import the octoprint stuff down here, as it might depend on things defined above to be initialized already
from octoprint.server.util import LargeResponseHandler, ReverseProxied, restricted_access, PrinterStateConnection, admin_validator
from octoprint.printer import Printer, getConnectionOptions
from octoprint.settings import settings
import octoprint.gcodefiles as gcodefiles
import octoprint.util as util
import octoprint.users as users
import octoprint.events as events
import octoprint.timelapse
import octoprint._version
import octoprint.wifi as wifi
versions = octoprint._version.get_versions()
VERSION = versions['version']
BRANCH = versions['branch'] if 'branch' in versions else None
DISPLAY_VERSION = "%s (%s branch)" % (VERSION, BRANCH) if BRANCH else VERSION
del versions
@app.route("/")
def index():
return render_template(
"index.jinja2",
webcamStream=settings().get(["webcam", "stream"]),
enableTimelapse=(settings().get(["webcam", "snapshot"]) is not None and settings().get(["webcam", "ffmpeg"]) is not None),
enableGCodeVisualizer=settings().get(["gcodeViewer", "enabled"]),
enableTemperatureGraph=settings().get(["feature", "temperatureGraph"]),
enableSystemMenu=settings().get(["system"]) is not None and settings().get(["system", "actions"]) is not None and len(settings().get(["system", "actions"])) > 0,
enableAccessControl=userManager is not None,
enableSdSupport=settings().get(["feature", "sdSupport"]),
enableNetworkSettings = settings().get(["feature", "networkSettings"]),
firstRun=settings().getBoolean(["server", "firstRun"]) and (userManager is None or not userManager.hasBeenCustomized()),
debug=debug,
version=VERSION,
display_version=DISPLAY_VERSION,
stylesheet=settings().get(["devel", "stylesheet"]),
gcodeMobileThreshold=settings().get(["gcodeViewer", "mobileSizeThreshold"]),
gcodeThreshold=settings().get(["gcodeViewer", "sizeThreshold"])
)
@app.route("/robots.txt")
def robotsTxt():
return send_from_directory(app.static_folder, "robots.txt")
@identity_loaded.connect_via(app)
def on_identity_loaded(sender, identity):
user = load_user(identity.id)
if user is None:
return
identity.provides.add(UserNeed(user.get_name()))
if user.is_user():
identity.provides.add(RoleNeed("user"))
if user.is_admin():
identity.provides.add(RoleNeed("admin"))
def load_user(id):
if userManager is not None:
return userManager.findUser(id)
return users.DummyUser()
#~~ startup code
class Server():
def __init__(self, configfile=None, basedir=None, host="0.0.0.0", port=5000, debug=False, allowRoot=False):
self._configfile = configfile
self._basedir = basedir
self._host = host
self._port = port
self._debug = debug
self._allowRoot = allowRoot
def run(self):
if not self._allowRoot:
self._checkForRoot()
global printer
global gcodeManager
global userManager
global eventManager
global loginManager
global debug
global wifiManager
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.web import Application, FallbackHandler
debug = self._debug
# first initialize the settings singleton and make sure it uses given configfile and basedir if available
self._initSettings(self._configfile, self._basedir)
# then initialize logging
self._initLogging(self._debug)
logger = logging.getLogger(__name__)
logger.info("Starting OctoPrint %s" % DISPLAY_VERSION)
eventManager = events.eventManager()
gcodeManager = gcodefiles.GcodeManager()
printer = Printer(gcodeManager)
wifiManager = wifi.WifiManager(printer)
# configure timelapse
octoprint.timelapse.configureTimelapse()
# setup command triggers
events.CommandTrigger(printer)
if self._debug:
events.DebugEventListener()
if settings().getBoolean(["accessControl", "enabled"]):
userManagerName = settings().get(["accessControl", "userManager"])
try:
clazz = util.getClass(userManagerName)
userManager = clazz()
except AttributeError, e:
logger.exception("Could not instantiate user manager %s, will run with accessControl disabled!" % userManagerName)
app.wsgi_app = ReverseProxied(app.wsgi_app)
app.secret_key = "k3PuVYgtxNm8DXKKTw2nWmFQQun9qceV"
loginManager = LoginManager()
loginManager.session_protection = "strong"
loginManager.user_callback = load_user
if userManager is None:
loginManager.anonymous_user = users.DummyUser
principals.identity_loaders.appendleft(users.dummy_identity_loader)
loginManager.init_app(app)
if self._host is None:
self._host = settings().get(["server", "host"])
if self._port is None:
self._port = settings().getInt(["server", "port"])
logger.info("Listening on http://%s:%d" % (self._host, self._port))
app.debug = self._debug
from octoprint.server.api import api
app.register_blueprint(api, url_prefix="/api")
self._router = SockJSRouter(self._createSocketConnection, "/sockjs")
def admin_access_validation(request):
"""
Creates a custom wsgi and Flask request context in order to be able to process user information
stored in the current session.
:param request: The Tornado request for which to create the environment and context
"""
wsgi_environ = tornado.wsgi.WSGIContainer.environ(request)
with app.request_context(wsgi_environ):
app.session_interface.open_session(app, flask.request)
loginManager.reload_user()
admin_validator(flask.request)
self._tornado_app = Application(self._router.urls + [
(r"/downloads/timelapse/([^/]*\.mpg)", LargeResponseHandler, {"path": settings().getBaseFolder("timelapse"), "as_attachment": True}),
(r"/downloads/files/local/([^/]*\.(gco|gcode|g))", LargeResponseHandler, {"path": settings().getBaseFolder("uploads"), "as_attachment": True}),
(r"/downloads/logs/([^/]*)", LargeResponseHandler, {"path": settings().getBaseFolder("logs"), "as_attachment": True, "access_validation": admin_access_validation}),
(r".*", FallbackHandler, {"fallback": WSGIContainer(app.wsgi_app)})
])
self._server = HTTPServer(self._tornado_app)
self._server.listen(self._port, address=self._host)
eventManager.fire(events.Events.STARTUP)
if settings().getBoolean(["serial", "autoconnect"]):
(port, baudrate) = settings().get(["serial", "port"]), settings().getInt(["serial", "baudrate"])
connectionOptions = getConnectionOptions()
if port in connectionOptions["ports"]:
printer.connect(port, baudrate)
try:
IOLoop.instance().start()
except KeyboardInterrupt:
logger.info("Goodbye!")
except:
logger.fatal("Now that is embarrassing... Something really really went wrong here. Please report this including the stacktrace below in OctoPrint's bugtracker. Thanks!")
logger.exception("Stacktrace follows:")
def _createSocketConnection(self, session):
global printer, gcodeManager, userManager, eventManager
return PrinterStateConnection(printer, gcodeManager, userManager, eventManager, session)
def _checkForRoot(self):
if "geteuid" in dir(os) and os.geteuid() == 0:
exit("You should not run OctoPrint as root!")
def _initSettings(self, configfile, basedir):
settings(init=True, basedir=basedir, configfile=configfile)
def _initLogging(self, debug):
config = {
"version": 1,
"formatters": {
"simple": {
"format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
}
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "simple",
"stream": "ext://sys.stdout"
},
"file": {
"class": "logging.handlers.TimedRotatingFileHandler",
"level": "DEBUG",
"formatter": "simple",
"when": "D",
"backupCount": "1",
"filename": os.path.join(settings().getBaseFolder("logs"), "octoprint.log")
},
"serialFile": {
"class": "logging.handlers.RotatingFileHandler",
"level": "DEBUG",
"formatter": "simple",
"maxBytes": 2 * 1024 * 1024, # let's limit the serial log to 2MB in size
"filename": os.path.join(settings().getBaseFolder("logs"), "serial.log")
}
},
"loggers": {
#"octoprint.timelapse": {
# "level": "DEBUG"
#},
#"octoprint.events": {
# "level": "DEBUG"
#},
"SERIAL": {
"level": "CRITICAL",
"handlers": ["serialFile"],
"propagate": False
}
},
"root": {
"level": "INFO",
"handlers": ["console", "file"]
}
}
if debug:
config["root"]["level"] = "DEBUG"
logging.config.dictConfig(config)
if settings().getBoolean(["serial", "log"]):
# enable debug logging to serial.log
logging.getLogger("SERIAL").setLevel(logging.DEBUG)
logging.getLogger("SERIAL").debug("Enabling serial logging")
if __name__ == "__main__":
octoprint = Server()
octoprint.run()
| agpl-3.0 | -5,305,622,735,948,998,000 | 31.498316 | 172 | 0.708247 | false | 3.416637 | true | false | false |
cslarsen/dna-traits | py-dnatraits/dna_traits/health.py | 1 | 12313 | # -*- encoding: utf-8 -*-
"""
Used to infer some health related reports.
Use with caution, this code may contain errors!
Copyright (C) 2014, 2016 Christian Stigen Larsen
Distributed under the GPL v3 or later. See COPYING.
"""
from dna_traits.match import unphased_match, assert_european
from dna_traits.util import make_report
import dna_traits.odds as odds
def apoe_variants(genome):
"""APOE-variants (Alzheimer's)."""
rs429358 = genome.rs429358
rs7412 = genome.rs7412
# If both SNPs are phased we can resolve all ambiguities, and finding
# APOe-variants are straight-forward
if rs429358.phased and rs7412.phased:
assert(len(rs429358)==len(rs7412)==2)
apoe = {"CT": "e1",
"TT": "e2",
"TC": "e3",
"CC": "e4"}
variant = []
for n in [0,1]:
variant.append(apoe[str(rs429358)[n] + str(rs7412)[n]])
return "/".join(sorted(variant))
else:
# At least one SNP is non-phased; we can guess the result in all but
# one case
genotypes = "".join(sorted(str(rs429358)))
genotypes += "".join(sorted(str(rs7412)))
variants = {
"CCCC": "e4/e4",
"CCCT": "e1/e4",
"CCTT": "e1/e1",
"CTCC": "e3/e4",
"CTCT": "e1/e3 or e2/e4", # ambiguous
"CTTT": "e1/e2",
"TTCC": "e3/e3",
"TTCT": "e2/e3",
"TTTT": "e2/e2",
}
try:
return variants[genotypes]
except KeyError:
return "<Unknown variant: %s>" % genotypes
def rheumatoid_arthritis_risk(genome):
"""Rheumatoid arthritis."""
raise NotImplementedError()
OR = 0
# FIXME: Fix the OR calculation, it's a complete mess right now
# (attempt to use Mantel-Haenszel instead).
#
# We currently just give a score for each risk allele instead and give
# an thumbs up / down rating.
# These are not applicable for Asians
if genome.ethnicity == "european":
OR += genome.rs6457617.count("T")
if genome.rs2476601 == "GG": OR -= 1
if genome.rs3890745 == "CC": OR += -1
if genome.rs2327832 == "AG": OR += -1
# Only Europeans
# http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2636867/
OR += genome.rs3761847.count("G")
if genome.rs7574865 == "TT": OR += 1
if genome.rs1569723 == "AA": OR += 1
if genome.rs13031237 == "GT": OR += 1
# TODO: Implement rest, ignore scores, just give a "low/medium/high"
# OR.
if OR <= 2:
return "low risk??"
elif OR <= 4:
return "medium risk??"
else:
return "high risk??"
def chronic_kidney_disease(genome):
"""Chronic kidney disease (CKD).
Citations:
http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?cmd=Search&db=PubMed&term=21082022
http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?cmd=Search&db=PubMed&term=20686651
http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?cmd=Search&db=PubMed&term=19430482
"""
# List of (OR, CI, P-value, variance inflaction factor)
ors = []
# Taken from the following GWAS:
# http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2912386/#pgen.1001039-Gretarsdottir2
if genome.ethnicity is None or genome.ethnicity=="european":
# TODO: Find out if the OR is per T-allele or just for the existence
# of one. Here I just see if there is one or more.
if genome.rs4293393.negative().count("T") > 0:
if genome.year_of_birth is None:
ors.append((1.25, 0.95, 4.1e-10, 1.15))
else:
# Stratified OR. Honestly, I think the P-values seem WAY too
# high for births later than 1940.
if genome.year_of_birth < 1920:
ors.append((1.19, 0.95, 0.045, 1.15))
elif genome.year_of_birth < 1930:
ors.append((1.31, 0.95, 4.1e-7, 1.15))
elif genome.year_of_birth < 1940:
ors.append((1.28, 0.95, 3.1e-5, 1.15))
elif genome.year_of_birth < 1950:
ors.append((1.16, 0.95, 0.12, 1.15))
else:
ors.append((1.09, 0.95, 0.57, 1.15))
# Taken from:
# http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2997674/
if genome.ethnicity is None or genome.ethnicity=="european":
# Table 3:
if genome.rs7805747.count("A") > 0:
ors.append((1.19, 0.95, 4.2e-12, None))
pass
if len(ors) > 0:
ORs = [d[0] for d in ors]
pvals = [d[2] for d in ors]
OR_mh, se, pp = odds.pooled_or(zip(ORs, pvals), 1.15)
rr = odds.relative_risk(OR_mh, 0.034)
return "%.2f relative risk, %.2f odds ratio (%d markers)" % (rr, OR_mh, len(ors))
else:
return "<No data>"
"""
rs4293393 AA european OR 1.08 (adjusted)
rs7805747 AG european OR 1.14 (adjusted)
rs7805747 AG european OR 0.96 (adjusted)
From:
http://www.plosgenetics.org/article/fetchObject.action?uri=info%3Adoi%2F10.1371%2Fjournal.pgen.1001039&representation=PDF
rs4293393-T associated with CKD, OR=1.25, P=4.1e-10. Association
stronger with older age groups. CI=1.17-1.35 (95%), N=3203 (no of cases)
Disregard year of birth (stronger association with old age).
See Köttgen.
    Not sure if it is PER T-allele; possibly only the existence of this allele.
    Also, is it minus orientation?
    SNPedia says each G at this allele (actually A, because SNPedia uses
    minus orientation) decreases risk by 24%.
From dbsnp, http://www.ncbi.nlm.nih.gov/SNP/snp_ref.cgi?rs=4293393
it seems that the illumina hapmap300 used in the study uses minus
orientation, because it can only have C/T alleles, while 23andme reports
the A-allele. So this means that A(+) or T(-) is the risk allele.
The reverse version (G+, C-) is protective of CKD actually.
Says:
Association analysis
For case-control association analysis, e.g. for CKD and kidney stones,
we utilized a standard likelihood ratio statistic, implemented in the
NEMO software [32] to calculate two-sided P values and odds ratios (ORs)
for each individual allele, assuming a multiplicative model for risk,
i.e. that the risk of the two alleles carried by a person multiplies
[36]. Allelic frequencies, rather than carrier frequencies, are
presented for the markers and P values are given after adjustment for
the relatedness of the subjects. When estimating genotype specific OR,
genotype frequencies in the population were estimated assuming
Hardy-Weinberg equilibrium.
Results from multiple case-control groups were combined using a
Mantel-Haenszel model [37] in which the groups were allowed to have
different population frequencies for alleles, haplotypes and genotypes
but were assumed to have common relative risks.
For the quantitative trait association analysis, e.g. for SCr and
cystatin C, we utilized a robust linear regression based on an M
estimator [38] as implemented in the rlm function of the R software
package [39]. An additive model for SNP effects was assumed in all
instances. All associations with quantitative traits were performed
adjusting for age and sex.
"""
def restless_leg_syndrome(genome):
"""Restless leg syndrome.
Only for European ancestry.
rs3923809 AA 1.26
AG 0.74
Citations:
http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?cmd=Search&db=PubMed&term=17634447
http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?cmd=Search&db=PubMed&term=17637780
http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?cmd=Search&db=PubMed&term=11340155
"""
if genome.rs3923809 == "GG":
return "Normal risk"
elif genome.rs3923809 == "AG" or genome.rs3923809 == "GA":
return "Slightly increased risk"
elif genome.rs3923809 == "AA":
return "Twice as high risk for developing"
else:
return "<Unknown genotype for rs3923809 %s>" % genome.rs3923809
def scleroderma(genome):
"""Scleroderma (limited cutaneous type)."""
# TODO: Implement odds ratios, find all alleles
if genome.ethnicity is None or genome.ethnicity == "european":
if genome.rs7574865 == "TT":
return "Higher odds"
if genome.rs7574865.count("T") > 0:
return "Slight risk"
return "<Unknown>"
else:
return "<Unknown for this ethnicity>"
def hypothyroidism(genome):
"""Hypothyroidism.
Studies:
http://dx.doi.org/10.1371/journal.pone.0034442
"""
if genome.ethnicity is not None and genome.ethnicity != "european":
raise ValueError("Only applicable to Europeans")
# TODO: Use a better score metric and use weighting and ORs.
# TODO: Try to use interval arithmetic as well, for fun.
scores = {
"rs7850258": {"GG": 0.5, "AG": 0, "AA": -0.5, None: 0},
"rs2476601": {"GG": 1, "AG": 0.5, "AA": 0, None: 0},
"rs3184504": {"TT": 0.5, "CT": 0, "CC": -0.5, None: 0},
"rs4915077": {"CC": 1, "CT": 0.5, "TT": 0, None: 0},
"rs2517532": {"GG": 0.5, "AG": 0, "AA": -0.5, None: 0},
}
hi = sum(map(lambda l: max(l.values()), scores.values()))
lo = sum(map(lambda l: min(l.values()), scores.values()))
score = 0.0
for rsid, genotypes in scores.items():
score += unphased_match(genome[rsid], genotypes)
if score > 0:
s = "About %.1f%% higher risk than baseline\n" % (100.0*score/hi)
s += "(%.1f vs %.1f of %.1f points)\n" % (score, lo, hi)
s += "Test is unweighted, see 23andMe for more info"
return s
elif score < 0:
s = "About %.1f%% lower risk than baseline\n" % 100.0*score/lo
s += "(%.1f vs %.1f of %.1f points)\n" % (score, lo, hi)
s += "Test is unweighted, see 23andMe for more info"
return s
else:
return "Typical risk"
def stroke(genome):
"""Stroke."""
return unphased_match(genome.rs12425791, {
"AA": "Moderately increased risk of having a stroke",
"AG": "Slightly increased risk of having a stroke",
"GG": "Typical risk of having a stroke",
None: "Unable to determine"})
def exfoliation_glaucoma(genome):
"""Exfoliation glaucoma."""
assert_european(genome)
OR = unphased_match(genome.rs2165241, {
"CT": 0.79,
})
raise NotImplementedError()
def migraines(genome):
"""Migranes."""
assert_european(genome)
s = []
s.append(unphased_match(genome.rs2651899, {
"CC": "Slightly higher odds of migraines",
"CT": "Typical odds of migraines",
"TT": "Slightly lower odds of migraines",
None: "Unable to determine"}))
s.append(unphased_match(genome.rs10166942, {
"TT": "Typical odds of migraines",
"CT": "Slightly lower odds of migraines",
"CC": "Slightly lower odds of migraines",
None: "Unable to determine"}))
s.append(unphased_match(genome.rs11172113, {
"TT": "Slightly higher odds of migraines",
"CT": "Typical odds of migraines",
"CC": "Slightly lower odds of migraines",
None: "Unable to determine"}))
return "\n".join(s)
def breast_cancer(genome):
"""Breast cancer."""
if not genome.female:
raise ValueError("Only applicable for females")
s = []
s.append(unphased_match(genome.rs1219648, {
"AA": "Typical odds",
"AG": "Slightly higher odds",
"GG": "Moderately higher odds",
None: "Unable to determine (see rs2420946 instead)"}))
s.append(unphased_match(genome.rs3803662, {
"AA": "Moderately increased odds",
"AG": "?",
"GG": "Typical odds",
None: "Unable to determine"}))
s.append("Note: There are MANY more SNPs to test here...")
# TODO: Add remaining SNPs
return "\n".join(s)
def health_report(genome):
"""Infers some health results."""
return make_report(genome, [
apoe_variants,
breast_cancer,
chronic_kidney_disease,
hypothyroidism,
migraines,
restless_leg_syndrome,
rheumatoid_arthritis_risk,
scleroderma,
stroke,
])
| gpl-3.0 | -7,864,582,680,967,866,000 | 34.686957 | 125 | 0.608512 | false | 3.137615 | false | false | false |
mhance/physics | Snowmass/DelphesReader/scripts/make_limits.py | 2 | 20360 | #!/usr/bin/env python
"""
Author: Sourabh Dube
Make XML files for one channel, with the right uncertainties
"""
import os,sys,commands,subprocess
import argparse
import ROOT
from ROOT import TH1F,TFile
def SetupWorkspace(backgrounds,
sign,
data,
lumiuncer,
discovery,
uncertainty):
if discovery:
opprefix = "DP_onechan_discovery_"
rootfile = "counting_exp_data_discovery_DP.root"
chanfile = "DP_onechan_discovery.xml"
else:
opprefix = "DP_onechan_limit_"
rootfile = "counting_exp_data_limit_DP.root"
chanfile = "DP_onechan_limit.xml"
#
# Write Main Top XML file
#
mainXMLdata = """\
<!DOCTYPE Combination SYSTEM "../share/HistFactorySchema.dtd">
<Combination OutputFilePrefix="./tmp_limits_results/%s" >
<Input>./tmp_limits/%s</Input>
<Measurement Name="DPLSMM" Lumi="1." LumiRelErr="%f" BinLow="0" BinHigh="2" >
<POI>mu</POI>
</Measurement>
</Combination>
""" % (opprefix, chanfile, lumiuncer)
if discovery:
script = open('tmp_limits/top_discovery.xml','w')
else:
script = open('tmp_limits/top_limit.xml','w')
script.write(mainXMLdata)
script.close()
#
# Write Channel XML
#
chanXMLdata = """\
<!DOCTYPE Channel SYSTEM '../share/HistFactorySchema.dtd'>
<Channel Name="channel1" InputFile="./tmp_limits_data/%s">
<Data HistoName="data" HistoPath="" />
<Sample Name="signal" HistoPath="" HistoName="signal">
<NormFactor Name="mu" High="20." Low="0." Val="1." Const="True" />
</Sample>
""" % rootfile
# <OverallSys Name="lumi" High="1.028" Low="0.972" />
# <OverallSys Name="PDFacc" High="1.05" Low="0.95" />
# <OverallSys Name="acc_truth" High="1.15" Low="0.85" />
setupWSfile = TFile("tmp_limits_data/%s" % rootfile,"RECREATE")
doSingleBGModel=False
if not doSingleBGModel:
for key,value in backgrounds.iteritems():
chanXMLdata+="""\
<Sample Name="%s" HistoPath="" NormalizeByTheory="True" HistoName="%s">
<OverallSys Name="%s" Low="%f" High="%f"/>
</Sample>
""" % (key,key,key+"_norm",1.-float(uncertainty),1.+float(uncertainty))
hist = TH1F(key,key+" hist",1,0,1)
hist.Fill(0.5,value)
hist.Write(key)
else:
BGtotal=0
for key,value in backgrounds.iteritems():
BGtotal+=value
key="BG"
hist = TH1F(key,key+" hist",1,0,1)
hist.Fill(0.5,BGtotal)
hist.Write(key)
chanXMLdata+="""\
<Sample Name="%s" HistoPath="" NormalizeByTheory="True" HistoName="%s">
<OverallSys Name="%s" Low="%f" High="%f"/>
</Sample>
""" % (key,key,key+"_norm",1.-float(uncertainty),1.+float(uncertainty))
chanXMLdata+="""\
</Channel>
"""
script = open('tmp_limits/'+chanfile,'w')
script.write(chanXMLdata)
script.close()
hist = TH1F("signal", "signal hist", 1,0,1)
hist.Fill(0.5,sign)
hist.Write("signal")
hist = TH1F("data", "data hist", 1,0,1)
hist.Fill(0.5,data)
hist.Write("data")
setupWSfile.Close()
if discovery:
os.system("hist2workspace tmp_limits/top_discovery.xml > tmp_limits/setup_discovery.log 2>&1")
else:
os.system("hist2workspace tmp_limits/top_limit.xml > tmp_limits/setup_limit.log 2>&1")
def run_limit(line,
backgrounds,
lumiuncer,
toys,
points,
mulow,
muhigh,
uncertainty):
cleanup = """\
mkdir -p tmp_limits
mkdir -p tmp_limits_data
mkdir -p tmp_limits_results
rm -f tmp_limits/*
rm -f tmp_limits_data/*
rm -f tmp_limits_results/*
"""
os.system(cleanup)
fullcls = 0
if toys>0:
fullcls = 1
# figure out how much signal we have
words_list = line.split()
label=words_list[0]
sign=float(words_list[1])
# and how much background
totalbg=0
for key,value in backgrounds.iteritems():
totalbg = totalbg+value
data=totalbg
# quick check to see if we should even bother with limits.
if sign <= 1.:
if (fullcls==0):
print "%s : -2sig = %1.4f, -1sig = %1.4f, Median Exp = %1.4f, +1sig = %1.4f, +2sig = %1.4f, p0 = %1.3e (%1.4f sigma)" % (label,
10,
10,
10,
10,
10,
10,
0);
else:
print data,sign,'==RESFRQ==',10,10,10,10,10,10
return
scale=1.
# This does nasty things in the WinoBino grid... found it necessary for
# the GMSB signals, but not so much here.
if False:
if sign > 1000*totalbg:
scale = 3000.
elif sign > 100*totalbg:
scale = 300.
elif sign > 10*totalbg:
scale = 30.
elif sign > totalbg:
scale = 3.
sign = sign/scale
print "setting up workspace with %f signal events %f background events." % (sign,totalbg)
SetupWorkspace(backgrounds,sign,data,lumiuncer,False,uncertainty)
SetupWorkspace(backgrounds,sign,data+sign,lumiuncer,True,uncertainty)
cmd2 = """\
./bin/runCEws -f %i -t %i -p %i -l %f -h %f >& tmp_limits/limit.log
""" % (fullcls,toys,points,mulow,muhigh)
#print cmd2
os.system(cmd2)
cmd3 = """\
grep "==RESULT==" tmp_limits/limit.log
"""
cmd4 = """\
grep "computed upper limit" tmp_limits/limit.log | awk '{print $6}'
"""
cmd5 = """\
grep "expected limit (median) " tmp_limits/limit.log | awk '{print $4}'
"""
cmd6 = """\
grep "expected limit (+1 sig) " tmp_limits/limit.log | awk '{print $5}'
"""
cmd7 = """\
grep "expected limit (-1 sig) " tmp_limits/limit.log | awk '{print $5}'
"""
cmd8 = """\
grep "expected limit (+2 sig) " tmp_limits/limit.log | awk '{print $5}'
"""
cmd9 = """\
grep "expected limit (-2 sig) " tmp_limits/limit.log | awk '{print $5}'
"""
if (fullcls==0):
# os.system(cmd3)
p = os.popen(cmd3)
res = p.readline()
ressplit = res.split()
p.close()
printEventLimits=True
if not printEventLimits:
scale=(1/scale)*100.
else:
scale=(1/scale)
p = os.popen("grep \"DISCOVERY\" tmp_limits/limit.log" )
res2 = p.readline()
res2split = res2.split()
p.close()
if len(res2split) > 2:
if float(res2split[1]) < 1e-20:
res2split[2] = "10"
if len(res2split) > 2 and len(ressplit) > 6:
print "%s : -2sig = %1.4f, -1sig = %1.4f, Median Exp = %1.4f, +1sig = %1.4f, +2sig = %1.4f, p0 = %1.3e (%1.4f sigma)" % (label,
scale*float(ressplit[6]),
scale*float(ressplit[4]),
scale*float(ressplit[2]),
scale*float(ressplit[3]),
scale*float(ressplit[5]),
float(res2split[1]),
float(res2split[2]));
else:
p = os.popen(cmd4)
res1 = (p.readline()).rstrip()
p.close()
p = os.popen(cmd5)
res2 = (p.readline()).rstrip()
p.close()
p = os.popen(cmd6)
res3 = (p.readline()).rstrip()
p.close()
p = os.popen(cmd7)
res4 = (p.readline()).rstrip()
p.close()
p = os.popen(cmd8)
res5 = (p.readline()).rstrip()
p.close()
p = os.popen(cmd9)
res6 = (p.readline()).rstrip()
p.close()
print data,sign,'==RESFRQ==',res1,res2,res3,res4,res5,res6
def SetupWorkspaceOpt(optresults,
lumiuncer,
discovery,
uncertainty,
flatBGUnc,
useSingleBGModel):
if discovery:
opprefix = "susy_discovery_"
else:
opprefix = "susy_limit_"
#
# Write Main Top XML file
#
if discovery:
script = open('tmp_limits/top_discovery.xml','w')
else:
script = open('tmp_limits/top_limit.xml','w')
script.write("""\
<!DOCTYPE Combination SYSTEM "../share/HistFactorySchema.dtd">
<Combination OutputFilePrefix="./tmp_limits_results/%s" >
""" % opprefix)
# --------------------------------------------------------
# parse optresults
goodchannels=0
forcetoys=False
for line in open(optresults,"r"):
l=line.split()
if float(l[3])<0.5:
continue
else:
goodchannels=goodchannels+1
rootfile=opprefix+"_chan_"+l[2]+".root"
chanfile="chan_%s.xml" % l[2]
script.write("""
<Input>./tmp_limits/%s</Input>
""" % chanfile)
# write the channel data
chan=open("./tmp_limits/%s" % chanfile, 'w')
chan.write("""\
<!DOCTYPE Channel SYSTEM '../share/HistFactorySchema.dtd'>
<Channel Name="channel_%s" InputFile="./tmp_limits_data/%s">
<Data HistoName="data" HistoPath="" />
<Sample Name="signal" HistoPath="" HistoName="signal">
<NormFactor Name="mu" High="20." Low="0." Val="1." Const="True" />
</Sample>
""" % (l[2],rootfile))
setupWSfile = TFile("tmp_limits_data/%s" % rootfile,"RECREATE")
bglabels=["Bj", "tt", "tB", "tj", "ttB"]
if "100TeV" in optresults:
bglabels+=["QCD"]
totalbg=0.
for i in range(len(bglabels)):
# only do this if the backgrounds are non-zero
if float(l[i+7]) > 0.00:
bgval=float(l[i+7])
totalbg+=bgval
if useSingleBGModel:
continue
hist = TH1F(bglabels[i],bglabels[i]+" hist",1,0,1)
if bgval<0.001:
bgval=0.001
#hist.Fill(0.5,bgval)
hist.SetBinContent(1,bgval)
hist.Write(bglabels[i])
chan.write("""\
<Sample Name="%s" HistoPath="" NormalizeByTheory="True" HistoName="%s">
""" % (bglabels[i],bglabels[i]))
if bglabels[i]!="Bj" or flatBGUnc:
chan.write("""\
<OverallSys Name="%s" Low="%f" High="%f"/>
""" % (bglabels[i]+"_norm",1.-float(uncertainty),1.+float(uncertainty)))
else:
reluncZll=((bgval*0.5)**0.5)/(bgval*0.5)
if ((reluncZll**2.+(float(uncertainty)/2.)**2.)**0.5)<float(uncertainty):
chan.write("""\
<OverallSys Name="%s_bin_%s" Low="%f" High="%f"/>
<OverallSys Name="%s" Low="%f" High="%f"/>
""" % (bglabels[i]+"_Zll",l[2],1.-reluncZll,1.+reluncZll, bglabels[i]+"_norm",1.-float(uncertainty)/2.,1.+float(uncertainty)/2.))
else:
chan.write("""\
<OverallSys Name="%s" Low="%f" High="%f"/>
""" % (bglabels[i]+"_norm",1.-float(uncertainty),1.+float(uncertainty)))
chan.write("""\
</Sample>
""")
if useSingleBGModel:
hist = TH1F("BG","BG"+" hist",1,0,1)
hist.SetBinContent(1,totalbg)
hist.Write("BG")
chan.write("""\
<Sample Name="%s" HistoPath="" NormalizeByTheory="True" HistoName="%s">
""" % ("BG","BG"))
chan.write("""\
<OverallSys Name="%s" Low="%f" High="%f"/>
""" % ("BG"+"_norm",1.-float(uncertainty),1.+float(uncertainty)))
chan.write("""\
</Sample>
""")
hist = TH1F("signal", "signal hist", 1,0,1)
#hist.Fill(0.5,float(l[5]))
hist.SetBinContent(1,float(l[5]))
hist.Write("signal")
hist = TH1F("data", "data hist", 1,0,1)
if not discovery:
#hist.Fill(0.5,totalbg)
hist.SetBinContent(1,totalbg)
hist.SetBinError(1,totalbg**0.5)
else:
#hist.Fill(0.5,totalbg+float(l[5]))
hist.SetBinContent(1,(totalbg+float(l[5])))
hist.SetBinError(1,(totalbg+float(l[5]))**0.5)
hist.Write("data")
setupWSfile.Close()
chan.write("""\
</Channel>
""")
chan.close()
if float(l[3])>1.0 and (float(l[5])<5 or totalbg<5):
forcetoys=True
# --------------------------------------------------------
script.write("""
<Measurement Name="DPLSMM" Lumi="1." LumiRelErr="%f" BinLow="0" BinHigh="2" >
<POI>mu</POI>
</Measurement>
</Combination>
""" % (lumiuncer))
script.close()
if discovery:
os.system("hist2workspace tmp_limits/top_discovery.xml > tmp_limits/setup_discovery.log 2>&1")
else:
os.system("hist2workspace tmp_limits/top_limit.xml > tmp_limits/setup_limit.log 2>&1")
return goodchannels,forcetoys
def run_limit_opt(optresultsfile,
lumiuncer,
toys,
points,
mulow,
muhigh,
uncertainty,
flatBGUnc,
useSingleBGModel):
cleanup = """\
mkdir -p tmp_limits
mkdir -p tmp_limits_data
mkdir -p tmp_limits_results
rm -f tmp_limits/*
rm -f tmp_limits_data/*
rm -f tmp_limits_results/*
"""
os.system(cleanup)
goodchannels,forcetoys=SetupWorkspaceOpt(optresultsfile,lumiuncer,False,uncertainty,flatBGUnc,useSingleBGModel)
SetupWorkspaceOpt(optresultsfile,lumiuncer, True,uncertainty,flatBGUnc,useSingleBGModel)
fullcls = 0
if forcetoys and toys<1000 and False:
toys=1000
if points>20:
points=20
if toys>0:
fullcls = 1
if goodchannels>0:
cmd2 = """\
./bin/runCEws -f %i -t %i -p %i -l %f -h %f -L tmp_limits_results/susy_limit__combined_DPLSMM_model.root -D tmp_limits_results/susy_discovery__combined_DPLSMM_model.root -n combined >& tmp_limits/limit.log
""" % (fullcls,toys,int(points),mulow,muhigh)
print cmd2
os.system(cmd2)
else:
cmd2="echo \"==RESULT== 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0\" > tmp_limits/limit.log"
os.system(cmd2)
cmd2="echo \"==DISCOVERY== 0.5 0.0\" >> tmp_limits/limit.log"
os.system(cmd2)
cmd3 = """\
grep "==RESULT==" tmp_limits/limit.log
"""
cmd4 = """\
grep "computed upper limit" tmp_limits/limit.log | awk '{print $6}'
"""
cmd5 = """\
grep "expected limit (median) " tmp_limits/limit.log | awk '{print $4}'
"""
cmd6 = """\
grep "expected limit (+1 sig) " tmp_limits/limit.log | awk '{print $5}'
"""
cmd7 = """\
grep "expected limit (-1 sig) " tmp_limits/limit.log | awk '{print $5}'
"""
cmd8 = """\
grep "expected limit (+2 sig) " tmp_limits/limit.log | awk '{print $5}'
"""
cmd9 = """\
grep "expected limit (-2 sig) " tmp_limits/limit.log | awk '{print $5}'
"""
# os.system(cmd3)
p = os.popen(cmd3)
res = p.readline()
ressplit = res.split()
p.close()
scale=1.
printEventLimits=True
if not printEventLimits:
scale=(1/scale)*100.
else:
scale=(1/scale)
p = os.popen("grep \"DISCOVERY\" tmp_limits/limit.log" )
res2 = p.readline()
res2split = res2.split()
p.close()
if len(res2split) > 2:
if float(res2split[1]) < 1e-20:
res2split[2] = "10"
if len(res2split) > 2 and len(ressplit) > 6:
print "%s : -2sig = %1.4f, -1sig = %1.4f, Median Exp = %1.4f, +1sig = %1.4f, +2sig = %1.4f, p0 = %1.3e (%1.4f sigma)" % ("dummy",
scale*float(ressplit[6]),
scale*float(ressplit[4]),
scale*float(ressplit[2]),
scale*float(ressplit[3]),
scale*float(ressplit[5]),
float(res2split[1]),
float(res2split[2]));
def main(argv):
parser = argparse.ArgumentParser(description="Command line arguments")
parser.add_argument("--background" , action='store', default="")
parser.add_argument("--toys" , action='store', default=0)
parser.add_argument("--signal" , action='store', default="")
parser.add_argument("--mulow" , action='store', default=0)
parser.add_argument("--muhigh" , action='store', default=5)
parser.add_argument("--points" , action='store', default=100)
parser.add_argument("--lumiUnc" , action='store', default=.028)
parser.add_argument("--uncertainty" , action='store', default=0.20)
parser.add_argument("--prefix" , action='store', default="test")
parser.add_argument("--optresults" , action='store', default="")
parser.add_argument("--flatBGUnc" , action='store_true')
parser.add_argument("--singleBGModel", action='store_true')
args=parser.parse_args()
if args.optresults != "":
run_limit_opt(args.optresults,
args.lumiUnc,
args.toys,
args.points,
args.mulow,
args.muhigh,
args.uncertainty,
args.flatBGUnc,
args.singleBGModel)
else:
backgrounds={}
bgfile = open(args.background)
for bg in bgfile.xreadlines():
bgsplit = bg.split()
if len(bgsplit) < 2:
continue
backgrounds[bgsplit[0]] = float(bgsplit[1])
sigfile = open(args.signal)
for line in sigfile.xreadlines():
run_limit(line,
backgrounds,
args.lumiUnc,
args.toys,
args.points,
args.mulow,
args.muhigh,
args.uncertainty)
if __name__ == '__main__':
main(sys.argv[1:])
| gpl-2.0 | 2,355,134,792,932,228,000 | 33.803419 | 207 | 0.460658 | false | 3.747469 | false | false | false |
pimoroni/python-sparkfun-zx | zx.py | 1 | 1180 | import smbus
ADDR = 0x10
bus = smbus.SMBus(1)
__debug = False
SWIPE_RIGHT = 0x01
SWIPE_LEFT = 0x02
SWIPE_UP = 0x03
HOVER = 0x05
HOVER_LEFT = 0x06
HOVER_RIGHT = 0x07
HOVER_UP = 0x08
def gesture_name(gesture):
if gesture is None or gesture > HOVER_UP:
return None
return [
None,
'Swipe Right',
'Swipe Left',
'Swipe Up',
None,
'Hover',
'Hover Left',
'Hover Right',
'Hover Up'
][gesture]
def gesture_available():
status = bus.read_byte_data(ADDR, 0x00)
if __debug: print("Status: {:08b}".format(status))
return (status & 0b00011100) > 0
def position_available():
status = bus.read_byte_data(ADDR, 0x00)
return (status & 0b00000001) > 0
def get_x():
return bus.read_byte_data(ADDR, 0x08)
def get_z():
return bus.read_byte_data(ADDR, 0x0a)
def get_position():
return get_z(), get_x()
def get_gesture():
gesture = bus.read_byte_data(ADDR, 0x04)
if gesture in [HOVER, HOVER_LEFT, HOVER_RIGHT, HOVER_UP, SWIPE_LEFT, SWIPE_RIGHT, SWIPE_UP]:
return gesture
return None
def get_speed():
return bus.read_byte_data(ADDR, 0x05)
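# Minimal usage sketch (illustrative only, not part of the original module):
# poll the sensor over I2C bus 1 at ADDR and report gestures and positions
# as they arrive.
if __name__ == "__main__":
    import time
    while True:
        if gesture_available():
            gesture = get_gesture()
            if gesture is not None:
                print("Gesture: {}".format(gesture_name(gesture)))
        if position_available():
            print("Position (z, x): {}".format(get_position()))
        time.sleep(0.05)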
| bsd-3-clause | 194,380,839,664,205,600 | 17.4375 | 96 | 0.608475 | false | 2.712644 | false | false | false |
deepmind/deepmind-research | byol/utils/optimizers.py | 1 | 6161 | # Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of LARS Optimizer with optax."""
from typing import Any, Callable, List, NamedTuple, Optional, Tuple
import jax
import jax.numpy as jnp
import optax
import tree as nest
# A filter function takes a path and a value as input and outputs True for
# variable to apply update and False not to apply the update
FilterFn = Callable[[Tuple[Any], jnp.ndarray], jnp.ndarray]
def exclude_bias_and_norm(path: Tuple[Any], val: jnp.ndarray) -> jnp.ndarray:
"""Filter to exclude biaises and normalizations weights."""
del val
if path[-1] == "b" or "norm" in path[-2]:
return False
return True
def _partial_update(updates: optax.Updates,
new_updates: optax.Updates,
params: optax.Params,
filter_fn: Optional[FilterFn] = None) -> optax.Updates:
"""Returns new_update for params which filter_fn is True else updates."""
if filter_fn is None:
return new_updates
wrapped_filter_fn = lambda x, y: jnp.array(filter_fn(x, y))
params_to_filter = nest.map_structure_with_path(wrapped_filter_fn, params)
def _update_fn(g: jnp.ndarray, t: jnp.ndarray, m: jnp.ndarray) -> jnp.ndarray:
m = m.astype(g.dtype)
return g * (1. - m) + t * m
return jax.tree_multimap(_update_fn, updates, new_updates, params_to_filter)
class ScaleByLarsState(NamedTuple):
mu: jnp.ndarray
def scale_by_lars(
momentum: float = 0.9,
eta: float = 0.001,
filter_fn: Optional[FilterFn] = None) -> optax.GradientTransformation:
"""Rescales updates according to the LARS algorithm.
Does not include weight decay.
References:
[You et al, 2017](https://arxiv.org/abs/1708.03888)
Args:
    momentum: momentum coefficient.
eta: LARS coefficient.
filter_fn: an optional filter function.
Returns:
An (init_fn, update_fn) tuple.
"""
def init_fn(params: optax.Params) -> ScaleByLarsState:
mu = jax.tree_multimap(jnp.zeros_like, params) # momentum
return ScaleByLarsState(mu=mu)
def update_fn(updates: optax.Updates, state: ScaleByLarsState,
params: optax.Params) -> Tuple[optax.Updates, ScaleByLarsState]:
def lars_adaptation(
update: jnp.ndarray,
param: jnp.ndarray,
) -> jnp.ndarray:
param_norm = jnp.linalg.norm(param)
update_norm = jnp.linalg.norm(update)
return update * jnp.where(
param_norm > 0.,
jnp.where(update_norm > 0,
(eta * param_norm / update_norm), 1.0), 1.0)
adapted_updates = jax.tree_multimap(lars_adaptation, updates, params)
adapted_updates = _partial_update(updates, adapted_updates, params,
filter_fn)
mu = jax.tree_multimap(lambda g, t: momentum * g + t,
state.mu, adapted_updates)
return mu, ScaleByLarsState(mu=mu)
return optax.GradientTransformation(init_fn, update_fn)
class AddWeightDecayState(NamedTuple):
"""Stateless transformation."""
def add_weight_decay(
weight_decay: float,
filter_fn: Optional[FilterFn] = None) -> optax.GradientTransformation:
"""Adds a weight decay to the update.
Args:
    weight_decay: weight decay coefficient.
filter_fn: an optional filter function.
Returns:
An (init_fn, update_fn) tuple.
"""
def init_fn(_) -> AddWeightDecayState:
return AddWeightDecayState()
def update_fn(
updates: optax.Updates,
state: AddWeightDecayState,
params: optax.Params,
) -> Tuple[optax.Updates, AddWeightDecayState]:
new_updates = jax.tree_multimap(lambda g, p: g + weight_decay * p, updates,
params)
new_updates = _partial_update(updates, new_updates, params, filter_fn)
return new_updates, state
return optax.GradientTransformation(init_fn, update_fn)
LarsState = List # Type for the lars optimizer
def lars(
learning_rate: float,
weight_decay: float = 0.,
momentum: float = 0.9,
eta: float = 0.001,
weight_decay_filter: Optional[FilterFn] = None,
lars_adaptation_filter: Optional[FilterFn] = None,
) -> optax.GradientTransformation:
"""Creates lars optimizer with weight decay.
References:
[You et al, 2017](https://arxiv.org/abs/1708.03888)
Args:
learning_rate: learning rate coefficient.
weight_decay: weight decay coefficient.
momentum: momentum coefficient.
eta: LARS coefficient.
weight_decay_filter: optional filter function to only apply the weight
decay on a subset of parameters. The filter function takes as input the
parameter path (as a tuple) and its associated update, and return a True
for params to apply the weight decay and False for params to not apply
the weight decay. When weight_decay_filter is set to None, the weight
decay is not applied to the bias, i.e. when the variable name is 'b', and
      the weight decay is not applied to normalization params, i.e. the
      penultimate path component contains 'norm'.
lars_adaptation_filter: similar to weight decay filter but for lars
adaptation
Returns:
An optax.GradientTransformation, i.e. a (init_fn, update_fn) tuple.
"""
if weight_decay_filter is None:
weight_decay_filter = lambda *_: True
if lars_adaptation_filter is None:
lars_adaptation_filter = lambda *_: True
return optax.chain(
add_weight_decay(
weight_decay=weight_decay, filter_fn=weight_decay_filter),
scale_by_lars(
momentum=momentum, eta=eta, filter_fn=lars_adaptation_filter),
optax.scale(-learning_rate),
)
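# Minimal usage sketch (the hyper-parameter values and the parameter pytree
# below are placeholders, not taken from this repository): build the optimizer
# and apply a single update step with optax.
if __name__ == "__main__":
  params = {"layer1": {"w": jnp.ones((3, 3)), "b": jnp.zeros((3,))}}
  grads = jax.tree_map(jnp.ones_like, params)
  optimizer = lars(
      learning_rate=0.2,
      weight_decay=1.5e-6,
      weight_decay_filter=exclude_bias_and_norm,
      lars_adaptation_filter=exclude_bias_and_norm)
  opt_state = optimizer.init(params)
  updates, opt_state = optimizer.update(grads, opt_state, params)
  params = optax.apply_updates(params, updates)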
| apache-2.0 | 7,526,814,321,664,008,000 | 31.771277 | 80 | 0.678624 | false | 3.57574 | false | false | false |
madscatt/zazzie_1.5 | trunk/sassie/build/gui_mimic_pdbrx.py | 1 | 1026 | import sys, os
import logging
sys.path.append('./')
import sassie.build.pdb_rx as pdb_rx
import sassie.util.sasconfig as sasconfig
import sassie.interface.input_filter as input_filter
import multiprocessing
svariables = {}
#### user input ####
#### user input ####
#### user input ####
runname = 'run_0'
pdbfile = 'testing/data/5E3L.pdb'
topfile = os.path.join(sasconfig.__bin_path__,'toppar','top_all27_prot_na.inp')
use_defaults = False
#### end user input ####
#### end user input ####
#### end user input ####
logging.basicConfig()
svariables['runname'] = (runname,'string')
svariables['pdbfile'] = (pdbfile,'string')
svariables['topfile'] = (topfile,'string')
svariables['defaults'] = (use_defaults,'boolean')
error,variables = input_filter.type_check_and_convert(svariables)
if(len(error)>0):
print 'error = ',error
sys.exit()
txtQueue = multiprocessing.JoinableQueue()
scan = pdb_rx.PDBRx()
scan.main(variables,txtQueue)
this_text = txtQueue.get(True, timeout=0.1)
| gpl-3.0 | 7,903,234,656,523,687,000 | 22.318182 | 79 | 0.669591 | false | 3.147239 | false | true | false |
spilgames/novacek | novacek/auth.py | 1 | 2979 | #!/usr/bin/env python
#
# vim: set expandtab:ts=4:sw=4
#
# Authors: Jasper Capel
# Robert van Leeuwen
#
# Funtion: Handles authentication to various OpenStack API's and
# other authentication based (Keystone) functions
#
# This software is released under the terms of the Apache License.
#
from keystoneclient.v2_0.client import Client as keystonec
from neutronclient.v2_0.client import Client as neutronc
from novaclient.v3.client import Client as novac
import ConfigParser
import os
### AUTH FUNCTIONS ###
def get_os_credentials(filename='/etc/nova/nova.conf'):
'''Attempts to get credentials from an openstack config if it exists, otherwise from env'''
if os.path.exists(filename):
c = ConfigParser.RawConfigParser()
s = 'DEFAULT'
c.read(filename)
creds = {'username': c.get(s, 'neutron_admin_username'),
'password': c.get(s, 'neutron_admin_password'),
'tenant_name': c.get(s, 'neutron_admin_tenant_name'),
'region_name': c.get(s, 'os_region_name'),
'auth_url': c.get(s, 'neutron_admin_auth_url')}
else:
creds = {'username': os.getenv('OS_USERNAME'),
'password': os.getenv('OS_PASSWORD'),
'tenant_name': os.getenv('OS_TENANT_NAME'),
'region_name': os.getenv('OS_REGION_NAME', 'ams1'),
'auth_url': os.getenv('OS_AUTH_URL')}
return creds
def get_keystonesession(credentials=None):
if not credentials:
credentials = get_os_credentials()
from keystoneclient.auth.identity import v2
from keystoneclient import session
auth = v2.Password(username=credentials['username'],
password=credentials['password'],
tenant_name=credentials['tenant_name'],
auth_url=credentials['auth_url'])
return session.Session(auth=auth)
def get_keystoneclient(session):
'''Returns keystoneclient instance'''
return keystonec(session=session)
def get_neutronclient(session):
'''Returns neutronclient instance'''
creds = get_os_credentials()
return neutronc(username=creds['username'],
password=creds['password'],
tenant_name=creds['tenant_name'],
auth_url=creds['auth_url'])
def get_novaclient(session):
# Version of novaclient we use doesn't support using existing session
creds = get_os_credentials()
return novac(creds['username'], creds['password'], creds['tenant_name'], creds['auth_url'], region_name=creds['region_name'])
def get_tenants(session):
keystone = get_keystoneclient(session)
return keystone.tenants.list()
def get_tenant_email(session, tid):
keystone = get_keystoneclient(session)
return keystone.tenants.get(tid)
def show_creds():
credentials = get_os_credentials()
for cred in credentials:
print "export OS_" + cred.upper() + "=" + credentials[cred]
| apache-2.0 | -6,892,060,041,972,486,000 | 34.047059 | 129 | 0.642497 | false | 3.914586 | false | false | false |
eroicaleo/LearningPython | HandsOnML/ch09/house_gd.py | 1 | 1695 | #!/usr/bin/env python
import numpy as np
import tensorflow as tf
from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import StandardScaler
learning_rate = 0.01
n_epochs = 10000
def scaler_norm(a):
return StandardScaler().fit(a).transform(a)
housing = fetch_california_housing()
m, n = housing.data.shape
housing_data_norm = scaler_norm(housing.data)
housing_data_plus_bias = np.c_[np.ones((m, 1)), housing_data_norm]
y_norm = scaler_norm(housing.target.reshape(-1, 1))
X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name='X')
y = tf.constant(y_norm, dtype=tf.float32, name='y')
XT = tf.transpose(X)
theta = tf.Variable(tf.random_uniform([n+1, 1], -1.0, 1.0), dtype=tf.float32, name='theta')
y_pred = tf.matmul(X, theta)
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name='mse')
gradients = 2 / m * tf.matmul(XT, error)
training_op = tf.assign(theta, theta - learning_rate * gradients)
init = tf.global_variables_initializer()
print('#'*80)
print('## Gradient descent')
print('#'*80)
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
if epoch % 100 == 0:
print('Epoch', epoch, 'MSE = ', mse.eval())
sess.run(training_op)
best_theta = theta.eval()
print(best_theta)
print('#'*80)
print('## Verifying with equation')
print('#'*80)
theta_cal = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(XT, X)), XT), y)
y_pred_cal = tf.matmul(X, theta_cal)
error_cal = y_pred_cal - y
mse_cal = tf.reduce_mean(tf.square(error_cal), name='mse')
with tf.Session() as sess:
init.run()
theta_cal_val, mse_cal = sess.run([theta_cal, mse_cal])
print(theta_cal_val, mse_cal)
| mit | 2,936,218,098,030,359,600 | 28.224138 | 91 | 0.672566 | false | 2.76509 | false | false | false |
hoytak/lazyrunner | lazyrunner/pnstructures.py | 1 | 34464 | from treedict import TreeDict
from parameters import applyPreset
from collections import defaultdict
from os.path import join, abspath, exists, split
from os import makedirs
import hashlib, base64, weakref, sys, gc, logging
from itertools import chain
from collections import namedtuple
from pmodule import isPModule, getPModuleClass
from diskio import saveResults, loadResults
################################################################################
# Stuff to manage the cache
class PNodeModuleCacheContainer(object):
def __init__(self, pn_name, name,
local_key, dependency_key,
specific_key = None,
is_disk_writable = True,
is_persistent = True):
self.__pn_name = pn_name
self.__name = name
self.__specific_key = specific_key
self.__local_key = local_key
self.__dependency_key = dependency_key
self.__is_disk_writable = is_disk_writable
self.__is_non_persistent = not is_persistent
self.__obj = None
self.__obj_is_loaded = False
self.__disk_save_hook = None
self.__non_persistent_hook = None
def getFilename(self):
def v(t):
return str(t) if t is not None else "null"
return join(v(self.__pn_name), v(self.__name),
"%s-%s-%s.dat" % (v(self.__local_key), v(self.__dependency_key),
v(self.__specific_key)) )
def getKeyAsString(self):
return '-'.join( (str(t) if t is not None else "N")
for t in [self.__pn_name, self.__name,
self.__local_key,
self.__dependency_key,
self.__specific_key])
def getCacheKey(self):
# The specific cache
return (self.__pn_name, self.__local_key, self.__dependency_key)
def getObjectKey(self):
return (self.__name, self.__specific_key)
def isNonPersistent(self):
return self.__is_non_persistent
def getNonPersistentKey(self):
assert self.__is_non_persistent
return (self.__pn_name, self.__name)
def setObject(self, obj):
assert not self.__obj_is_loaded
self.__obj_is_loaded = True
self.__obj = obj
if self.__disk_save_hook is not None:
self.__disk_save_hook(self)
self.__disk_save_hook = None
if self.__non_persistent_hook is not None:
self.__non_persistent_hook(self)
self.__non_persistent_hook = None
def isLocallyEqual(self, pnc):
return self.__name == pnc.__name and self.__specific_key == pnc.__specific_key
def setObjectSaveHook(self, hook):
self.__disk_save_hook = hook
def setNonPersistentObjectSaveHook(self, hook):
assert self.__is_non_persistent
self.__non_persistent_hook = hook
def getObject(self):
assert self.__obj_is_loaded
return self.__obj
def objectIsLoaded(self):
return self.__obj_is_loaded
def disableDiskWriting(self):
self.__is_disk_writable = False
self.__disk_save_hook = None
def isDiskWritable(self):
return self.__is_disk_writable
def objRefCount(self):
return sys.getrefcount(self.__obj)
class PNodeModuleCache(object):
__slots__ = ["reference_count", "cache"]
def __init__(self):
self.reference_count = 0
self.cache = {}
class _PNodeNonPersistentDeleter(object):
def __init__(self, common):
self.common = common
def __call__(self, container):
np_key = container.getNonPersistentKey()
try:
old_container = self.common.non_persistant_pointer_lookup[np_key]
except KeyError:
old_container = None
if old_container is not None:
try:
del self.common.cache_lookup[old_container.getCacheKey()].cache[old_container.getObjectKey()]
except KeyError:
pass
self.common.non_persistant_pointer_lookup[np_key] = container
# This class holds the runtime environment for the pnodes
class PNodeCommon(object):
def __init__(self, opttree):
self.log = logging.getLogger("RunCTRL")
# This is for node filtering, i.e. eliminating duplicates
self.pnode_lookup = weakref.WeakValueDictionary()
self.non_persistant_pointer_lookup = weakref.WeakValueDictionary()
self.non_persistant_deleter = _PNodeNonPersistentDeleter(self)
        # This is for local cache lookup
self.cache_lookup = defaultdict(PNodeModuleCache)
self.cache_directory = opttree.cache_directory
self.disk_read_enabled = opttree.disk_read_enabled
self.disk_write_enabled = opttree.disk_write_enabled
self.opttree = opttree
def getResults(self, parameters, names):
if type(names) is str:
single = True
names = [names]
else:
single = False
def getPN(n):
if type(n) is not str:
raise TypeError("Module name not a string.")
pn = PNode(self, parameters, n, 'results')
pn.initialize()
pn = self.registerPNode(pn)
pn.increaseParameterReference()
pn.increaseResultReference()
return pn
pn_list = [getPN(n) for n in names]
assert len(set(id(pn) for pn in pn_list)) == len(set(names))
ret_list = [pn.pullUpToResults().result for pn in pn_list]
if single:
assert len(ret_list) == 1
return ret_list[0]
else:
return ret_list
def registerPNode(self, pn):
# see if it's a duplicate
key = (pn.name, pn.key)
if key in self.pnode_lookup:
pnf = self.pnode_lookup[key]
if not pn.is_only_parameter_dependency:
pnf.is_only_parameter_dependency = False
pn_ret = pnf
else:
self.pnode_lookup[key] = pn_ret = pn
pn_ret.buildReferences()
return pn_ret
def deregisterPNode(self, pn):
key = (pn.name, pn.key)
assert self.pnode_lookup[key] is pn
del self.pnode_lookup[key]
def _getCache(self, pn, use_local, use_dependencies, should_exist):
key = (pn.name if pn is not None else None,
pn.local_key if use_local else None,
pn.dependency_key if use_dependencies else None)
if should_exist:
assert key in self.cache_lookup
return key, self.cache_lookup[key]
def increaseCachingReference(self, pn):
# print ("increasing reference, name = %s, key = %s, local_key = %s, dep_key = %s"
# % (pn.name, pn.key, pn.local_key, pn.dependency_key))
for t in [(None, False, False),
(pn, True, False),
(pn, False, True),
(pn, False, False),
(pn, True, True)]:
key, cache = self._getCache(*(t + (False,)))
cache.reference_count += 1
def decreaseCachingReference(self, pn):
# print ("decreasing reference, name = %s, key = %s, local_key = %s, dep_key = %s"
# % (pn.name, pn.key, pn.local_key, pn.dependency_key))
for t in [(None, False, False),
(pn, True, False),
(pn, False, True),
(pn, False, False),
(pn, True, True)]:
key, cache = self._getCache(*(t + (True,)))
cache.reference_count -= 1
assert cache.reference_count >= 0
# Clear the cache if it's no longer needed
if cache.reference_count == 0:
# if len(cache.cache) != 0:
# print "Clearing cache %s. objects in the cache are:" % str(key)
# for v in cache.cache.itervalues():
# print "%s: ref_count = %d" % (v.getObjectKey(), v.objRefCount())
del self.cache_lookup[key]
def loadContainer(self, container, no_local_caching = False):
assert not container.objectIsLoaded()
if not no_local_caching:
cache = self.cache_lookup[container.getCacheKey()]
c = cache.cache
obj_key = container.getObjectKey()
if obj_key in c:
return c[obj_key]
else:
c[obj_key] = container
if container.isNonPersistent():
container.setNonPersistentObjectSaveHook(self.non_persistant_deleter)
# now see if it can be loaded from disk
self._loadFromDisk(container)
return container
def _loadFromDisk(self, container):
if not container.isDiskWritable():
return
if self.disk_read_enabled:
filename = abspath(join(self.cache_directory, container.getFilename()))
self.log.debug("Trying to load %s from %s" % (container.getKeyAsString(), filename))
if exists(filename):
error_loading = False
try:
pt = loadResults(self.opttree, filename)
except Exception, e:
self.log.error("Exception Raised while loading %s: \n%s"
% (filename, str(e)))
error_loading = True
if not error_loading:
self.log.debug("--> Object successfully loaded.")
container.setObject(pt)
return
else:
pass # go to the disk write enabled part
else:
self.log.debug("--> File does not exist.")
if self.disk_write_enabled and container.isDiskWritable():
container.setObjectSaveHook(self._saveToDisk)
def _saveToDisk(self, container):
assert self.disk_write_enabled and container.isDiskWritable()
filename = join(self.cache_directory, container.getFilename())
obj = container.getObject()
self.log.debug("Saving object %s to %s." % (container.getKeyAsString(), filename))
try:
saveResults(self.opttree, filename, obj)
assert exists(filename)
except Exception, e:
self.log.error("Exception raised attempting to save object to cache: \n%s" % str(e))
try:
remove(filename)
except Exception:
pass
def _debug_referencesDone(self):
import gc
gc.collect()
print "**************** running check*****************"
for pn in self.pnode_lookup.values():
if pn.result_reference_count != 0 or pn.module_reference_count != 0 or pn.module_access_reference_count != 0:
print (("Nonzero references, (%d, %d, %d), name = %s, key = %s, "
"local_key = %s, dep_key = %s")
% (pn.result_reference_count, pn.module_reference_count, pn.module_access_reference_count,
pn.name, pn.key,
pn.local_key, pn.dependency_key))
for t in [(None, False, False),
(pn, True, False),
(pn, False, True),
(pn, False, False),
(pn, True, True)]:
key, cache = self._getCache(*(t + (False,)))
if cache.reference_count != 0:
print (("Nonzero (%d) cache reference, name = %s, key = %s, "
"local_key = %s, dep_key = %s")
% (cache.reference_count,
"null" if t[0] is None else pn.name,
pn.key,
"null" if not t[1] else pn.local_key,
"null" if not t[2] else pn.dependency_key))
if hasattr(pn, "module") and pn.module is not None:
print (("Non-None module, (%d, %d, %d), name = %s, key = %s, "
"local_key = %s, dep_key = %s")
% (pn.result_reference_count, pn.module_reference_count, pn.module_access_reference_count,
pn.name, pn.key,
pn.local_key, pn.dependency_key))
if hasattr(pn, "results_container") and pn.results_container is not None:
print (("Non-None results, (%d, %d, %d), name = %s, key = %s, "
"local_key = %s, dep_key = %s")
% (pn.result_reference_count, pn.module_reference_count, pn.module_access_reference_count,
pn.name, pn.key,
pn.local_key, pn.dependency_key))
if hasattr(pn, "child_pull_dict"):
print (("Child pull dict bad!!!, (%d, %d, %d), name = %s, key = %s, "
"local_key = %s, dep_key = %s")
% (pn.result_reference_count, pn.module_reference_count, pn.module_access_reference_count,
pn.name, pn.key,
pn.local_key, pn.dependency_key))
_Null = "null"
_PulledResult = namedtuple('PulledResult', ['parameters', 'result'])
_PulledModule = namedtuple('PulledModule', ['parameters', 'result', 'module'])
class PNode(object):
def __init__(self, common, parameters, name, p_type):
# print ">>>>>>>>>>>>>>>>>>>> INIT: %s <<<<<<<<<<<<<<<<<<<<" % name
self.common = common
self.parameters = parameters.copy()
self.parameters.attach(recursive = True)
self.name = name
self.is_pmodule = isPModule(name)
if p_type in ["module", "results"]:
if not self.is_pmodule:
raise ValueError("%s is not a recognized processing module." % name)
else:
if p_type != "parameters":
raise ValueError( ("p_type must be either 'module', 'results', "
"or 'parameters' (not '%s').") % p_type)
# Parameters don't hold references to other objects
self.is_only_parameter_dependency = (p_type == "parameters")
##################################################
# Get the preprocessed parameters
if name not in self.parameters:
self.parameters.makeBranch(name)
if self.is_pmodule:
p_class = self.p_class = getPModuleClass(self.name)
self.parameters[name] = pt = p_class._preprocessParameters(self.parameters)
pt.attach(recursive = True)
pt.freeze()
self.parameter_key = self.parameters.hash(name)
h = hashlib.md5()
h.update(str(p_class._getVersion()))
h.update(self.parameter_key)
self.local_key = base64.b64encode(h.digest(), "az")[:8]
self.results_reported = False
self.full_key = self.parameters.hash()
# Reference counting isn't used in the parameter classes
self.parameter_reference_count = 0
self.result_reference_count = 0
self.module_reference_count = 0
self.module_access_reference_count = 0
self.dependent_modules_pulled = False
self.children_have_reference = False
else:
self.parameter_key = self.parameters.hash(name)
self.parameter_reference_count = 0
########################################
# Setup
def initialize(self):
# This extra step is needed as the child pnodes must be
# consolidated into the right levels first
assert self.is_pmodule
def _processDependencySet(p_type, dl):
rs = {}
def add(s, parameters, first_order, name_override):
t = type(s)
if t is str:
if s != self.name:
# delay the creation until we know we need it
h = self.full_key if parameters is self.parameters else parameters.hash()
rs[(s, h)] = (s if first_order else name_override, parameters, s, p_type)
elif t is list or t is tuple or t is set:
for se in s:
add(se, parameters, first_order, name_override)
elif getattr(s, "__parameter_container__", False):
add(s.name, s._getParameters(parameters), False, s._getLoadName())
else:
raise TypeError("Dependency type not recognized.")
add(dl, self.parameters, True, None)
return rs
# Initializes the results above the dependencies
# get the verbatim children specifications and lists of
# dependencies
m_dep, r_dep, p_dep = self.p_class._getDependencies(self.parameters)
# these are (name, hash) : pnode dicts
self.module_dependencies = _processDependencySet("module", m_dep)
self.result_dependencies = _processDependencySet("results", r_dep)
self.parameter_dependencies = _processDependencySet("parameters", p_dep)
# print "init-3: %s-%s has ref count %d" % (self.name, self.key, sys.getrefcount(self))
# Now go through and push the dependencies down
self.result_dependencies.update(self.module_dependencies)
self.parameter_dependencies.update(self.result_dependencies)
# And go through and instantiate all of the remaining ones
for k, t in self.parameter_dependencies.items():
pn = PNode(self.common, *t[1:])
self.parameter_dependencies[k] = v = (t[0], pn)
if k in self.result_dependencies:
self.result_dependencies[k] = v
if k in self.module_dependencies:
self.module_dependencies[k] = v
# Go through and instantiate all the children
for n, pn in self.result_dependencies.itervalues():
pn.initialize()
# Now go through and eliminate duplicates
for k, (n, pn) in self.result_dependencies.items():
pnf = self.common.registerPNode(pn)
if pnf is not pn:
self.result_dependencies[k] = (n, pnf)
self.parameter_dependencies[k] = (n, pnf)
if k in self.module_dependencies:
self.module_dependencies[k] = (n, pnf)
########################################
# don't need to propegate parameter dependencies to children,
# computing the hash as well
h = hashlib.md5()
for (n, th), (ln, pn) in sorted(self.parameter_dependencies.iteritems()):
h.update(n)
h.update(pn.parameter_key)
for (n, th), (ln, pn) in sorted(self.result_dependencies.iteritems()):
h.update(n)
h.update(pn.key)
self.dependency_key = base64.b64encode(h.digest(), "az")[:8]
h.update(self.local_key)
self.key = base64.b64encode(h.digest(), "az")[:8]
# Load the parameter tree
self.dependency_parameter_tree = TreeDict()
for (n, th), (ln, pn) in sorted(self.parameter_dependencies.iteritems()):
if ln is not None:
self.dependency_parameter_tree[ln] = pn.pullParameterPreReferenceCount()
self.dependency_parameter_tree[self.name] = self.parameters[self.name]
self.is_disk_writable = self.p_class._allowsCaching(self.dependency_parameter_tree)
self.is_result_disk_writable = (False if not self.is_disk_writable else
self.p_class._allowsResultCaching(self.dependency_parameter_tree))
def buildReferences(self):
if not self.is_only_parameter_dependency and not self.children_have_reference:
########################################
# Do reference counting with all the children
for k, (n, pn) in self.parameter_dependencies.items():
pn.increaseParameterReference()
for k, (n, pn) in self.result_dependencies.items():
pn.increaseResultReference()
for k, (n, pn) in self.module_dependencies.items():
pn.increaseModuleReference()
self.children_have_reference = True
def dropUnneededReferences(self):
if self.children_have_reference:
########################################
# Do reference counting with all the children
for k, (n, pn) in self.module_dependencies.items():
pn.decreaseModuleReference()
for k, (n, pn) in self.result_dependencies.items():
pn.decreaseResultReference()
for k, (n, pn) in self.parameter_dependencies.items():
pn.decreaseParameterReference()
self.children_have_reference = False
##################################################
# Instantiating things
def _instantiate(self, need_module):
if not hasattr(self, "results_container"):
# Attempt to load the results from cache
self.results_container = self.common.loadContainer(
PNodeModuleCacheContainer(
pn_name = self.name,
name = "__results__",
local_key = self.local_key,
dependency_key = self.dependency_key,
is_disk_writable = self.is_result_disk_writable),
no_local_caching = True)
have_loaded_results = self.results_container.objectIsLoaded()
# we're done if the results are loaded and that's all we need
if have_loaded_results:
self._reportResults(self.results_container.getObject())
if self.module_reference_count == 0:
assert not need_module
self.dropUnneededReferences()
return
if not need_module:
return
else:
have_loaded_results = self.results_container.objectIsLoaded()
# Okay, not done yet
########################################
# This pulls all the dependency parts
# Create the dependency parts
self.child_pull_dict = {}
global _Null
modules = TreeDict()
results = TreeDict()
params = TreeDict()
for k, (load_name, pn) in self.module_dependencies.iteritems():
self.child_pull_dict[k] = p,r,m = pn.pullUpToModule()
if load_name is not None:
params[load_name], results[load_name], modules[load_name] = p,r,m
modules.freeze()
for k, (load_name, pn) in self.result_dependencies.iteritems():
if k in self.child_pull_dict:
if load_name is not None:
params[load_name], results[load_name] = self.child_pull_dict[k][:2]
else:
p, r = pn.pullUpToResults()
self.child_pull_dict[k] = (p, r, _Null)
if load_name is not None:
params[load_name], results[load_name] = p, r
results.freeze()
# parameters are easy
for k, (load_name, pn) in self.parameter_dependencies.iteritems():
if k in self.child_pull_dict:
if load_name is not None:
params[load_name] = self.child_pull_dict[k][0]
else:
p = pn.pullParameters()
self.child_pull_dict[k] = (p, _Null, _Null)
if load_name is not None:
params[load_name] = p
params[self.name] = self.parameters[self.name]
params.freeze()
# Now we've pulled all we need!
self.children_have_reference = False
self.increaseModuleAccessCount()
# Now instantiate the module
self.module = self.p_class(self, params, results, modules)
if not have_loaded_results:
r = self.module.run()
if type(r) is TreeDict:
r.freeze()
self.results_container.setObject(r)
self._reportResults(r)
else:
r = self.results_container.getObject()
self.module._setResults(r)
self.dependent_modules_pulled = True
self.decreaseModuleAccessCount()
##################################################
# Interfacing stuff
def _checkModuleDeletionAllowances(self):
mac_zero = (self.module_access_reference_count == 0)
mrc_zero = (self.module_reference_count == 0)
rrc_zero = (self.result_reference_count == 0)
if mrc_zero and mac_zero and self.dependent_modules_pulled:
# Get rid of everything but the results
self.module._destroy()
del self.module
            # propagate all the dependencies
for k, (load_name, pn) in self.module_dependencies.iteritems():
pn.decreaseModuleAccessCount()
if hasattr(self, "additional_module_nodes_accessed"):
for pn in self.additional_module_nodes_accessed:
pn.decreaseModuleAccessCount()
del self.additional_module_nodes_accessed
            # This is guaranteed to exist if all the code is right
del self.child_pull_dict
self.dependent_modules_pulled = False
def _checkDeletability(self):
if not self.is_only_parameter_dependency:
assert self.module_reference_count <= self.parameter_reference_count
assert self.result_reference_count <= self.parameter_reference_count
if self.parameter_reference_count == 0 and (
self.is_only_parameter_dependency or self.module_access_reference_count == 0):
# Clean out the heavy parts in light of everything
if not self.is_only_parameter_dependency:
self.common.deregisterPNode(self)
self.module_dependencies.clear()
self.result_dependencies.clear()
self.parameter_dependencies.clear()
def increaseParameterReference(self):
if not self.is_only_parameter_dependency:
assert self.module_reference_count <= self.parameter_reference_count
assert self.result_reference_count <= self.parameter_reference_count
assert type(self.parameters) is TreeDict
self.parameter_reference_count += 1
def decreaseParameterReference(self):
assert self.parameter_reference_count >= 1
self.parameter_reference_count -= 1
if not self.is_only_parameter_dependency:
assert self.module_reference_count <= self.parameter_reference_count
assert self.result_reference_count <= self.parameter_reference_count
if self.parameter_reference_count == 0:
self._checkDeletability()
def increaseResultReference(self):
self.result_reference_count += 1
def decreaseResultReference(self):
assert self.result_reference_count >= 1
self.result_reference_count -= 1
assert self.module_reference_count <= self.result_reference_count
if self.result_reference_count == 0:
try:
del self.results_container
except AttributeError:
pass
self.dropUnneededReferences()
def increaseModuleAccessCount(self):
self.module_access_reference_count += 1
self.common.increaseCachingReference(self)
def decreaseModuleAccessCount(self):
assert self.module_access_reference_count >= 1
self.module_access_reference_count -= 1
self.common.decreaseCachingReference(self)
if self.module_access_reference_count == 0:
self._checkModuleDeletionAllowances()
self._checkDeletability()
def increaseModuleReference(self):
self.module_reference_count += 1
self.common.increaseCachingReference(self)
def decreaseModuleReference(self):
assert self.module_reference_count >= 1
self.module_reference_count -= 1
self.common.decreaseCachingReference(self)
if self.module_reference_count == 0:
self._checkModuleDeletionAllowances()
def pullParameterPreReferenceCount(self):
return self.parameters[self.name]
def pullParameters(self):
assert self.parameter_reference_count >= 1
p = self.parameters[self.name]
self.decreaseParameterReference()
return p
def pullUpToResults(self):
assert self.result_reference_count >= 1
if not hasattr(self, "results_container"):
self._instantiate(False)
r = self.results_container.getObject()
ret = _PulledResult(self.parameters[self.name], r)
rc = self.results_container
self.decreaseResultReference()
self.decreaseParameterReference()
return ret
def pullUpToModule(self):
# print "Pulling module for module %s." % self.name
assert self.module_reference_count >= 0
if not hasattr(self, "module") or not hasattr(self, "results_container"):
self._instantiate(True)
r = self.results_container.getObject()
self._reportResults(r)
ret = _PulledModule(self.parameters[self.name], r, self.module)
self.increaseModuleAccessCount()
self.decreaseModuleReference()
self.decreaseResultReference()
self.decreaseParameterReference()
return ret
################################################################################
# Loading cache stuff
def getCacheContainer(self, obj_name, key, ignore_module, ignore_local,
ignore_dependencies, is_disk_writable, is_persistent):
container = PNodeModuleCacheContainer(
pn_name = None if ignore_module else self.name,
name = obj_name,
local_key = None if ignore_local else self.local_key,
dependency_key = None if ignore_dependencies else self.dependency_key,
specific_key = key,
is_disk_writable = is_disk_writable and self.is_disk_writable,
is_persistent = is_persistent)
return self.common.loadContainer(container)
def _resolveRequestInfo(self, r):
# first get the key
if type(r) is str:
name = r
ptree = self.parameters
key = self.full_key
elif getattr(r, "__parameter_container__", False):
name = r.name
ptree = r._getParameters(self.parameters)
key = ptree.hash()
else:
raise TypeError("Requested %s must be specified as a string or "
"a parameter container class like 'Delta'.")
return name, ptree, key
def getSpecific(self, r_type, r):
name, ptree, key = self._resolveRequestInfo(r)
lookup_key = (name, key)
if lookup_key in self.child_pull_dict:
params, results, module = self.child_pull_dict[lookup_key]
global _Null
if r_type == "results" and results is not _Null:
return results
elif r_type == "module" and module is not _Null:
return module
elif r_type == "parameters":
return params
else:
assert False
if r_type == "results":
return self.common.getResults(ptree, name)
elif r_type == "module":
pn = PNode(self.common, ptree, name, 'module')
pn.initialize()
pn = self.common.registerPNode(pn)
pn.increaseParameterReference()
pn.increaseResultReference()
pn.increaseModuleReference()
if hasattr(self, "additional_module_nodes_accessed"):
self.additional_module_nodes_accessed.append(pn)
else:
self.additional_module_nodes_accessed = [pn]
return pn.pullUpToModule().module
elif r_type == "parameters":
pn = PNode(self.common, ptree, name, 'parameters')
pn.initialize()
pn = self.common.registerPNode(pn)
pn.increaseParameterReference()
return pn.pullParameters()
else:
assert False
##################################################
# Result Reporting stuff
def _reportResults(self, results):
if not self.results_reported:
try:
self.p_class.reportResults(self.parameters, self.parameters[self.name], results)
except TypeError, te:
rrf = self.p_class.reportResults
def raiseTypeError():
raise TypeError(("reportResults method in '%s' must be @classmethod "
"and take global parameter tree, local parameter tree, "
"and result tree as arguments.") % name)
                # See if it was due to an incompatible signature
from robust_inspect import getcallargs
try:
                    getcallargs(rrf, self.parameters, self.parameters[self.name], results)
except TypeError:
raiseTypeError()
# Well, that wasn't the issue, so it's something internal; re-raise
raise
self.results_reported = True
| bsd-3-clause | -8,434,984,872,523,774,000 | 33.292537 | 121 | 0.539084 | false | 4.394797 | false | false | false |
KlubJagiellonski/Politikon | events/models.py | 1 | 30976 | # -*- coding: utf-8 -*-
import json
import logging
from collections import defaultdict
from dateutil.relativedelta import relativedelta
from math import exp
from unidecode import unidecode
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.validators import RegexValidator
from django.db import models, transaction
from django.template.defaultfilters import slugify
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.utils.encoding import python_2_unicode_compatible
from .elo import EloMatch
from .exceptions import UnknownOutcome, EventNotInProgress
from .managers import (
EventManager,
BetManager,
TeamResultManager,
TransactionManager,
)
from bladepolska.snapshots import SnapshotAddon
from bladepolska.site import current_domain
from django_elasticsearch.models import EsIndexable
from constance import config
from imagekit.models import ProcessedImageField
from imagekit.processors import ResizeToFill
from taggit_autosuggest.managers import TaggableManager
logger = logging.getLogger(__name__)
@python_2_unicode_compatible
class EventCategory(models.Model):
name = models.CharField(u'tytuł wydarzenia', max_length=255, unique=True)
slug = models.SlugField(verbose_name=_('Slug url'), unique=True)
class Meta:
verbose_name = u'kategoria'
verbose_name_plural = u'kategorie'
def __str__(self):
return self.name
@python_2_unicode_compatible
class Event(EsIndexable, models.Model):
"""
    Event model represents a real-world question which you can answer YES or NO.
"""
IN_PROGRESS, CANCELLED, FINISHED_YES, FINISHED_NO = range(1, 5)
EVENT_OUTCOME_CHOICES = (
(IN_PROGRESS, u'w trakcie'),
(CANCELLED, u'anulowane'),
(FINISHED_YES, u'rozstrzygnięte na TAK'),
(FINISHED_NO, u'rozstrzygnięte na NIE'),
)
EVENT_FINISHED_TYPES = (CANCELLED, FINISHED_YES, FINISHED_NO)
BOOLEAN_OUTCOME_DICT = {
FINISHED_YES: True,
FINISHED_NO: False
}
BEGIN_PRICE = 50
FACTOR_B = 10
PRIZE_FOR_WINNING = 100
CHART_MARGIN = 3
EVENT_SMALL_CHART_DAYS = 14
EVENT_BIG_CHART_DAYS = 28
SMALL_IMAGE_WIDTH = 340
SMALL_IMAGE_HEIGHT = 250
BIG_IMAGE_WIDTH = 1250
BIG_IMAGE_HEIGHT = 510
snapshots = SnapshotAddon(fields=[
'current_buy_for_price',
'current_buy_against_price',
'current_sell_for_price',
'current_sell_against_price',
'Q_for',
'Q_against',
'B'
])
title = models.CharField(u'tytuł wydarzenia', max_length=255)
short_title = models.CharField(
verbose_name=u'tytuł promocyjny wydarzenia', max_length=255, default='', blank=True
)
description = models.TextField(u'pełny opis wydarzenia', default='')
categories = models.ManyToManyField('events.EventCategory', verbose_name=u'kategorie', blank=True)
is_featured = models.BooleanField(u'wyróżniony', default=False)
is_published = models.BooleanField(u'opublikowano', default=True)
twitter_tag = models.CharField(
verbose_name=u'tag twittera', max_length=32, null=True, blank=True, default='',
validators=[
RegexValidator(
regex=r'^([^\s]+)$',
message=u'Tag twittera nie może zawierać spacji',
code='invalid_twitter_tag'
),
]
)
title_fb_yes = models.CharField(
u'tytuł na TAK obiektu FB', max_length=255, default='', blank=True, null=True
)
title_fb_no = models.CharField(
u'tytuł na NIE obiektu FB', max_length=255, default='', blank=True, null=True
)
small_image = ProcessedImageField(
help_text=u'mały obrazek {0}x{1}'.format(SMALL_IMAGE_WIDTH, SMALL_IMAGE_HEIGHT),
upload_to='events_small',
processors=[ResizeToFill(SMALL_IMAGE_WIDTH, SMALL_IMAGE_HEIGHT)],
null=True,
blank=False,
)
big_image = ProcessedImageField(
help_text=u'duży obrazek {0}x{1}'.format(BIG_IMAGE_WIDTH, BIG_IMAGE_HEIGHT),
upload_to='events_big',
processors=[ResizeToFill(BIG_IMAGE_WIDTH, BIG_IMAGE_HEIGHT)],
null=True,
blank=False,
)
    # voting used to resolve events
vote_yes_count = models.PositiveIntegerField(u'głosów na tak', default=0)
vote_no_count = models.PositiveIntegerField(u'głosów na nie', default=0)
vote_cancel_count = models.PositiveIntegerField(u'głosów na anuluj', default=0)
outcome = models.PositiveIntegerField(u'rozstrzygnięcie', choices=EVENT_OUTCOME_CHOICES, default=1)
outcome_reason = models.TextField(u'uzasadnienie wyniku', default='', blank=True)
created_date = models.DateTimeField(auto_now_add=True, verbose_name=u'data utworzenia')
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL, verbose_name=u'utworzone przez', null=True, related_name='created_by'
)
estimated_end_date = models.DateTimeField(u'przewidywana data rozstrzygnięcia', null=True, blank=False)
end_date = models.DateTimeField(u'data rozstrzygnięcia', null=True, blank=True)
current_buy_for_price = models.IntegerField(
u'cena nabycia akcji zdarzenia', default=BEGIN_PRICE
)
current_buy_against_price = models.IntegerField(
u'cena nabycia akcji zdarzenia przeciwnego', default=BEGIN_PRICE
)
current_sell_for_price = models.IntegerField(
u'cena sprzedaży akcji zdarzenia', default=BEGIN_PRICE
)
current_sell_against_price = models.IntegerField(
u'cena sprzedaży akcji zdarzenia przeciwnego', default=BEGIN_PRICE
)
last_transaction_date = models.DateTimeField(u'data ostatniej transakcji', null=True)
Q_for = models.IntegerField(u'zakładów na TAK', default=0)
Q_against = models.IntegerField(u'zakładów na NIE', default=0)
turnover = models.IntegerField(u'obrót', default=0, db_index=True)
absolute_price_change = models.IntegerField(
u'zmiana ceny (wartość absolutna)', db_index=True, default=0
)
price_change = models.IntegerField(u'zmiana ceny', default=0)
# constant for calculating event change
# probably: how you need to increment quantity, to change price
B = models.FloatField(u'stała B', default=FACTOR_B)
objects = EventManager()
tags = TaggableManager(blank=True)
class Meta:
verbose_name = 'wydarzenie'
verbose_name_plural = 'wydarzenia'
def __str__(self):
return self.title
def save(self, *args, **kwargs):
"""
Recalculate prices for event
:param kwargs:
"""
if not self.pk:
self.recalculate_prices()
super(Event, self).save(*args, **kwargs)
def get_absolute_url(self):
return 'http://%(domain)s%(url)s' % {
'domain': current_domain(),
'url': reverse('events:event_detail', kwargs={'pk': self.pk})
}
def get_relative_url(self):
return '/event/%(id)d-%(title)s' % {'id': self.id, 'title': slugify(unidecode(self.title))}
def get_absolute_facebook_object_url(self):
return 'http://%(domain)s%(url)s' % {
'domain': current_domain(),
'url': reverse('events:event_facebook_object_detail', kwargs={'event_id': self.id})
}
def get_small_embed_url(self):
return 'http://%(domain)s%(url)s' % {
'domain': current_domain(),
'url': reverse('events:event_embed_detail', kwargs={'pk': self.id})
}
@staticmethod
def autocomplete_search_fields():
return ("id__iexact", "title__icontains", "short_title__icontains")
@property
def is_in_progress(self):
return self.outcome == Event.IN_PROGRESS
@property
def publish_channel(self):
return 'event_%d' % self.id
@property
def event_dict(self):
return {
'event_id': self.id,
'buy_for_price': self.current_buy_for_price,
'buy_against_price': self.current_buy_against_price,
'sell_for_price': self.current_sell_for_price,
'sell_against_price': self.current_sell_against_price,
}
@property
def finish_date(self):
"""
If event is not finished then estimated_end_date, else end_date
:return: finish date
:rtype: datetime
"""
if self.is_in_progress:
return self.estimated_end_date
else:
return self.end_date
@property
def to_be_resolved(self):
"""
Return True if event is waiting to be resolved.
"""
return timezone.now() >= self.finish_date
def price_for_outcome(self, outcome, direction=True):
if (direction, outcome) not in Bet.BET_OUTCOMES_TO_PRICE_ATTR:
raise UnknownOutcome()
attr = Bet.BET_OUTCOMES_TO_PRICE_ATTR[(direction, outcome)]
return getattr(self, attr)
def get_event_small_chart(self):
"""
Get last transactions price for every day from small event range
:return: chart points of EVENT_SMALL_CHART_DAYS days
:rtype: {int, [], []}
"""
return self.__get_chart_points(self.EVENT_SMALL_CHART_DAYS)
def get_event_big_chart(self):
"""
Get last transactions price for every day from big event range
:return: chart points of EVENT_BIG_CHART_DAYS days
:rtype: {int, [], []}
"""
return self.__get_chart_points(self.EVENT_BIG_CHART_DAYS)
def get_JSON_small_chart(self):
return json.dumps(self.get_event_small_chart())
def get_JSON_big_chart(self):
return json.dumps(self.get_event_big_chart())
@transaction.atomic
def __get_chart_points(self, days):
"""
Get last transactions price for every day;
:param days: number of days in past on chart
:type days: int
:return: chart points
:rtype: {int, [], []}
"""
last_date = self.end_date if self.end_date else timezone.now()
first_date = max(last_date - relativedelta(days=days), self.created_date)
labels = []
points = []
snapshots = self.snapshots.filter(
snapshot_of_id=self.id,
created_at__gte=first_date,
created_at__lte=last_date,
created_at__hour=0
).order_by('created_at')
additional_points = min(days - len(snapshots), Event.CHART_MARGIN)
step_date = first_date - relativedelta(days=additional_points)
for point in range(additional_points):
labels.append(u'{0} {1}'.format(step_date.day, _(step_date.strftime('%B'))))
step_date += relativedelta(days=1)
points.append(Event.BEGIN_PRICE)
for snapshot in snapshots:
labels.append(u'{0} {1}'.format(snapshot.created_at.day, _(snapshot.created_at.strftime('%B'))))
last_price = snapshot.current_buy_for_price
points.append(last_price)
return {
'id': self.id,
'labels': labels,
'points': points
}
def get_user_bet_object(self, user):
"""
        find the single non-empty bet object for this user, or None
:param user: logged user
:type user: User
        :return: normally a single bet where bet.has > 0, or None
:rtype: Bet or None
"""
bets = self.bets.filter(user=user, has__gt=0).order_by('-id')
if bets.exists():
return bets[0]
def get_user_bet(self, user):
"""
        get bet summary for user; the user may be anonymous.
:param user: logged user or anonymous
:type user: User
:return: data for one bet display
:rtype: {}
"""
# Using 'true' and 'false' because some keys are designed for json
bet_line = {
'is_user': False,
'has': 0,
'avgPrice': 0,
'outcome': None, # note: None is the same as False
'buyNO': 'true', # default option is buy bet
'buyYES': 'true', # default option is buy bet
'priceYES': self.current_buy_for_price,
'priceNO': self.current_buy_against_price,
}
if user.pk:
bet_line['is_user'] = True
bet = self.get_user_bet_object(user)
if bet:
bet_line['id'] = bet.pk # it is only for debugging purpose
bet_line['has'] = bet.has
bet_line['avgPrice'] = bet.bought_avg_price
bet_line['outcome'] = bet.outcome # True - YES False - NO
if bet.outcome:
# you have bet for YES, you can sell them
bet_line['buyNO'] = 'false' # that means you sell bet YES
bet_line['priceYES'] = self.current_buy_for_price
bet_line['priceNO'] = self.current_sell_for_price
bet_line['outcome_str'] = 'true'
else:
# you have bet for NO, you can sell them
bet_line['buyYES'] = 'false' # that means you sell bet NO
bet_line['priceYES'] = self.current_sell_against_price
bet_line['priceNO'] = self.current_buy_against_price
bet_line['outcome_str'] = 'false'
return bet_line
def get_bet_social(self):
"""
Get users who bought this event
:return: Dict with 4 keys: 2 QuerySet with YES users and NO users, 2
integers with counts
:rtype: dict{}
"""
response = {}
bet_social_yes = Bet.objects.filter(
event=self,
outcome=True, # bought YES
has__gt=0,
)
response['yes_count'] = bet_social_yes.count()
response['yes_bets'] = bet_social_yes[:6]
bet_social_no = Bet.objects.filter(
event=self,
outcome=False, # bought NO
has__gt=0,
)
response['no_count'] = bet_social_no.count()
response['no_bets'] = bet_social_no[:6]
return response
def increment_quantity(self, outcome, by_amount):
"""
        Used when a buy or sell operation occurs
:param outcome: event outcome - YES or NO; True for YES
:type outcome: bool
:param by_amount: operations count, usually 1
:type by_amount: int
:return:
"""
if outcome not in Bet.BET_OUTCOMES_TO_QUANTITY_ATTR:
raise UnknownOutcome()
attr = Bet.BET_OUTCOMES_TO_QUANTITY_ATTR[outcome]
setattr(self, attr, getattr(self, attr) + by_amount)
self.recalculate_prices()
def increment_turnover(self, by_amount):
"""
        Turnover increases by 1 when a buy or sell operation occurs
:param by_amount: operations count, usually 1
:type by_amount: int
"""
self.turnover += by_amount
def recalculate_prices(self):
"""
Calculate 4 prices for event
"""
factor = 100.
B = self.B
Q_for = self.Q_for
Q_against = self.Q_against
Q_for_sell = max(0, Q_for - 1)
Q_against_sell = max(0, Q_against - 1)
e_for_buy = exp(Q_for / B)
e_against_buy = exp(Q_against / B)
e_for_sell = exp(Q_for_sell / B)
e_against_sell = exp(Q_against_sell / B)
buy_for_price = e_for_buy / float(e_for_buy + e_against_buy)
buy_against_price = e_against_buy / float(e_for_buy + e_against_buy)
sell_for_price = e_for_sell / float(e_for_sell + e_against_buy)
sell_against_price = e_against_sell / float(e_for_buy + e_against_sell)
self.current_buy_for_price = round(factor * buy_for_price, 0)
self.current_buy_against_price = round(factor * buy_against_price, 0)
self.current_sell_for_price = round(factor * sell_for_price, 0)
self.current_sell_against_price = round(factor * sell_against_price, 0)
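    # Worked illustration of the formula above (hypothetical, with the default
    # B == FACTOR_B == 10): starting from Q_for == Q_against == 0 the buy
    # prices are 50/50; after a single share is bought on YES,
    #
    #     event.increment_quantity(True, 1)   # Q_for -> 1, prices recalculated
    #     event.current_buy_for_price         # -> 52
    #
    # since 100 * exp(0.1) / (exp(0.1) + exp(0)) is roughly 52.5, rounded to 52.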
def vote_yes(self):
self.vote_yes_count += 1
if self.vote_yes_count >= config.VOICES_TO_RESOLVE:
self.finish_yes()
self.save()
return self.vote_yes_count
def vote_no(self):
self.vote_no_count += 1
if self.vote_no_count >= config.VOICES_TO_RESOLVE:
self.finish_no()
self.save()
return self.vote_no_count
def vote_cancel(self):
self.vote_cancel_count += 1
if self.vote_cancel_count >= config.VOICES_TO_RESOLVE:
self.cancel()
self.save()
return self.vote_cancel_count
@transaction.atomic
def __finish(self, outcome):
"""
Set Event finish status
:param outcome: outcome status; EVENT_OUTCOME_CHOICES
:type outcome: Choices
"""
if self.outcome != self.IN_PROGRESS:
raise EventNotInProgress("Wydarzenie zostało już rozwiązane.")
self.outcome = outcome
self.end_date = timezone.now()
self.save()
@transaction.atomic
def __finish_teams_outcome(self, teams_with_bets):
team_results = []
for team in teams_with_bets:
bets = teams_with_bets[team]
team_results.append(TeamResult(
team=team,
event=self,
initial_elo=team.get_elo(),
rewarded_total=sum(bet.rewarded_total for bet in bets),
prev_result=team.get_last_result(),
bets_count=len(bets),
))
elo_match = EloMatch()
team_results = sorted(
team_results,
key=lambda x: (x.rewarded_total, x.bets_count),
reverse=True
)
prev_result = None
for result in team_results:
place = team_results.index(result) + 1
# Set draws
if (
prev_result
and (prev_result.rewarded_total, prev_result.bets_count) ==
(result.rewarded_total, result.bets_count)
):
place = next(
player.place for player in elo_match.players
if player.idx == prev_result.team.id
)
elo_match.add_player(
idx=result.team.id,
place=place,
elo=result.initial_elo,
)
prev_result = result
elo_match.calculate_elos()
for team_result in team_results:
team_result.elo = elo_match.get_elo(team_result.team.id)
team_result.save()
return team_results
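    # Illustration of the draw handling above (hypothetical numbers): with
    #
    #     team A: rewarded_total=300, bets_count=2  -> place 1
    #     team B: rewarded_total=300, bets_count=2  -> place 1 (draw with A)
    #     team C: rewarded_total=100, bets_count=1  -> place 3
    #
    # B reuses A's place because its (rewarded_total, bets_count) pair matches
    # prev_result, while C keeps its positional place (index + 1 == 3).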
@transaction.atomic
def __finish_with_outcome(self, outcome):
"""
main finish status
:param outcome: outcome status; EVENT_OUTCOME_CHOICES
:type outcome: Choices
"""
self.__finish(outcome)
teams_with_bets = defaultdict(list)
for bet in Bet.objects.filter(event=self):
if bet.outcome == self.BOOLEAN_OUTCOME_DICT[outcome]:
bet.rewarded_total = self.PRIZE_FOR_WINNING * bet.has
bet.user.total_cash += bet.rewarded_total
Transaction.objects.create(
user=bet.user,
event=self,
type=Transaction.EVENT_WON_PRIZE,
quantity=bet.has,
price=self.PRIZE_FOR_WINNING
)
if bet.user.team:
teams_with_bets[bet.user.team].append(bet)
# update portfolio value
bet.user.portfolio_value -= bet.get_invested()
bet.user.save()
            # This causes the event to show up in "latest outcome"
bet.is_new_resolved = True
bet.save()
if len(teams_with_bets) > 1:
team_results = self.__finish_teams_outcome(teams_with_bets)
for team_result in team_results:
(
Bet.objects
.get_team_bets_for_events(team_result.team, [self])
.update(team_result=team_result)
)
@transaction.atomic
def finish_yes(self):
"""
if event is finished on YES then prizes calculate
"""
self.__finish_with_outcome(self.FINISHED_YES)
@transaction.atomic
def finish_no(self):
"""
if event is finished on NO then prizes calculate
"""
self.__finish_with_outcome(self.FINISHED_NO)
@transaction.atomic
def cancel(self):
"""
refund for users on cancel event.
"""
self.__finish(self.CANCELLED)
users = {}
for t in Transaction.objects.filter(event=self).order_by('user'):
if t.user not in users:
users.update({
t.user: 0
})
if t.type in Transaction.BUY_SELL_TYPES:
                # For transaction type BUY the stored price is negative, so the refund
                # carries the opposite sign: a plain BUY results in a positive refund
                # (EVENT_CANCELLED_REFUND), while a BUY plus a profitable SELL results
                # in a negative one (EVENT_CANCELLED_DEBIT).
users[t.user] -= t.quantity * t.price
for user, refund in users.iteritems():
if refund == 0:
continue
user.total_cash += refund
user.save()
if refund > 0:
transaction_type = Transaction.EVENT_CANCELLED_REFUND
else:
transaction_type = Transaction.EVENT_CANCELLED_DEBIT
Transaction.objects.create(
user=user,
event=self,
type=transaction_type,
price=refund
)
class TeamResult(models.Model):
"""
Result of team after event is resolved
"""
objects = TeamResultManager()
team = models.ForeignKey(
'accounts.Team', related_name='results', related_query_name='result'
)
prev_result = models.OneToOneField(
'self', on_delete=models.PROTECT, null=True
)
elo = models.IntegerField(u'ranking', null=True, blank=True)
initial_elo = models.IntegerField(u'początkowy ranking', default=1400)
rewarded_total = models.IntegerField(
u'nagroda za wynik', default=0, null=False
)
event = models.ForeignKey(
Event, related_query_name='team_result', related_name='team_results'
)
bets_count = models.PositiveIntegerField(u'liczba zakładów')
created = models.DateTimeField(
auto_now_add=True, verbose_name=u'utworzono'
)
class Meta:
verbose_name = u'rezultat drużyny'
verbose_name_plural = u'rezultaty drużyn'
class SolutionVote(models.Model):
"""
Vote for event resolve
"""
class Meta:
unique_together = ('user', 'event')
YES, NO, CANCEL = range(1, 4)
VOTE_OUTCOME_CHOICES = (
(YES, u'rozwiązanie na TAK'),
(NO, u'rozwiązanie na NIE'),
(CANCEL, u'anulowanie wydarzenia')
)
user = models.ForeignKey(settings.AUTH_USER_MODEL)
event = models.ForeignKey(Event)
outcome = models.IntegerField(u'rozwiązanie wydarzenia', choices=VOTE_OUTCOME_CHOICES, null=True)
@python_2_unicode_compatible
class Bet(models.Model):
"""
Created when user choose YES or NO for event.
"""
class Meta:
verbose_name = u'zakład'
verbose_name_plural = u'zakłady'
YES = True
NO = False
BET_OUTCOME_CHOICES = (
(YES, u'udziały na TAK'),
(NO, u'udziały na NIE'),
)
BUY = True
SELL = False
BET_OUTCOMES_TO_PRICE_ATTR = {
(BUY, YES): 'current_buy_for_price',
(BUY, NO): 'current_buy_against_price',
(SELL, YES): 'current_sell_for_price',
(SELL, NO): 'current_sell_against_price'
}
BET_OUTCOMES_TO_QUANTITY_ATTR = {
True: 'Q_for',
False: 'Q_against'
}
user = models.ForeignKey(
settings.AUTH_USER_MODEL, null=False, related_name='bets', related_query_name='bet'
)
event = models.ForeignKey(Event, null=False, related_name='bets', related_query_name='bet')
outcome = models.BooleanField(u'zakład na TAK', choices=BET_OUTCOME_CHOICES)
# most important param: how many bets user has.
has = models.PositiveIntegerField(u'posiadane zakłady', default=0, null=False)
bought = models.PositiveIntegerField(u'kupione zakłady', default=0, null=False)
sold = models.PositiveIntegerField(u'sprzedane zakłady', default=0, null=False)
bought_avg_price = models.FloatField(u'kupione po średniej cenie', default=0, null=False)
sold_avg_price = models.FloatField(u'sprzedane po średniej cenie', default=0, null=False)
# this field is probably for the biggest rewards
rewarded_total = models.IntegerField(u'nagroda za wynik', default=0, null=False)
# this is used to show event in my wallet.
is_new_resolved = models.BooleanField(u'ostatnio rozstrzygnięte', default=False, null=False)
team_result = models.ForeignKey(
TeamResult, null=True, related_name='bets', related_query_name='bet'
)
objects = BetManager()
@property
def bet_dict(self):
"""
Dictionary with bet values
:return: bet vaules
:rtype: {}
"""
return {
'bet_id': self.id,
'event_id': self.event.id,
'user_id': self.user.id,
'outcome': self.outcome,
'has': self.has,
'bought': self.bought,
'sold': self.sold,
'bought_avg_price': self.bought_avg_price,
'sold_avg_price': self.sold_avg_price,
'rewarded_total': self.rewarded_total,
}
def __str__(self):
return u'zakłady %s na %s' % (self.user, self.event)
def current_event_price(self):
"""
Get current price for event. Price depend on bet.outcome
:return: current price
:rtype: int
"""
if self.outcome:
return self.event.current_buy_for_price
else:
return self.event.current_buy_against_price
def is_won(self):
"""
winning bet when bet has outcome True and event.outcome is 3
(FINISHED_YES) or
when bet has outcome False and event.outcome is 4 (FINISHED_NO)
:return: True if won
:rtype: bool
"""
if self.outcome and self.event.outcome == Event.FINISHED_YES:
return True
elif not self.outcome and self.event.outcome == Event.FINISHED_NO:
return True
return False
def get_wallet_change(self):
"""
        Get the amount won or lost after the event has finished. For events in
        progress, get the amount that could potentially be won.
:return: more or less than zero
:rtype: int
"""
        # TODO: I really don't know
if self.is_won() or self.event.outcome == Event.IN_PROGRESS:
return self.get_won() - self.get_invested()
else:
return -self.get_invested()
def get_invested(self):
"""
        How much was invested in this bet
:return: price above zero
:rtype: float
"""
        # TODO: I just don't know
if self.event.outcome == Event.CANCELLED:
return 0
return round(self.has * self.bought_avg_price, 0)
def get_won(self):
"""
Get amount won or possibility to win.
:return: price
:rtype: int
"""
if self.is_won() or self.event.outcome == Event.IN_PROGRESS:
return self.has * Event.PRIZE_FOR_WINNING
else:
return 0
def is_finished_yes(self):
"""
Result for bet
:return: True if event resolved for YES
:rtype: bool
"""
return self.event.outcome == Event.FINISHED_YES
def is_finished_no(self):
"""
Result for bet
:return: True if event resolved for NO
:rtype: bool
"""
return self.event.outcome == Event.FINISHED_NO
def is_cancelled(self):
"""
Result for bet
:return: True if canceled bet
:rtype: bool
"""
return self.event.outcome == Event.CANCELLED
@python_2_unicode_compatible
class Transaction(models.Model):
"""
Operation buy or sell or other for user and event
"""
class Meta:
ordering = ['-date']
verbose_name = 'transakcja'
verbose_name_plural = 'transakcje'
BUY_YES, SELL_YES, BUY_NO, SELL_NO, \
EVENT_CANCELLED_REFUND, EVENT_CANCELLED_DEBIT, \
EVENT_WON_PRIZE, TOPPED_UP, BONUS = range(1, 10)
TRANSACTION_TYPE_CHOICES = (
(BUY_YES, u'zakup udziałów na TAK'),
(SELL_YES, u'sprzedaż udziałów na TAK'),
(BUY_NO, u'zakup udziałów na NIE'),
(SELL_NO, u'sprzedaż udziałów na NIE'),
(EVENT_CANCELLED_REFUND, u'zwrot po anulowaniu wydarzenia'),
(EVENT_CANCELLED_DEBIT, u'obciążenie konta po anulowaniu wydarzenia'),
(EVENT_WON_PRIZE, u'wygrana po rozstrzygnięciu wydarzenia'),
(TOPPED_UP, u'doładowanie konta przez aplikację'),
(BONUS, u'bonus')
)
# Transactions changing event price: BUY_YES, SELL_YES, BUY_NO, SELL_NO
BUY_SELL_TYPES = (BUY_YES, SELL_YES, BUY_NO, SELL_NO)
EVENT_SOLVED_TYPES = (EVENT_CANCELLED_REFUND, EVENT_CANCELLED_DEBIT, EVENT_WON_PRIZE)
BONUS_TYPES = (TOPPED_UP, BONUS)
YES_OUTCOME = (BUY_YES, SELL_YES)
NO_OUTCOME = (BUY_NO, SELL_NO)
BUY_TYPES = (BUY_YES, BUY_NO)
SELL_TYPES = (SELL_YES, SELL_NO)
user = models.ForeignKey(
settings.AUTH_USER_MODEL, null=False, related_name='transactions',
related_query_name='transaction'
)
event = models.ForeignKey(
Event, null=True, related_name='transactions', related_query_name='transaction'
)
type = models.PositiveIntegerField(
"rodzaj transakcji", choices=TRANSACTION_TYPE_CHOICES, default=1
)
date = models.DateTimeField('data', auto_now_add=True)
quantity = models.PositiveIntegerField(u'ilość', default=1)
price = models.IntegerField(u'cena jednostkowa', default=0, null=False)
objects = TransactionManager()
def __str__(self):
return u'{} przez {}'.format(self.get_type_display(), self.user)
@property
def total_cash(self):
"""
Get total price for all quantity in transaction: total won, total bought, total sold
:return: total amount
:rtype: int
"""
return self.quantity * self.price
@property
def total_wallet(self):
"""
        Same as total_cash but with the opposite sign: the transaction's net
        effect from the wallet's point of view
:return: total amount
:rtype: int
"""
return -1 * self.quantity * self.price
| gpl-2.0 | -519,453,585,463,597,000 | 32.918771 | 108 | 0.591942 | false | 3.552132 | false | false | false |
baverman/scribes-goodies | scribes_helpers/scribes/helpers/signals.py | 1 | 2297 | from gsignals import weak_connect, connect_all as gsignals_connect_all
from gsignals.signals import attach_signal_connect_info
from SCRIBES.TriggerManager import TriggerManager as CoreTriggerManager
def connect_all(obj, *managers, **external_gobjects):
for m in managers:
if isinstance(m, TriggerManager):
m.connect_triggers(obj)
else:
m.connect_signals(obj)
gsignals_connect_all(obj, **external_gobjects)
class Trigger(object):
"""
    Unbounded trigger (special signal emitted by a keyboard shortcut)
Can be used as decorator to mark methods for feature connecting.
"""
def __init__(self, name, accelerator="", description="", category="",
error=True, removable=True):
self.name = name
self.accelerator = accelerator
self.description = description
self.category = category
self.error = error
self.removable = removable
def __call__(self, func=None, after=False, idle=False):
return attach_signal_connect_info('triggers_to_connect', self, func, after, idle)
def create(self, manager):
return manager.create_trigger(self.name, self.accelerator, self.description,
self.category, self.error, self.removable)
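# Illustrative usage sketch (the trigger name, accelerator and plugin class are
# hypothetical): a plugin typically declares a trigger once and marks its
# handler with it,
#
#     save_trigger = Trigger('save-document', '<ctrl>s', 'Save the file', 'File')
#
#     class Plugin(object):
#         @save_trigger
#         def save(self, *args):
#             pass
#
# then wiring happens via TriggerManager(editor).connect_triggers(plugin) or
# the module-level connect_all() helper defined above.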
class TriggerManager(object):
'''
Auto disconnected trigger manager
Wraps SCRIBES.TriggerManager and calls remove_triggers on object deletion
'''
def __init__(self, editor):
self.manager = CoreTriggerManager(editor)
self.triggers = {}
def __del__(self):
self.triggers.clear()
self.manager.remove_triggers()
def connect_triggers(self, obj):
'''
Connects object methods marked by trigger decorator
'''
for attr, value in obj.__class__.__dict__.iteritems():
for trigger, connect_params in getattr(value, 'triggers_to_connect', ()):
self.connect(trigger, obj, attr, **connect_params)
def connect(self, trigger, obj, attr, after, idle):
if trigger.name not in self.triggers:
self.triggers[trigger.name] = trigger.create(self.manager)
weak_connect(self.triggers[trigger.name], 'activate', obj, attr, after=after, idle=idle)
| mit | 3,299,106,395,166,423,600 | 34.338462 | 96 | 0.641707 | false | 4.238007 | false | false | false |
stephane-martin/salt-debian-packaging | salt-2016.3.2/salt/transport/ipc.py | 1 | 25403 | # -*- coding: utf-8 -*-
'''
IPC transport classes
'''
# Import Python libs
from __future__ import absolute_import
import logging
import socket
import msgpack
import weakref
import time
# Import Tornado libs
import tornado
import tornado.gen
import tornado.netutil
import tornado.concurrent
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
# Import Salt libs
import salt.transport.client
import salt.transport.frame
log = logging.getLogger(__name__)
# 'tornado.concurrent.Future' doesn't support
# remove_done_callback() which we would have called
# in the timeout case. Due to this, we have this
# callback function outside of FutureWithTimeout.
def future_with_timeout_callback(future):
if future._future_with_timeout is not None:
future._future_with_timeout._done_callback(future)
class FutureWithTimeout(tornado.concurrent.Future):
def __init__(self, io_loop, future, timeout):
super(FutureWithTimeout, self).__init__()
self.io_loop = io_loop
self._future = future
if timeout is not None:
if timeout < 0.1:
timeout = 0.1
self._timeout_handle = self.io_loop.add_timeout(
self.io_loop.time() + timeout, self._timeout_callback)
else:
self._timeout_handle = None
if hasattr(self._future, '_future_with_timeout'):
# Reusing a future that has previously been used.
# Due to this, no need to call add_done_callback()
# because we did that before.
self._future._future_with_timeout = self
if self._future.done():
future_with_timeout_callback(self._future)
else:
self._future._future_with_timeout = self
self._future.add_done_callback(future_with_timeout_callback)
def _timeout_callback(self):
self._timeout_handle = None
# 'tornado.concurrent.Future' doesn't support
# remove_done_callback(). So we set an attribute
# inside the future itself to track what happens
# when it completes.
self._future._future_with_timeout = None
self.set_exception(tornado.ioloop.TimeoutError())
def _done_callback(self, future):
try:
if self._timeout_handle is not None:
self.io_loop.remove_timeout(self._timeout_handle)
self._timeout_handle = None
self.set_result(future.result())
except Exception as exc:
self.set_exception(exc)
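# Rough usage sketch (the stream and timeout values are illustrative): the
# wrapper bounds how long a coroutine waits on an existing future, e.g.
#
#     read_future = stream.read_bytes(4096, partial=True)
#     wire_bytes = yield FutureWithTimeout(io_loop, read_future, 5.0)
#
# If the inner future does not resolve within ~5 seconds, the wrapper raises
# tornado.ioloop.TimeoutError instead, which is how IPCMessageSubscriber's
# _read_sync() below implements its optional read timeout.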
class IPCServer(object):
'''
A Tornado IPC server very similar to Tornado's TCPServer class
but using either UNIX domain sockets or TCP sockets
'''
def __init__(self, socket_path, io_loop=None, payload_handler=None):
'''
Create a new Tornado IPC server
:param str/int socket_path: Path on the filesystem for the
socket to bind to. This socket does
not need to exist prior to calling
this method, but parent directories
should.
It may also be of type 'int', in
which case it is used as the port
for a tcp localhost connection.
:param IOLoop io_loop: A Tornado ioloop to handle scheduling
:param func payload_handler: A function to customize handling of
incoming data.
'''
self.socket_path = socket_path
self._started = False
self.payload_handler = payload_handler
# Placeholders for attributes to be populated by method calls
self.sock = None
self.io_loop = io_loop or IOLoop.current()
self._closing = False
def start(self):
'''
Perform the work necessary to start up a Tornado IPC server
Blocks until socket is established
'''
# Start up the ioloop
log.trace('IPCServer: binding to socket: {0}'.format(self.socket_path))
if isinstance(self.socket_path, int):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.setblocking(0)
self.sock.bind(('127.0.0.1', self.socket_path))
# Based on default used in tornado.netutil.bind_sockets()
self.sock.listen(128)
else:
self.sock = tornado.netutil.bind_unix_socket(self.socket_path)
tornado.netutil.add_accept_handler(
self.sock,
self.handle_connection,
io_loop=self.io_loop,
)
self._started = True
@tornado.gen.coroutine
def handle_stream(self, stream):
'''
Override this to handle the streams as they arrive
:param IOStream stream: An IOStream for processing
See http://tornado.readthedocs.org/en/latest/iostream.html#tornado.iostream.IOStream
for additional details.
'''
@tornado.gen.coroutine
def _null(msg):
raise tornado.gen.Return(None)
def write_callback(stream, header):
if header.get('mid'):
@tornado.gen.coroutine
def return_message(msg):
pack = salt.transport.frame.frame_msg(
msg,
header={'mid': header['mid']},
raw_body=True,
)
yield stream.write(pack)
return return_message
else:
return _null
unpacker = msgpack.Unpacker()
while not stream.closed():
try:
wire_bytes = yield stream.read_bytes(4096, partial=True)
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
body = framed_msg['body']
self.io_loop.spawn_callback(self.payload_handler, body, write_callback(stream, framed_msg['head']))
except tornado.iostream.StreamClosedError:
log.trace('Client disconnected from IPC {0}'.format(self.socket_path))
break
except Exception as exc:
log.error('Exception occurred while handling stream: {0}'.format(exc))
def handle_connection(self, connection, address):
log.trace('IPCServer: Handling connection to address: {0}'.format(address))
try:
stream = IOStream(
connection,
io_loop=self.io_loop,
)
self.io_loop.spawn_callback(self.handle_stream, stream)
except Exception as exc:
log.error('IPC streaming error: {0}'.format(exc))
def close(self):
'''
Routines to handle any cleanup before the instance shuts down.
Sockets and filehandles should be closed explicitly, to prevent
leaks.
'''
if self._closing:
return
self._closing = True
if hasattr(self.sock, 'close'):
self.sock.close()
def __del__(self):
self.close()
class IPCClient(object):
'''
A Tornado IPC client very similar to Tornado's TCPClient class
but using either UNIX domain sockets or TCP sockets
This was written because Tornado does not have its own IPC
server/client implementation.
:param IOLoop io_loop: A Tornado ioloop to handle scheduling
:param str/int socket_path: A path on the filesystem where a socket
belonging to a running IPCServer can be
found.
It may also be of type 'int', in which
case it is used as the port for a tcp
localhost connection.
'''
# Create singleton map between two sockets
instance_map = weakref.WeakKeyDictionary()
def __new__(cls, socket_path, io_loop=None):
io_loop = io_loop or tornado.ioloop.IOLoop.current()
if io_loop not in IPCClient.instance_map:
IPCClient.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = IPCClient.instance_map[io_loop]
# FIXME
key = str(socket_path)
if key not in loop_instance_map:
log.debug('Initializing new IPCClient for path: {0}'.format(key))
new_client = object.__new__(cls)
# FIXME
new_client.__singleton_init__(io_loop=io_loop, socket_path=socket_path)
loop_instance_map[key] = new_client
else:
log.debug('Re-using IPCClient for {0}'.format(key))
return loop_instance_map[key]
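    # Note on the singleton behaviour implemented above: constructing two
    # clients with the same io_loop and socket path hands back one shared
    # instance (the path below is illustrative):
    #
    #     a = IPCClient('/var/run/salt/minion_event.ipc')
    #     b = IPCClient('/var/run/salt/minion_event.ipc')
    #     assert a is b   # __singleton_init__ ran only once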
def __singleton_init__(self, socket_path, io_loop=None):
'''
Create a new IPC client
IPC clients cannot bind to ports, but must connect to
existing IPC servers. Clients can then send messages
to the server.
'''
self.io_loop = io_loop or tornado.ioloop.IOLoop.current()
self.socket_path = socket_path
self._closing = False
self.stream = None
self.unpacker = msgpack.Unpacker()
def __init__(self, socket_path, io_loop=None):
# Handled by singleton __new__
pass
def connected(self):
return self.stream is not None and not self.stream.closed()
def connect(self, callback=None, timeout=None):
'''
Connect to the IPC socket
'''
if hasattr(self, '_connecting_future') and not self._connecting_future.done(): # pylint: disable=E0203
future = self._connecting_future # pylint: disable=E0203
else:
future = tornado.concurrent.Future()
self._connecting_future = future
self.io_loop.add_callback(self._connect, timeout=timeout)
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
return future
@tornado.gen.coroutine
def _connect(self, timeout=None):
'''
Connect to a running IPCServer
'''
if isinstance(self.socket_path, int):
sock_type = socket.AF_INET
sock_addr = ('127.0.0.1', self.socket_path)
else:
sock_type = socket.AF_UNIX
sock_addr = self.socket_path
self.stream = None
if timeout is not None:
timeout_at = time.time() + timeout
while True:
if self._closing:
break
if self.stream is None:
self.stream = IOStream(
socket.socket(sock_type, socket.SOCK_STREAM),
io_loop=self.io_loop,
)
try:
log.trace('IPCClient: Connecting to socket: {0}'.format(self.socket_path))
yield self.stream.connect(sock_addr)
self._connecting_future.set_result(True)
break
except Exception as e:
if self.stream.closed():
self.stream = None
if timeout is None or time.time() > timeout_at:
if self.stream is not None:
self.stream.close()
self.stream = None
self._connecting_future.set_exception(e)
break
yield tornado.gen.sleep(1)
def __del__(self):
self.close()
def close(self):
'''
Routines to handle any cleanup before the instance shuts down.
Sockets and filehandles should be closed explicitly, to prevent
leaks.
'''
if self._closing:
return
self._closing = True
if self.stream is not None and not self.stream.closed():
self.stream.close()
class IPCMessageClient(IPCClient):
'''
Salt IPC message client
Create an IPC client to send messages to an IPC server
An example of a very simple IPCMessageClient connecting to an IPCServer. This
example assumes an already running IPCMessage server.
IMPORTANT: The below example also assumes a running IOLoop process.
# Import Tornado libs
import tornado.ioloop
# Import Salt libs
import salt.config
import salt.transport.ipc
io_loop = tornado.ioloop.IOLoop.current()
ipc_server_socket_path = '/var/run/ipc_server.ipc'
ipc_client = salt.transport.ipc.IPCMessageClient(ipc_server_socket_path, io_loop=io_loop)
# Connect to the server
ipc_client.connect()
# Send some data
ipc_client.send('Hello world')
'''
# FIXME timeout unimplemented
# FIXME tries unimplemented
@tornado.gen.coroutine
def send(self, msg, timeout=None, tries=None):
'''
Send a message to an IPC socket
If the socket is not currently connected, a connection will be established.
:param dict msg: The message to be sent
:param int timeout: Timeout when sending message (Currently unimplemented)
'''
if not self.connected():
yield self.connect()
pack = salt.transport.frame.frame_msg(msg, raw_body=True)
yield self.stream.write(pack)
class IPCMessageServer(IPCServer):
'''
Salt IPC message server
Creates a message server which can create and bind to a socket on a given
path and then respond to messages asynchronously.
An example of a very simple IPCServer which prints received messages to
a console:
# Import Tornado libs
import tornado.ioloop
# Import Salt libs
import salt.transport.ipc
import salt.config
opts = salt.config.master_opts()
io_loop = tornado.ioloop.IOLoop.current()
ipc_server_socket_path = '/var/run/ipc_server.ipc'
ipc_server = salt.transport.ipc.IPCMessageServer(opts, io_loop=io_loop
stream_handler=print_to_console)
# Bind to the socket and prepare to run
ipc_server.start(ipc_server_socket_path)
# Start the server
io_loop.start()
# This callback is run whenever a message is received
def print_to_console(payload):
print(payload)
See IPCMessageClient() for an example of sending messages to an IPCMessageServer instance
'''
class IPCMessagePublisher(object):
'''
A Tornado IPC Publisher similar to Tornado's TCPServer class
but using either UNIX domain sockets or TCP sockets
'''
def __init__(self, opts, socket_path, io_loop=None):
'''
Create a new Tornado IPC server
:param dict opts: Salt options
:param str/int socket_path: Path on the filesystem for the
socket to bind to. This socket does
not need to exist prior to calling
this method, but parent directories
should.
It may also be of type 'int', in
which case it is used as the port
for a tcp localhost connection.
:param IOLoop io_loop: A Tornado ioloop to handle scheduling
'''
self.opts = opts
self.socket_path = socket_path
self._started = False
# Placeholders for attributes to be populated by method calls
self.sock = None
self.io_loop = io_loop or IOLoop.current()
self._closing = False
self.streams = set()
def start(self):
'''
Perform the work necessary to start up a Tornado IPC server
Blocks until socket is established
'''
# Start up the ioloop
log.trace('IPCMessagePublisher: binding to socket: {0}'.format(self.socket_path))
if isinstance(self.socket_path, int):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.setblocking(0)
self.sock.bind(('127.0.0.1', self.socket_path))
# Based on default used in tornado.netutil.bind_sockets()
self.sock.listen(128)
else:
self.sock = tornado.netutil.bind_unix_socket(self.socket_path)
tornado.netutil.add_accept_handler(
self.sock,
self.handle_connection,
io_loop=self.io_loop,
)
self._started = True
@tornado.gen.coroutine
def _write(self, stream, pack):
try:
yield stream.write(pack)
except tornado.iostream.StreamClosedError:
log.trace('Client disconnected from IPC {0}'.format(self.socket_path))
self.streams.discard(stream)
except Exception as exc:
log.error('Exception occurred while handling stream: {0}'.format(exc))
if not stream.closed():
stream.close()
self.streams.discard(stream)
def publish(self, msg):
'''
Send message to all connected sockets
'''
if not len(self.streams):
return
pack = salt.transport.frame.frame_msg(msg, raw_body=True)
for stream in self.streams:
self.io_loop.spawn_callback(self._write, stream, pack)
def handle_connection(self, connection, address):
log.trace('IPCServer: Handling connection to address: {0}'.format(address))
try:
if self.opts['ipc_write_buffer'] > 0:
log.trace('Setting IPC connection write buffer: {0}'.format((self.opts['ipc_write_buffer'])))
stream = IOStream(
connection,
io_loop=self.io_loop,
max_write_buffer_size=self.opts['ipc_write_buffer']
)
else:
stream = IOStream(
connection,
io_loop=self.io_loop
)
self.streams.add(stream)
except Exception as exc:
log.error('IPC streaming error: {0}'.format(exc))
def close(self):
'''
Routines to handle any cleanup before the instance shuts down.
Sockets and filehandles should be closed explicitly, to prevent
leaks.
'''
if self._closing:
return
self._closing = True
for stream in self.streams:
stream.close()
self.streams.clear()
if hasattr(self.sock, 'close'):
self.sock.close()
def __del__(self):
self.close()
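# Illustrative publisher-side counterpart to the IPCMessageSubscriber example
# below (opts must be a Salt options dict; the socket path is hypothetical):
#
#     io_loop = tornado.ioloop.IOLoop.current()
#     pub = IPCMessagePublisher(opts, '/var/run/ipc_publisher.ipc', io_loop=io_loop)
#     pub.start()
#     pub.publish({'tag': 'demo', 'data': 'hello'})
#     io_loop.start()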
class IPCMessageSubscriber(IPCClient):
'''
Salt IPC message subscriber
Create an IPC client to receive messages from IPC publisher
An example of a very simple IPCMessageSubscriber connecting to an IPCMessagePublisher.
This example assumes an already running IPCMessagePublisher.
IMPORTANT: The below example also assumes the IOLoop is NOT running.
# Import Tornado libs
import tornado.ioloop
# Import Salt libs
import salt.config
import salt.transport.ipc
# Create a new IO Loop.
# We know that this new IO Loop is not currently running.
io_loop = tornado.ioloop.IOLoop()
ipc_publisher_socket_path = '/var/run/ipc_publisher.ipc'
ipc_subscriber = salt.transport.ipc.IPCMessageSubscriber(ipc_server_socket_path, io_loop=io_loop)
# Connect to the server
# Use the associated IO Loop that isn't running.
io_loop.run_sync(ipc_subscriber.connect)
# Wait for some data
package = ipc_subscriber.read_sync()
'''
def __singleton_init__(self, socket_path, io_loop=None):
super(IPCMessageSubscriber, self).__singleton_init__(
socket_path, io_loop=io_loop)
self._read_sync_future = None
self._read_stream_future = None
self._sync_ioloop_running = False
self.saved_data = []
@tornado.gen.coroutine
def _read_sync(self, timeout):
exc_to_raise = None
ret = None
try:
while True:
if self._read_stream_future is None:
self._read_stream_future = self.stream.read_bytes(4096, partial=True)
if timeout is None:
wire_bytes = yield self._read_stream_future
else:
future_with_timeout = FutureWithTimeout(
self.io_loop, self._read_stream_future, timeout)
wire_bytes = yield future_with_timeout
self._read_stream_future = None
# Remove the timeout once we get some data or an exception
# occurs. We will assume that the rest of the data is already
# there or is coming soon if an exception doesn't occur.
timeout = None
self.unpacker.feed(wire_bytes)
first = True
for framed_msg in self.unpacker:
if first:
ret = framed_msg['body']
first = False
else:
self.saved_data.append(framed_msg['body'])
if not first:
# We read at least one piece of data
break
except tornado.ioloop.TimeoutError:
# In the timeout case, just return None.
# Keep 'self._read_stream_future' alive.
ret = None
except tornado.iostream.StreamClosedError as exc:
log.trace('Subscriber disconnected from IPC {0}'.format(self.socket_path))
self._read_stream_future = None
exc_to_raise = exc
except Exception as exc:
log.error('Exception occurred in Subscriber while handling stream: {0}'.format(exc))
self._read_stream_future = None
exc_to_raise = exc
if self._sync_ioloop_running:
# Stop the IO Loop so that self.io_loop.start() will return in
# read_sync().
self.io_loop.spawn_callback(self.io_loop.stop)
if exc_to_raise is not None:
raise exc_to_raise # pylint: disable=E0702
raise tornado.gen.Return(ret)
def read_sync(self, timeout=None):
'''
Read a message from an IPC socket
The socket must already be connected.
The associated IO Loop must NOT be running.
:param int timeout: Timeout when receiving message
:return: message data if successful. None if timed out. Will raise an
exception for all other error conditions.
'''
if self.saved_data:
return self.saved_data.pop(0)
self._sync_ioloop_running = True
self._read_sync_future = self._read_sync(timeout)
self.io_loop.start()
self._sync_ioloop_running = False
ret_future = self._read_sync_future
self._read_sync_future = None
return ret_future.result()
@tornado.gen.coroutine
def _read_async(self, callback):
while not self.connected():
try:
yield self.connect()
except tornado.iostream.StreamClosedError:
log.trace('Subscriber closed stream on IPC {0} before connect'.format(self.socket_path))
except Exception as exc:
log.error('Exception occurred while Subscriber connecting: {0}'.format(exc))
while not self.stream.closed():
try:
self._read_stream_future = self.stream.read_bytes(4096, partial=True)
wire_bytes = yield self._read_stream_future
self._read_stream_future = None
self.unpacker.feed(wire_bytes)
for framed_msg in self.unpacker:
body = framed_msg['body']
self.io_loop.spawn_callback(callback, body)
except tornado.iostream.StreamClosedError:
log.trace('Subscriber disconnected from IPC {0}'.format(self.socket_path))
break
except Exception as exc:
log.error('Exception occurred while Subscriber handling stream: {0}'.format(exc))
def read_async(self, callback):
'''
Asynchronously read messages and invoke a callback when they are ready.
:param callback: A callback with the received data
'''
self.io_loop.spawn_callback(self._read_async, callback)
def close(self):
'''
Routines to handle any cleanup before the instance shuts down.
Sockets and filehandles should be closed explicitly, to prevent
leaks.
'''
if not self._closing:
IPCClient.close(self)
# This will prevent this message from showing up:
# '[ERROR ] Future exception was never retrieved:
# StreamClosedError'
if self._read_sync_future is not None:
self._read_sync_future.exc_info()
if self._read_stream_future is not None:
self._read_stream_future.exc_info()
def __del__(self):
self.close()
| apache-2.0 | 184,623,737,381,527,700 | 34.47905 | 119 | 0.578829 | false | 4.420219 | false | false | false |
Alex-Ian-Hamilton/sunpy | sunpy/tests/setup_command.py | 1 | 4158 | # -*- coding: utf-8 -*-
"""
Created on Sat Jun 7 19:36:08 2014
@author: Stuart Mumford
This file is designed to be imported and run only via setup.py, hence its
dependency on astropy_helpers, which will be available in that context.
"""
from __future__ import absolute_import, division, print_function
import os
from astropy_helpers.commands.test import AstropyTest
from astropy_helpers.compat import _fix_user_options
class SunPyTest(AstropyTest):
description = 'Run the tests for this package'
user_options = [
# Package to test
('package=', 'P',
"The name of a specific package to test, e.g. 'io' or 'utils'. "
"If nothing is specified, all default tests are run."),
# Print all the things
('verbose-results', 'V',
'Turn on verbose output from pytest.'),
# plugins to enable
('plugins=', 'p',
'Plugins to enable when running pytest.'),
# Run online tests?
('online', 'R',
'Also run tests that do require a internet connection.'),
# Run only online tests?
('online-only', None,
'Only run test that do require a internet connection.'),
# Run tests that check figure generation
('figure', None,
'Run tests that compare figures against stored hashes.'),
# Calculate test coverage
('coverage', 'c',
'Create a coverage report. Requires the coverage package.'),
('cov-report=', None,
'Specify the type of coverage report to generate. (Default terminal)'),
# Run tests in parallel
('parallel=', 'j',
'Run the tests in parallel on the specified number of '
'CPUs. If negative, all the cores on the machine will be '
'used. Requires the pytest-xdist plugin.'),
# Pass additional cli args to pytest
('args=', 'a',
'Additional arguments to be passed to pytest.')
]
user_options = _fix_user_options(user_options)
package_name = ''
def initialize_options(self):
self.package = ''
#self.test_path = None
self.verbose_results = False
self.plugins = None
self.args = None
self.online = False
self.online_only = False
self.figure = False
self.coverage = False
self.cov_report = 'term' if self.coverage else None
self.docs_path = os.path.abspath('doc')
self.parallel = 0
self.temp_root = None
def _validate_required_deps(self):
"""
This method checks that any required modules are installed before
running the tests.
"""
try:
import sunpy
except ImportError:
raise ImportError(
"The 'test' command requires the sunpy package to be "
"installed and importable.")
def generate_testing_command(self):
"""
Build a Python script to run the tests.
"""
cmd_pre = '' # Commands to run before the test function
cmd_post = '' # Commands to run after the test function
if self.coverage:
pre, post = self._generate_coverage_commands()
cmd_pre += pre
cmd_post += post
online = self.online
offline = not self.online_only
cmd = ('{cmd_pre}{0}; import {1.package_name}, sys; result = ('
'{1.package_name}.self_test('
'modulename={1.package!r}, '
'args={1.args!r}, '
'verbose={1.verbose_results!r}, '
'parallel={1.parallel!r}, '
'online={online!r}, '
'offline={offline!r}, '
'figure={figure!r}, '
'coverage={1.coverage!r}, '
'cov_report={1.cov_report!r})); '
'{cmd_post}'
'sys.exit(result)')
x = cmd.format('pass',
self,
online=online,
offline=offline,
figure=self.figure,
cmd_pre=cmd_pre,
cmd_post=cmd_post)
return x
| bsd-2-clause | 7,418,370,620,876,246,000 | 32.804878 | 80 | 0.550505 | false | 4.340292 | true | false | false |
johnlinp/telegram-good-timing-bot | goodtiming/core/bot.py | 1 | 1942 | import goodtiming.core.i18n
from goodtiming.core.parser import CompositeParser
from goodtiming.core.processor import CompositeProcessor
from goodtiming.core.renderer import CompositeRenderer
import goodtiming.core.database
import goodtiming.modules.addtodo
import goodtiming.modules.reporttiming
import goodtiming.modules.done
import goodtiming.modules.show
import goodtiming.modules.huh
class Bot:
def __init__(self, language):
self.database = goodtiming.core.database.Database()
modules = [
goodtiming.modules.addtodo.AddTodoModule(),
goodtiming.modules.reporttiming.ReportTimingModule(),
goodtiming.modules.done.DoneModule(),
goodtiming.modules.show.ShowModule(),
goodtiming.modules.huh.HuhModule(),
]
sub_parsers = []
sub_processors = []
sub_renderers = []
for module in modules:
sub_parsers.extend(module.parsers())
sub_processors.extend(module.processors())
sub_renderers.extend(module.renderers())
self.parser = CompositeParser(sub_parsers)
self.processor = CompositeProcessor(sub_processors)
self.renderer = CompositeRenderer(sub_renderers)
def start(self, doer_id):
try:
self.database.execute('INSERT INTO doer (doer_id) VALUES (%s)', (doer_id,))
except goodtiming.core.database.DatabaseUniqueViolation:
pass
return _('Welcome!\nType \"buy some socks when I am at grocery store\" or type /help to see the usage.')
def help(self):
return _('I can understand the following patterns:\n\n1. <do something> when I am <some timing>\n2. I am <some timing>\n3. The one about <something> is done')
def chat(self, message, doer_id):
request = self.parser.parse(message)
response = self.processor.process(request, doer_id)
return self.renderer.render(response)
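# Minimal usage sketch (assumes the backing database used by
# goodtiming.core.database is reachable; the doer_id value is arbitrary):
#
#     bot = Bot('en')
#     bot.start(doer_id=42)
#     reply = bot.chat('buy some socks when I am at grocery store', doer_id=42)
#
# start() registers the doer once, while chat() runs the parse -> process ->
# render pipeline assembled in __init__.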
| bsd-3-clause | -7,889,959,222,115,885,000 | 35.641509 | 166 | 0.677652 | false | 3.995885 | false | false | false |
pjdelport/django | django/contrib/auth/tests/context_processors.py | 5 | 6705 | import os
from django.conf import global_settings
from django.contrib.auth import authenticate
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.auth.models import User, Permission
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.context_processors import PermWrapper, PermLookupDict
from django.db.models import Q
from django.test import TestCase
from django.test.utils import override_settings
class MockUser(object):
def has_module_perms(self, perm):
if perm == 'mockapp':
return True
return False
def has_perm(self, perm):
if perm == 'mockapp.someperm':
return True
return False
class PermWrapperTests(TestCase):
"""
Test some details of the PermWrapper implementation.
"""
class EQLimiterObject(object):
"""
This object makes sure __eq__ will not be called endlessly.
"""
def __init__(self):
self.eq_calls = 0
def __eq__(self, other):
if self.eq_calls > 0:
return True
self.eq_calls += 1
return False
def test_permwrapper_in(self):
"""
Test that 'something' in PermWrapper works as expected.
"""
perms = PermWrapper(MockUser())
# Works for modules and full permissions.
self.assertTrue('mockapp' in perms)
self.assertFalse('nonexisting' in perms)
self.assertTrue('mockapp.someperm' in perms)
self.assertFalse('mockapp.nonexisting' in perms)
def test_permlookupdict_in(self):
"""
No endless loops if accessed with 'in' - refs #18979.
"""
pldict = PermLookupDict(MockUser(), 'mockapp')
with self.assertRaises(TypeError):
self.EQLimiterObject() in pldict
@skipIfCustomUser
@override_settings(
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(__file__), 'templates'),
),
USE_TZ=False, # required for loading the fixture
PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
)
class AuthContextProcessorTests(TestCase):
"""
Tests for the ``django.contrib.auth.context_processors.auth`` processor
"""
urls = 'django.contrib.auth.tests.urls'
fixtures = ['context-processors-users.xml']
@override_settings(
MIDDLEWARE_CLASSES=global_settings.MIDDLEWARE_CLASSES,
TEMPLATE_CONTEXT_PROCESSORS=global_settings.TEMPLATE_CONTEXT_PROCESSORS,
)
def test_session_not_accessed(self):
"""
Tests that the session is not accessed simply by including
the auth context processor
"""
response = self.client.get('/auth_processor_no_attr_access/')
self.assertContains(response, "Session not accessed")
@override_settings(
MIDDLEWARE_CLASSES=global_settings.MIDDLEWARE_CLASSES,
TEMPLATE_CONTEXT_PROCESSORS=global_settings.TEMPLATE_CONTEXT_PROCESSORS,
)
def test_session_is_accessed(self):
"""
Tests that the session is accessed if the auth context processor
is used and relevant attributes accessed.
"""
response = self.client.get('/auth_processor_attr_access/')
self.assertContains(response, "Session accessed")
def test_perms_attrs(self):
u = User.objects.create_user(username='normal', password='secret')
u.user_permissions.add(
Permission.objects.get(
content_type=ContentType.objects.get_for_model(Permission),
codename='add_permission'))
self.client.login(username='normal', password='secret')
response = self.client.get('/auth_processor_perms/')
self.assertContains(response, "Has auth permissions")
self.assertContains(response, "Has auth.add_permission permissions")
self.assertNotContains(response, "nonexisting")
def test_perm_in_perms_attrs(self):
u = User.objects.create_user(username='normal', password='secret')
u.user_permissions.add(
Permission.objects.get(
content_type=ContentType.objects.get_for_model(Permission),
codename='add_permission'))
self.client.login(username='normal', password='secret')
response = self.client.get('/auth_processor_perm_in_perms/')
self.assertContains(response, "Has auth permissions")
self.assertContains(response, "Has auth.add_permission permissions")
self.assertNotContains(response, "nonexisting")
def test_message_attrs(self):
self.client.login(username='super', password='secret')
response = self.client.get('/auth_processor_messages/')
self.assertContains(response, "Message 1")
def test_user_attrs(self):
"""
Test that the lazy objects returned behave just like the wrapped objects.
"""
# These are 'functional' level tests for common use cases. Direct
# testing of the implementation (SimpleLazyObject) is in the 'utils'
# tests.
self.client.login(username='super', password='secret')
user = authenticate(username='super', password='secret')
response = self.client.get('/auth_processor_user/')
self.assertContains(response, "unicode: super")
self.assertContains(response, "id: 100")
self.assertContains(response, "username: super")
# bug #12037 is tested by the {% url %} in the template:
self.assertContains(response, "url: /userpage/super/")
# See if this object can be used for queries where a Q() comparing
# a user can be used with another Q() (in an AND or OR fashion).
# This simulates what a template tag might do with the user from the
# context. Note that we don't need to execute a query, just build it.
#
# The failure case (bug #12049) on Python 2.4 with a LazyObject-wrapped
# User is a fatal TypeError: "function() takes at least 2 arguments
# (0 given)" deep inside deepcopy().
#
# Python 2.5 and 2.6 succeeded, but logged internally caught exception
# spew:
#
# Exception RuntimeError: 'maximum recursion depth exceeded while
# calling a Python object' in <type 'exceptions.AttributeError'>
# ignored"
query = Q(user=response.context['user']) & Q(someflag=True)
# Tests for user equality. This is hard because User defines
# equality in a non-duck-typing way
# See bug #12060
self.assertEqual(response.context['user'], user)
self.assertEqual(user, response.context['user'])
| bsd-3-clause | -3,832,255,872,866,182,000 | 38.910714 | 81 | 0.646085 | false | 4.328599 | true | false | false |
mcouthon/benes | core/matrix.py | 1 | 9583 | import math
import numpy
from numpy import linalg as LA
import sympy
from sympy.core.symbol import Dummy
from sympy.simplify.simplify import nsimplify
import calculations
import itertools
import q_vec
#import scipy
#import scipy.linalg
class matrix_factory(object):
@staticmethod
def get_probability_matrix(n, q, isSymbolic):
"""
:param n: vector size
:param q: tuple size
        :param isSymbolic: determines whether the calculation will be symbolic or use float128 precision
:return: returns a matrix instace of size (n)_q with benesh probabilities
"""
matrix_instance = matrix()
matrix_instance.n = n
matrix_instance.q = q
size = int(math.floor(math.factorial(n) / math.factorial(n-q))) # (n)_q
matrix_instance.r = size # rows
matrix_instance.c = size # cols
matrix_instance.isSymbolic = isSymbolic
matrix_instance.matrix_type = 'BENESH'
if (isSymbolic == True): # choose matrix type
matrix_instance.m=sympy.Matrix(numpy.zeros([matrix_instance.r,matrix_instance.c]))
else:
matrix_instance.m=numpy.zeros([matrix_instance.r,matrix_instance.c],dtype=numpy.float64)
matrix_instance.indicesToVectors = []
matrix_instance.vectorsToIndices = {}
i = 0 # build map vector <-> matrix index
for v in itertools.permutations(range(n), q):
matrix_instance.indicesToVectors.append(v)
matrix_instance.vectorsToIndices[v] = i
i = i + 1
for i in range(0, matrix_instance.r): # init matrix with base values
alpha = matrix_instance.indicesToVectors[i]
for j in range(0, matrix_instance.c):
beta = matrix_instance.indicesToVectors[j]
matrix_instance.m[i, j] = calculations.calculate_benes(alpha, beta, n)
return matrix_instance
@staticmethod
def get_probability_disk_matrix(n, q, isSymbolic):
"""
        Uses disk-backed (HDF5) storage instead of holding the matrix in RAM.
:param n: vector size
:param q: tuple size
        :param isSymbolic: determines whether the calculation will be symbolic or use float128 precision
:return: returns a matrix instace of size (n)_q with benesh probabilities
"""
import h5py
matrix_instance = matrix()
matrix_instance.n = n
matrix_instance.q = q
size = int(math.floor(math.factorial(n) / math.factorial(n-q))) # (n)_q
matrix_instance.r = size # rows
matrix_instance.c = size # cols
matrix_instance.isSymbolic = isSymbolic
matrix_instance.matrix_type = 'BENESH'
if (isSymbolic == True): # choose matrix type
matrix_instance.m=sympy.Matrix(numpy.zeros([matrix_instance.r,matrix_instance.c]))
else:
f = h5py.File("/tmp/mytestfile.hdf5", "w")
matrix_instance.f = f
matrix_instance.m = f.create_dataset("mydataset",
(matrix_instance.r,matrix_instance.c),
dtype=numpy.float64)
# numpy.zeros([matrix_instance.r,matrix_instance.c],dtype=numpy.float64)
matrix_instance.indicesToVectors = []
matrix_instance.vectorsToIndices = {}
i = 0 # build map vector <-> matrix index
for v in itertools.permutations(range(n), q):
matrix_instance.indicesToVectors.append(v)
matrix_instance.vectorsToIndices[v] = i
i = i + 1
for i in range(0, matrix_instance.r): # init matrix with base values
alpha = matrix_instance.indicesToVectors[i]
for j in range(0, matrix_instance.c):
beta = matrix_instance.indicesToVectors[j]
matrix_instance.m[i, j] = calculations.calculate_benes(alpha, beta, n)
return matrix_instance
@staticmethod
def get_reduced_matrix(n, q, isSymbolic):
qv = q_vec.q_vec(n, q)
columns = qv.build_reduced_matrix()
matrix_instance = matrix()
matrix_instance.n = n
matrix_instance.q = q
if (isSymbolic == True): # choose matrix type
matrix_instance.m=sympy.Matrix(numpy.matrix(columns))
else:
matrix_instance.m=numpy.matrix(columns)
#matrix_instance.m = numpy.matrix(columns)
size = int(math.floor(math.factorial(n) / math.factorial(n-q))) # (n)_q
matrix_instance.r = len(columns) # rows
matrix_instance.c = len(columns) # cols
matrix_instance.isSymbolic = isSymbolic
matrix_instance.matrix_type = 'REDUCED'
return matrix_instance
class matrix(object):
"""
matrix class, wrapper for linear algebra calculations in the project
"""
def __init__(self):
return
def get_size(self):
return self.r
def get_symbol_by_index(self,i):
return self.indicesToVectors[i]
def get_probability_for_symbols(self, t1, t2):
"""
        Return the probability of moving from symbol t1 to symbol t2, read from the matrix.
        :param t1: symbolic tuple (q-tuple or type)
        :param t2: symbolic tuple (q-tuple or type)
        :return:
"""
if (self.matrix_type == 'BENESH'):
i = self.vectorsToIndices[t1]
j = self.vectorsToIndices[t2]
elif (self.matrix_type == 'REDUCED'):
i = 0;
j = 0;
return self.m[i,j]
def get_eigenvalues(self):
"""
returns the eigenvalues of the matrix,
        using the appropriate library, depending on whether the matrix is symbolic
:return:
"""
if (self.isSymbolic == True):
w = self.m.eigenvals()
else:
w,v = LA.eigh(self.m)
#w,v = scipy.linalg.eig(self.m)
return w;
def get_diagonal(self):
"""
returns the diagonal form of the matrix
:return:
"""
if (self.isSymbolic == True):
P, D = self.m.diagonalize();
return D
else:
w, v = LA.eigh(self.m)
P = numpy.matrix(v)
D = numpy.transpose(P) * self.m * P
return D
def getMatrixPower(self, p, compute_diagonal=True):
"""
Diagonlizes the matrix, and exponentiates it efficiently.
returns the matrix p-th power.
:param p:
:return:
"""
if compute_diagonal:
if (self.isSymbolic == False):
w, v = LA.eigh(self.m)
P = numpy.matrix(v)
D = numpy.transpose(P) * self.m * P
for i in range (0,self.r):
D[i,i]=pow(D[i,i],p)
D = P * D * numpy.transpose(P)
return D
else:
P, D = self.m.diagonalize();
for i in range (0,self.r):
D[i,i]=pow(D[i,i],p)
                D = P * D * P**-1
return D
else:
            return self.m**p
def get_eigenvalue_set(self):
"""
returns a set of eigenvalues for the matrix
:return:
"""
return set(self.get_eigenvalues())
def get_round_eigevalue_set(self):
"""
        returns a set of rounded (decimal precision) eigenvalues
        :return:
"""
if (self.isSymbolic == True):
return self.get_eigenvalues()
else:
return set(numpy.round(self.get_eigenvalues(), 4))
"""
Benesh probabilities utils
"""
@staticmethod
def fromBaseN(n,t):
"""
:param n: - the base
        :param t: - tuple representing coordinates in base n
:return: - decimal number
"""
sum = 0
p = len(t) - 1
for i in t:
sum += i*(pow(n,p))
p = p - 1
return sum
@staticmethod
def toBaseN(n,q,d):
"""
:param n: base we work in
:param q: number of digits in the vector
        :param d: decimal number to convert to base n, returned as a q-digit tuple
:return:
"""
l = [0]*(q)
for i in range(0,q):
l[i] = int(d%n)
d=math.floor(d/n)
l.reverse()
return tuple(l)
def custom_charpoly(self, **flags):
"""
custom charpoly
"""
if (self.isSymbolic == True):
self.m = self.m._new(self.m.rows, self.m.cols,[nsimplify(v, rational=True) for v in self.m])
max_denom = 0;
for i in range (0,self.m.rows):
for j in range (0,self.m.cols):
                    if self.m[i,j].q > max_denom:
max_denom = self.m[i,j].q
print max_denom
self.m *= max_denom
flags.pop('simplify', None) # pop unsupported flag
return self.m.berkowitz_charpoly(Dummy('x'))
else:
numpy.rint(self.m)
return numpy.rint(numpy.poly(self.m))
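# --- Usage sketch (added for illustration; not part of the original module).
# The n=4, q=2 values below are arbitrary assumptions chosen to keep the
# matrix small; any n >= q works.
if __name__ == '__main__':
    example = matrix_factory.get_probability_matrix(4, 2, False)
    print(example.get_size())                 # (4)_2 = 12 states
    print(example.get_round_eigevalue_set())  # rounded eigenvalue set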
| gpl-3.0 | -6,711,100,729,631,826,000 | 33.471223 | 110 | 0.514244 | false | 4.033249 | false | false | false |
sidnarayanan/BAdNet | train/images/utils.py | 1 | 3654 | import numpy as np
# import seaborn
from collections import namedtuple
from keras import backend as K
from keras.engine.topology import Layer
from scipy.interpolate import interp1d
## Loss functions
dice_smooth = 1.
def dice_coef(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + dice_smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + dice_smooth)
def dice_coef_loss(y_true, y_pred):
return -dice_coef(y_true, y_pred)
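# Usage sketch (illustrative, not from the original file): both functions above
# can be passed straight to Keras when compiling a segmentation model, e.g.
#   model.compile(optimizer='adam', loss=dice_coef_loss, metrics=[dice_coef])
# where `model` is assumed to be any Keras model producing per-pixel scores.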
## Layers and ops
## plotting tools
# class H1:
# '''Wrapper around numpy histogram
# '''
# def __init__(self,hist):
# self.bin_edges = hist[1]
# self.n_bins = self.bin_edges.shape[0]-1
# self.content = hist[0]
# def find_bin(self,x):
# if x < self.bin_edges[0]:
# return -1
# for ib in self.xrange(self.n_bins):
# if x>= self.bin_edges[ib]:
# return ib
# return self.n_bins
# def get_bin(self,ib):
# if ib<0 or ib>=self.n_bins:
# return 0
# return self.content[ib]
# def integral(self,lo=None,hi=None):
# if not lo:
# lo = 0
# if not hi:
# hi = self.n_bins
# widths = np.diff(self.bin_edges[lo:hi+1])
# return np.sum(self.content[lo:hi] * widths)
#
#
# def plot_hists(props, hists):
# plt.clf()
# bins = props['bins']
# for h in hists:
# plt.hist(h['vals'], bins=bins, weights=h['weights']/np.sum(h['weights']),
# histtype='step', # fill=False,
# color=h['color'], label=h['label'])
# if 'xlabel' in props:
# plt.xlabel(props['xlabel'])
# if 'ylabel' in props:
# plt.ylabel(props['ylabel'])
# plt.legend(loc=0)
# plt.savefig(props['output']+'.png',bbox_inches='tight',dpi=300)
# plt.savefig(props['output']+'.pdf',bbox_inches='tight')
#
#
#
# Tagger = namedtuple('Tagger',['response','name','lo','hi','flip'])
#
# def create_roc(taggers, labels, weights, output, nbins=50):
# colors = ['k','r','g','b']
# plt.clf()
# wps = []
# for t in taggers:
# color = colors[0]
# del colors[0]
# h_sig = H1(np.histogram(t.response[labels==1],
# weights=weights[labels==1],
# bins=nbins,range=(t.lo,t.hi),
# density=True))
# h_bkg = H1(np.histogram(t.response[labels==0],
# weights=weights[labels==0],
# bins=nbins,range=(t.lo,t.hi),
# density=True))
#
# epsilons_sig = []
# epsilons_bkg = []
# for ib in xrange(nbins):
# if t.flip:
# esig = h_sig.integral(hi=ib)
# ebkg = h_bkg.integral(hi=ib)
# else:
# esig = h_sig.integral(lo=ib)
# ebkg = h_bkg.integral(lo=ib)
# epsilons_sig.append(esig)
# epsilons_bkg.append(ebkg)
#
# interp = interp1d(epsilons_bkg,
# np.arange(t.lo,t.hi,float(t.hi-t.lo)/nbins))
# wps.append(interp(0.05))
#
# plt.plot(epsilons_sig, epsilons_bkg, color+'-',label=t.name)
# plt.axis([0,1,0.001,1])
# plt.yscale('log')
# plt.legend(loc=0)
# plt.ylabel('Background fake rate')
# plt.xlabel('Signal efficiency')
# plt.savefig(output+'.png',bbox_inches='tight',dpi=300)
# plt.savefig(output+'.pdf',bbox_inches='tight')
#
# return wps
| mit | 6,423,772,911,152,429,000 | 31.336283 | 96 | 0.511768 | false | 2.886256 | false | false | false |
nicolashainaux/mathmaker | mathmaker/lib/document/content/geometry/intercept_theorem_butterfly.py | 1 | 10506 | # -*- coding: utf-8 -*-
# Mathmaker creates automatically maths exercises sheets
# with their answers
# Copyright 2006-2017 Nicolas Hainaux <nh.techn@gmail.com>
# This file is part of Mathmaker.
# Mathmaker is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
# Mathmaker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Mathmaker; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import random
from mathmaker.lib import shared
from mathmaker.lib.tools.wording import setup_wording_format_of
from mathmaker.lib.core.root_calculus import Value
from mathmaker.lib.core.base_calculus import Item
from mathmaker.lib.document.content import component
ALL_LENGTHS_TO_CALCULATE = ['oneside', 'twosides']
class sub_object(component.structure):
def __init__(self, build_data, picture='true', **options):
super().setup("minimal", **options)
if build_data[0] < 11:
raise ValueError('build_data[0] == {} whereas it should be '
'>= 11'.format(str(build_data[0])))
build_data = (build_data[0] / 10, ) + build_data[1:]
super().setup("numbers", nb=build_data,
shuffle_nbs=False, **options)
super().setup("length_units", **options)
super().setup("intercept_theorem_figure", butterfly=True, **options)
if self.variant == 'default':
variant = ['random', 'random']
else:
if self.variant.count('_') != 1:
raise ValueError('XMLFileFormatError: the variant for '
'intercept_theorem_butterfly '
                                 'should contain one _')
variant = self.variant.split(sep='_')
valid_variant = [['random', 'oneside', 'twosides'],
['random', 'all', 'twocouples']]
for v, valid, n in zip(variant, valid_variant,
['first', 'second', 'third']):
if v not in valid:
raise ValueError('XMLFileFormatError: Invalid {} part of the '
'variant. It should be in: {}'
.format(n, str(valid)))
if variant[0] == 'random':
if variant[1] == 'twocouples':
variant[0] = 'oneside'
else:
variant[0] = random.choice(['oneside', 'twosides'])
if variant[1] == 'random':
if variant[0] == 'twosides':
variant[1] = 'twocouples'
else:
                variant[1] = random.choice(['all', 'twocouples'])
if variant == ['twosides', 'twocouples']:
raise ValueError('XMLFileFormatError: The twosides_twocouples '
'variant is impossible.')
# The order is:
# small[0] small[1] small[2] side[0] side[1] side[2]
labels_configurations = {
'oneside_all': [
['?', True, True, True, True, True],
[True, '?', True, True, True, True],
[True, True, '?', True, True, True],
[True, True, True, '?', True, True],
[True, True, True, True, '?', True],
[True, True, True, True, True, '?']
],
'oneside_twocouples': [
['?', True, False, True, True, False],
[False, True, '?', False, True, True],
[True, True, False, True, '?', False],
[False, True, True, False, '?', True],
['?', False, True, True, False, True],
[True, False, '?', True, False, True],
[True, '?', False, True, True, False],
[False, '?', True, False, True, True],
[False, True, True, False, True, '?'],
[True, True, False, '?', True, False],
[True, False, True, True, False, '?'],
[True, False, True, '?', False, True],
],
'twosides_all': [
['?', '?', True, True, True, True],
['?', True, '?', True, True, True],
[True, '?', '?', True, True, True],
['?', True, True, True, '?', True],
['?', True, True, True, True, '?'],
[True, '?', True, '?', True, True],
[True, '?', True, True, True, '?'],
[True, True, '?', True, '?', True],
[True, True, '?', '?', True, True],
[True, True, True, '?', '?', True],
[True, True, True, '?', True, '?'],
[True, True, True, True, '?', '?'],
]
}
variant_key = '_'.join(variant)
labels_conf = random.choice(labels_configurations[variant_key])
self.figure.setup_labels(labels_conf,
segments_list=self.figure.small
+ self.figure.side)
lengths_to_calculate = [s.length_name
for s in self.figure.small + self.figure.side
if s.label == Value('?')]
self.line1 = self.figure.small[1].length_name
self.line2 = self.figure.side[1].length_name
self.length1_name = lengths_to_calculate[0]
if len(lengths_to_calculate) == 2:
self.length2_name = lengths_to_calculate[1]
if len(lengths_to_calculate) == 1:
self.wording = _('The drawn figure is out of shape. {newline} '
'The lengths are given in {length_unit}. '
'{newline} '
'The {line1} is parallel to {line2}. {newline} '
'{newline} '
'Determine the length of {length1_name}.')
else:
self.wording = _('The drawn figure is out of shape. {newline} '
'The lengths are given in {length_unit}. '
'{newline} '
'The {line1} is parallel to {line2}. {newline} '
'{newline} '
'Determine the lengths of {length1_name} '
'and {length2_name}.')
setup_wording_format_of(self)
self.ratios = shared.machine.write_math_style1(
self.figure.ratios_equalities().into_str())
self.ratios_substituted = shared.machine.write_math_style1(
self.figure.ratios_equalities_substituted().into_str())
self.resolution0 = self.figure.ratios_equalities_substituted()\
.into_crossproduct_equation(Item(lengths_to_calculate[0]))\
.auto_resolution(dont_display_equations_name=True,
skip_first_step=True,
skip_fraction_simplification=True,
decimal_result=2,
unit=self.length_unit,
underline_result=True)
lengths_resolutions_part = _('hence: {resolution0} ')
if len(lengths_to_calculate) == 2:
self.resolution1 = self.figure.ratios_equalities_substituted()\
.into_crossproduct_equation(Item(lengths_to_calculate[1]))\
.auto_resolution(dont_display_equations_name=True,
skip_first_step=True,
skip_fraction_simplification=True,
decimal_result=2,
unit=self.length_unit,
underline_result=True)
lengths_resolutions_part = shared.machine.write(
lengths_resolutions_part + _('and: {resolution1} '),
multicolumns=2)
ans_variant = options.get('ans_variant', 'default')
ans_texts = {
'default': _('As: {line1} {parallel_to} {line2}, '
'{main_vertex_name} {belongs_to} {chunk0_length_name}'
' and '
'{main_vertex_name} {belongs_to} {chunk1_length_name}'
', then by the intercept theorem: {newline} '
'{ratios} '
'thus: {ratios_substituted} '),
'alternative1': _('As {line1} is parallel to {line2}, '
'and as the line {chunk0_length_name} cuts '
'the line {chunk1_length_name} at point '
'{main_vertex_name}, '
'then by the intercept theorem: {newline} '
'{ratios} '
'thus: {ratios_substituted} '),
'alternative2': _('As: {line1} is parallel to {line2}, '
'and as {point0_name}, {main_vertex_name} and '
'{vertex1_name} on one hand, '
'{point1_name}, {main_vertex_name} and '
'{vertex2_name} on the other hand,'
'are aligned in the same order, '
'then by the intercept theorem: {newline} '
'{ratios} '
'thus: {ratios_substituted} ')
}
self.answer_wording = ans_texts[ans_variant] + lengths_resolutions_part
setup_wording_format_of(self, w_prefix='answer_')
def q(self, **options):
return shared.machine.write_layout(
(1, 2),
[10, 10],
[self.wording.format(**self.wording_format),
shared.machine.insert_picture(self.figure,
scale=0.7,
top_aligned_in_a_tabular=True)])
def a(self, **options):
return self.answer_wording.format(**self.answer_wording_format)
# TODO: create the "js" answer (for interactive pdf)
# def js_a(self, **kwargs):
# return [self......jsprinted]
| gpl-3.0 | 337,846,521,828,428,540 | 45.078947 | 79 | 0.488007 | false | 4.199041 | false | false | false |
ireapps/coding-for-journalists | 6_from_apis/completed/fun_with_sqlite_done.py | 1 | 1985 | # SQLite is a lightweight database manager that's part of Python's standard
# library, so it's a good example of how to hook a script up to a database.
# If you work in MySQL or Postgres, there are libraries you can use to make
# a connection and gain similar functionality.
import sqlite3
# Connect to a test database; if one doesn't exist, it will be created on
# the fly. We also fire up a cursor to poke, prod and manipulate our
# database.
conn = sqlite3.connect('my_test.sqlite')
c = conn.cursor()
# Right now it's an empty database with no tables and no data. Let's create
# a basic one that holds some CEO information.
c.execute(
'CREATE TABLE ceos '
'(ceo_name text, company text, salary int)')
# NOTE: with scripts, sometimes it's a good idea to preface a CREATE
# TABLE query with IF NOT EXISTS, that way you won't get an operational
# error.
# Let's insert three CEO names, companies and salaries into our ceos table.
c.execute(
"INSERT INTO ceos "
"VALUES ('John Smith', 'Acme, Inc.', '275000'), "
"('Libby Rogers', 'AstroTech', '1200000'), "
"('Darla Jones', 'Ballard Partners', '942000')")
# When we alter a table, we have to commit those changes.
conn.commit()
# Let's run a quick query that gives us everything in the table.
c.execute(
"SELECT * FROM ceos")
# The database has run the query and gives it back to us as a list of tuples
# for each row. We have to fetch this information.
result = c.fetchall()
print result
# Try fetchall() again; it should be empty and will be until we run another
# query.
c.fetchall()
# Let's try another basic query: a sum of the salaries.
c.execute(
"SELECT SUM(salary) FROM ceos")
result2 = c.fetchall()
print result2
# One more: companies that start with 'A,' sorted in descending order by
# salary
c.execute(
"SELECT * FROM ceos "
"WHERE company LIKE 'A%' "
"ORDER BY salary DESC")
result3 = c.fetchall()
print result3
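# A couple of extra notes (not part of the original walkthrough): sqlite3 also
# supports parameterized queries, which are safer than building SQL strings by
# hand, e.g.
# c.execute("SELECT * FROM ceos WHERE salary > ?", (500000,))
# print c.fetchall()
# And once we're finished, it's good practice to release the connection:
# conn.close()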
| mit | 9,214,594,952,591,105,000 | 32.083333 | 77 | 0.689673 | false | 3.550984 | false | false | false |
bearing/dosenet-analysis | D3S_analysis/radon_variation_analysis.py | 1 | 19367 | import importlib
import io
import os
import csv
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.path as path
import matplotlib.dates as mdates
from dateutil.parser import parse
from datetime import datetime
from datetime import timedelta
# Python 2 and 3: easiest option
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
import pytz
from matplotlib.backends.backend_pdf import PdfPages
import weather_data_tools as weather
importlib.reload(weather)
import spectra_fitting_tools as fitter
importlib.reload(fitter)
#--------------------------------------------------------------------------#
# Process input data
#--------------------------------------------------------------------------#
def make_int(lst):
'''
Makes all entries of a list an integer
'''
y = []
for i in lst:
y.append(int(i))
return y
def make_array(lst):
'''
Makes list into an array. Also splices out the irrelevant stuff
for a spectra
'''
y = np.asarray(make_int(lst[12:]))
return y
def get_times(rows, n, tstart, tstop):
'''
Get list of times for data: determines time as the midpoint between the upper and lower bounds in the integration window
Arguments:
- full list of inputs from data csv
- number of hours to integrate for each data point
- start/stop dates
Returns:
- list of times
'''
ndays = (tstop - tstart).days
entries = 12*n
nintervals = (24/n)
i = 0
counter = 0
times = []
while counter < ndays*nintervals:
integration = rows[(i*entries)+1:((i+1)*entries)+1]
i+=1
time_range = []
datatz = parse(integration[-1][1]).tzinfo
if (parse(integration[-1][1])<tstop.replace(tzinfo=datatz)) and \
(parse(integration[0][1])>tstart.replace(tzinfo=datatz)):
for j in integration:
time_range.append(parse(j[1]))
times.append(time_range[int(len(time_range)/2)])
counter+=1
return times
def get_arrays(values_w_errs):
vals = np.asarray([i[0] for i in values_w_errs])
errs = np.asarray([i[1] for i in values_w_errs])
return vals,errs
def varify_data(means,sigmas,amps):
# check for bad fits and use average of surrounding good fits
for i in range(len(means)):
if means[i][1] > 100 or math.isnan(means[i][0]):
print('Fit {} is bad!'.format(i))
j = 1
k = 1
if i<(len(means)-j):
while means[i+j][1] > 100:
j += 1
print('Trying {}+{} out of {}'.format(i,j,len(means)))
if i >= (len(means)-j):
print('Abort!')
break
if i>k:
while means[i-k][1] > 100 or math.isnan(means[i-k][0]):
k += 1
if i<k:
break
if i>k and i<(len(means)-j):
print('Averaging over {} and {}'.format(i-k,i+j))
means[i][0] = (means[i+j][0]+means[i-k][0])/2.0
means[i][1] = (means[i+j][1]+means[i-k][1])/2.0
sigmas[i][0] = (sigmas[i+j][0]+sigmas[i-k][0])/2.0
sigmas[i][1] = (sigmas[i+j][1]+sigmas[i-k][1])/2.0
amps[i][0] = (amps[i+j][0]+amps[i-k][0])/2.0
amps[i][1] = (amps[i+j][1]+amps[i-k][1])/2.0
elif i<k and i<(len(means)-j):
print('Using {}'.format(i+j))
means[i][0] = means[i+j][0]
means[i][1] = means[i+j][1]
sigmas[i][0] = sigmas[i+j][0]
sigmas[i][1] = sigmas[i+j][1]
amps[i][0] = amps[i+j][0]
amps[i][1] = amps[i+j][1]
elif i>k and i>=(len(means)-j):
print('Using {}'.format(i-k))
means[i][0] = means[i-k][0]
means[i][1] = means[i-k][1]
sigmas[i][0] = sigmas[i-k][0]
sigmas[i][1] = sigmas[i-k][1]
amps[i][0] = amps[i-k][0]
amps[i][1] = amps[i-k][1]
else:
print('Nothing makes sense')
return means,sigmas,amps
def find_time_match(times,time,delta):
first = 0
last = len(times)-1
found = False
index = -1
if not time.tzinfo:
time = time.replace(tzinfo=times[0].tzinfo)
while first<=last and not found:
midpoint = int((first + last)/2)
list_time = times[midpoint]
if not list_time.tzinfo:
list_time = list_time.replace(tzinfo=time.tzinfo)
if abs(list_time-time) < delta :
index = midpoint
found = True
else:
if time < list_time:
last = midpoint-1
else:
first = midpoint+1
return index
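# Illustrative example (not in the original source): given the sorted list of
# sample times, find_time_match returns the index of an entry lying within
# `delta` of the requested time, or -1 if nothing matches, e.g.
#   idx = find_time_match(times, parse('2017-06-06 12:00'), timedelta(minutes=30))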
def SelectDataTimeRange(start_time,stop_time,data,times):
dataarray = np.array(data)
timesarray = np.array(times)
    indices = np.where((timesarray>=start_time)&(timesarray<=stop_time))
subdata = dataarray[indices]
subdatatimes = timesarray[indices]
return subdata, subdatatimes
def merge_data(times1,data1,times2,data2):
merged_data1 = []
merged_data2 = []
merged_times = []
for i in range(len(times1)):
time_index = find_time_match(times2,times1[i],timedelta(minutes=30))
if time_index >= 0:
merged_data1.append(data1[i])
merged_data2.append(data2[time_index])
merged_times.append(times1[i])
return merged_times,merged_data1,merged_data2
def inTimeRange(time_string,tstart,tstop):
time = tstart - timedelta(minutes=1)
if isinstance(time_string, str):
try:
time = parse(time_string)
except:
print('{} Not a time!'.format(time_string))
return False
elif isinstance(time_string, datetime):
time = time_string
# check that tzinfo is set for tz aware comparisons
if tstart.tzinfo==None:
tstart = tstart.replace(tzinfo=time.tzinfo)
if tstop.tzinfo==None:
tstop = tstop.replace(tzinfo=time.tzinfo)
#print('Checking {} > {} and < {} = {}'.format(time,tstart,tstop,(time > tstart and time < tstop)))
return (time > tstart and time < tstop)
def get_spectra(rows, nhours, tstart, tstop):
datatz = rows[-1][1].tzinfo
date_itr = tstart
times = []
spectra = []
counter = 0
# break data up into days to speed up range selection
while date_itr < tstop:
next_day = date_itr+timedelta(days=1)
daily_row = [row for row in rows if \
inTimeRange(row[1],date_itr,next_day)]
time_itr = date_itr
date_itr = next_day
while time_itr < date_itr:
time_next = time_itr+timedelta(hours=nhours)
integration = [row for row in rows if \
inTimeRange(row[1],time_itr,time_next)]
time_itr = time_next
if len(integration)==0:
continue
array_lst = []
for j in integration:
array_lst.append(make_array(j))
integrated = sum(array_lst)
spectra.append(integrated)
times.append(integration[int(len(integration)/2)][1])
return times, spectra
def get_calibrations(spectra, fit_function, fit_args):
counter = 0
calibrations = []
calibration_errs = []
energy_spectra = []
last_calib = 2.5 # default calibration
last_err = 0
for spectrum in spectra:
        mean, sigma, amp = fit_function(spectrum, counter, *fit_args)
calib = (1460)/(mean[0])
calib_err = (1460)/(mean[0])**2*np.sqrt(mean[1]**2)
if calib < 0 or calib > 10 or math.isnan(calib):
print('invalid calibration {}, using {}'.format(calib,last_calib))
calib = last_calib
calib_err = last_err
else:
last_calib = calib
last_err = calib_err
calibrations.append(calib)
calibration_errs.append(calib_err)
energy_spectrum = np.array(spectrum)*calib
energy_spectra.append(energy_spectrum)
counter += 1
return calibrations, calibration_errs
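# Worked example of the calibration above (numbers are illustrative): if the
# K-40 photopeak (1460 keV) is fit at channel mean[0] = 584, then
#   calib = 1460 / 584 = 2.5 keV/channel
# which is also why 2.5 keV/channel is used as the fallback `last_calib`.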
def calibrate_spectra(spectra, calibrations, times, nsum):
E_spectra = []
bin_times = []
spectra_sum = []
itimes = []
isum = 0
for i in range(len(spectra)):
# list of energies = channel number * calibration (assume linear)
energies = np.array(range(len(spectra[i])))*calibrations[i]
print(energies)
spectrum = np.zeros(600)
for j in range(len(spectra[i])):
count = spectra[i][j]
# energy bin width = 5keV
index = int(energies[j]/5)
spectrum[index] += count
if isum < nsum:
spectra_sum.append(spectrum)
itimes.append(times[i])
isum += 1
else:
E_spectra.append(sum(spectra_sum))
bin_times.append(itimes[int(len(itimes)/2)])
itimes = []
spectra_sum = []
isum = 0
return E_spectra, bin_times
def get_peak_fits(spectra, fit_function, fit_args):
means = []
sigmas = []
amps = []
counter = 0
for spectrum in spectra:
mean,sigma,amp = fit_function(spectrum,counter,*fit_args)
means.append(mean)
sigmas.append(sigma)
amps.append(amp)
counter += 1
means,sigmas,amps = varify_data(means,sigmas,amps)
return means,sigmas,amps
def get_peaks(rows, nhours, tstart, tstop, fit_function, fit_args):
'''
Applies double gaussian + expo fits to all data over some range of time
Arguments:
- full list of csv data input rows
- number of hours to integrate each calculation over
- start/stop times to run over
- peak fitting method
- arguments to be fed to the peak fitting method
Returns:
- lists of means,sigmas,amps from all gaussian fits
- each entry in list includes the value and uncertainty
'''
datatz = rows[-1][1].tzinfo
date_itr = tstart
times = []
means = []
sigmas = []
amps = []
counter = 0
# break data up into days to speed up range selection
while date_itr < tstop:
next_day = date_itr+timedelta(days=1)
daily_row = [row for row in rows if \
inTimeRange(row[1],date_itr,next_day)]
time_itr = date_itr
date_itr = next_day
while time_itr < date_itr:
time_next = time_itr+timedelta(hours=nhours)
integration = [row for row in rows if \
inTimeRange(row[1],time_itr,time_next)]
time_itr = time_next
if len(integration)==0:
continue
array_lst = []
for j in integration:
array_lst.append(make_array(j))
integrated = sum(array_lst)
mean,sigma,amp = fit_function(integrated,counter,*fit_args)
counter += 1
means.append(mean)
sigmas.append(sigma)
amps.append(amp)
times.append(integration[int(len(integration)/2)][1])
means,sigmas,amps = varify_data(means,sigmas,amps)
return times,means,sigmas,amps
def get_weather_data(location,nhours,start_day,stop_day):
tstart = parse(start_day)
tstop = parse(stop_day)
date_itr = tstart
times = []
temps = []
while date_itr < tstop:
data = weather.weather_station_data_scrape(location, date_itr)
time_itr = date_itr
date_itr = date_itr+timedelta(days=1)
if not data:
print('No weather data for {}'.format(date_itr))
while time_itr < date_itr:
time_next = time_itr+timedelta(hours=nhours)
integration = [row for row in data if \
inTimeRange(row[0],time_itr,time_next)]
time_itr = time_next
if len(integration)==0:
continue
times.append(integration[int(len(integration)/2)][0])
temps.append(np.mean(np.asarray([x[1] for x in integration])))
return times,temps
def cut_outliers(array):
mean, sigma = get_stats(array)
for i in range(len(array)):
if (array[i]>mean+5*sigma) or (array[i]<mean-5*sigma):
if i > 0 and i < len(array)-1:
array[i] = (array[i-1] + array[i+1])/2
elif i==0:
                if (array[i+1]<mean+5*sigma) and (array[i+1]>mean-5*sigma):
array[i] = array[i+1]
else:
array[i] = mean
elif i==len(array)-1:
array[i] = array[i-1]
return array
def get_stats(array):
return np.mean(array), np.sqrt(np.var(array))
def make_plot(points,data,errs,xlbl,ylbl,tstr,style,clr,ymin=0,ymax=0):
fig, ax = plt.subplots()
fig.patch.set_facecolor('white')
plt.title(tstr)
plt.xlabel(xlbl)
plt.ylabel(ylbl)
if ymin and ymax:
plt.ylim(ymin,ymax)
ax.plot(points,data,style)
ax.errorbar(points,data,yerr=errs,fmt=style,ecolor=clr)
fig.autofmt_xdate()
def import_csv(url,start,stop):
print(url)
response = urlopen(url)
reader = csv.reader(io.TextIOWrapper(response))
rows = [row for row in reader if \
inTimeRange(row[1],parse(start),parse(stop))]
print('extracted {} entries from data url'.format(len(rows)))
# remove meta data
return rows
def select_data(rows,start_day,stop_day):
tstart = parse(start_day)
tstop = parse(stop_day)
for row in rows:
if isinstance(row[1], str):
row[1] = parse(row[1])
rows = [row for row in rows if \
inTimeRange(row[1],tstart,tstop)]
times, spectra = get_spectra(rows,1,tstart,tstop)
return times,spectra
def main(times,spectra,nhours,stationID=0,wtimes=[],temps=[]):
#---------------------------------------------------------------------#
# Get fit results for ndays integrating over nhours for Potassium
#---------------------------------------------------------------------#
# single_peak_fit args: channel lims, expo offset, plot flag
#args = [210,310,100,False]
#args = [180,280,100,True]
args = [360,780,7.0,100,False,'K']
calibs,calib_err = get_calibrations(spectra, fitter.single_peak_fit,args)
E_spectra, bin_times = calibrate_spectra(spectra,calibs,times,nhours)
args = [180,390,7.0,100,False,'K']
K_peaks, K_sigmas, K_amps = get_peak_fits(E_spectra, \
fitter.single_peak_fit,args)
#-------------------------------------------------------------------------#
# Varify and break apart mean,sigma,amp values and uncertainties
#-------------------------------------------------------------------------#
K_ch, K_ch_errs = get_arrays(K_peaks)
K_sig = [i[0] for i in K_sigmas]
K_A = [i[0] for i in K_amps]
K_ch_ave, K_ch_var = get_stats(K_ch)
K_counts = fitter.get_peak_counts(K_ch,K_sig,K_A)
    K_counts = cut_outliers(K_counts)
K_mean, K_var = get_stats(np.asarray(K_counts))
for i in range(len(K_ch)):
if abs(K_ch[i]-K_ch_ave) > 3*K_ch_var:
print('Bad K-40 fit: peak channel = {}'.format(K_ch[i]))
#---------------------------------------------------------------------#
# Do the same for Bizmuth-214
#---------------------------------------------------------------------#
# double_peak_fit args: channel lims, gaus index, expo offset, plot flag
#args = [50,130,1,1,True]
if stationID==0:
args = [50,130,1,1,False,'Bi']
Bi_peaks,Bi_sigmas,Bi_amps = get_peak_fits(E_spectra, \
fitter.double_peak_fit,args)
if stationID==1:
args = [90,150,5.0,1,False,'Bi']
Bi_peaks,Bi_sigmas,Bi_amps = get_peak_fits(E_spectra, \
fitter.single_peak_fit,args)
Bi_ch, Bi_ch_errs = get_arrays(Bi_peaks)
Bi_sig = [i[0] for i in Bi_sigmas]
Bi_A = [i[0] for i in Bi_amps]
B_ch_ave,B_ch_var = get_stats(Bi_ch)
#-------------------------------------------------------------------------#
# Process channel data using fit results
#-------------------------------------------------------------------------#
Bi_counts = fitter.get_peak_counts(Bi_ch,Bi_sig,Bi_A)
Bi_counts = cut_outliers(Bi_counts)
Bi_mean, Bi_var = get_stats(np.asarray(Bi_counts))
print('K-40 <channel> = {} +/- {}'.format(K_ch_ave,K_ch_var))
print('K-40 <N> = {} +/- {}'.format(K_mean,K_var))
print('Bi-214 <channel> = {} +/- {}'.format(B_ch_ave,B_ch_var))
print('Bi-214 <N> = {} +/- {}'.format(Bi_mean,Bi_var))
#-------------------------------------------------------------------------#
# Process weather data
#-------------------------------------------------------------------------#
# LBL weather station
#location = 'KCABERKE89'
#location = 'KCABERKE86'
#wtimes,temps = get_weather_data(location,nhours,tstart,tstop)
times_both,counts,temps = merge_data(bin_times,Bi_counts,wtimes,temps)
#-------------------------------------------------------------------------#
# Plots of everything we are interested in!
#-------------------------------------------------------------------------#
make_plot(bin_times,K_counts,np.sqrt(K_counts), \
'Time','counts','K-40 counts vs Time','go','g')
fig_name = '/Users/alihanks/Google Drive/NQUAKE_analysis/D3S/K_counts_{}_5-8.png'.format(stationID)
plt.savefig(fig_name)
make_plot(times,calibs,calib_err, \
'Time','keV/channel','keV/channel vs Time','bo','b', \
2.4,2.6)
fig_name = '/Users/alihanks/Google Drive/NQUAKE_analysis/D3S/calibs_{}_5-8.png'.format(stationID)
plt.savefig(fig_name)
make_plot(bin_times,Bi_counts,np.sqrt(Bi_counts), \
'Time','counts','Bi-214 counts vs Time','go','g')
fig_name = '/Users/alihanks/Google Drive/NQUAKE_analysis/D3S/Bi_counts_{}_5-8.png'.format(stationID)
plt.savefig(fig_name)
#make_plot(Ktimes,K_ch,K_ch_errs, \
# 'Time','1460 center channel','1460 channel vs Time','ro','r')
#make_plot(times,Bi_ch,Bi_ch_errs, \
# 'Time','609 center channel','609 channel vs Time','ro','r', \
# B_ch_ave-10*B_ch_var,B_ch_ave+10*B_ch_var)
make_plot(temps,counts,np.sqrt(counts), \
'Temp (F)','Bi-214 counts','Bi-214 counts vs Temp (F)','ro','r')
fig_name = '/Users/alihanks/Google Drive/NQUAKE_analysis/D3S/Bi_counts_vs_T_{}_5-8.png'.format(stationID)
plt.savefig(fig_name)
plt.show()
if __name__ == '__main__':
url = 'https://radwatch.berkeley.edu/sites/default/files/dosenet/lbl_outside_d3s.csv'
#url = 'https://radwatch.berkeley.edu/sites/default/files/dosenet/etch_roof_d3s.csv'
start = '2017-6-6'
stop = '2017-5-31'
rows = import_csv(url,start,stop)
    # number of hours to integrate for each data point
    nhours = 1
    times, spectra = select_data(rows, start, stop)
    main(times, spectra, nhours)
| mit | 2,582,159,175,080,987,600 | 35.132463 | 124 | 0.539887 | false | 3.404887 | false | false | false |
chienlieu2017/it_management | odoo/addons/mrp/models/stock_picking.py | 5 | 1241 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class StockPickingType(models.Model):
_inherit = 'stock.picking.type'
code = fields.Selection(selection_add=[('mrp_operation', 'Manufacturing Operation')])
count_mo_todo = fields.Integer(compute='_get_mo_count')
count_mo_waiting = fields.Integer(compute='_get_mo_count')
count_mo_late = fields.Integer(compute='_get_mo_count')
def _get_mo_count(self):
mrp_picking_types = self.filtered(lambda picking: picking.code == 'mrp_operation')
if not mrp_picking_types:
return
MrpProduction = self.env['mrp.production']
count_mo_waiting = MrpProduction.search_count([('availability', '=', 'waiting')])
count_mo_todo = MrpProduction.search_count([('state', 'in', ('confirmed', 'planned', 'progress'))])
count_mo_late = MrpProduction.search_count(['&', ('date_planned_start', '<', fields.Date.today()), ('state', '=', 'confirmed')])
for picking in mrp_picking_types:
picking.count_mo_waiting = count_mo_waiting
picking.count_mo_todo = count_mo_todo
picking.count_mo_late = count_mo_late
| gpl-3.0 | 8,673,702,273,649,559,000 | 44.962963 | 136 | 0.647059 | false | 3.693452 | false | false | false |
orgito/ansible | lib/ansible/modules/remote_management/oneview/oneview_san_manager_facts.py | 120 | 3321 | #!/usr/bin/python
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_san_manager_facts
short_description: Retrieve facts about one or more of the OneView SAN Managers
description:
- Retrieve facts about one or more of the SAN Managers from OneView
version_added: "2.5"
requirements:
- hpOneView >= 2.0.1
author:
- Felipe Bulsoni (@fgbulsoni)
- Thiago Miotto (@tmiotto)
- Adriane Cardozo (@adriane-cardozo)
options:
provider_display_name:
description:
- Provider Display Name.
params:
description:
- List of params to delimit, filter and sort the list of resources.
- "params allowed:
- C(start): The first item to return, using 0-based indexing.
- C(count): The number of resources to return.
- C(query): A general query string to narrow the list of resources returned.
- C(sort): The sort order of the returned data set."
extends_documentation_fragment:
- oneview
'''
EXAMPLES = '''
- name: Gather facts about all SAN Managers
oneview_san_manager_facts:
config: /etc/oneview/oneview_config.json
delegate_to: localhost
- debug: var=san_managers
- name: Gather paginated, filtered and sorted facts about SAN Managers
oneview_san_manager_facts:
config: /etc/oneview/oneview_config.json
params:
start: 0
count: 3
sort: name:ascending
query: isInternal eq false
delegate_to: localhost
- debug: var=san_managers
- name: Gather facts about a SAN Manager by provider display name
oneview_san_manager_facts:
config: /etc/oneview/oneview_config.json
provider_display_name: Brocade Network Advisor
delegate_to: localhost
- debug: var=san_managers
'''
RETURN = '''
san_managers:
description: Has all the OneView facts about the SAN Managers.
returned: Always, but can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase
class SanManagerFactsModule(OneViewModuleBase):
argument_spec = dict(
provider_display_name=dict(type='str'),
params=dict(type='dict')
)
def __init__(self):
super(SanManagerFactsModule, self).__init__(additional_arg_spec=self.argument_spec)
self.resource_client = self.oneview_client.san_managers
def execute_module(self):
if self.module.params.get('provider_display_name'):
provider_display_name = self.module.params['provider_display_name']
san_manager = self.oneview_client.san_managers.get_by_provider_display_name(provider_display_name)
if san_manager:
resources = [san_manager]
else:
resources = []
else:
resources = self.oneview_client.san_managers.get_all(**self.facts_params)
return dict(changed=False, ansible_facts=dict(san_managers=resources))
def main():
SanManagerFactsModule().run()
if __name__ == '__main__':
main()
| gpl-3.0 | -1,306,415,450,797,404,700 | 29.46789 | 110 | 0.666366 | false | 3.681818 | true | false | false |
mendax-grip/cfdemUtilities | foam/getVelocitiesFoam.py | 2 | 3743 | # This program converts OpenFOAM raw data for the velocity field to a text file with
# both position and velocity vector
#
# Output format :
# position (x y z) and velocity vector
# THIS PROGRAM REQUIRES A DIRECTORY U in the main folder
#
#
# Author : Bruno Blais
#Python imports
#----------------
import os
import sys
import numpy
#----------------
#********************************
# OPTIONS AND USER PARAMETERS
#********************************
#
readZ=False
readShear=True
readPseudo=True
#Initial time of simulation, final time and time increment must be specified by user
t0=0.4
tf=0.4
dT=0.4
#====================
# READERS
#====================
#This function reads an OpenFOAM raw file for a scalar and extracts a table of the data
def readfScalar(fname):
infile = open(fname,'r')
if (infile!=0):
#Clear garbage lines
for i in range(0,20,1):
infile.readline()
#Read number of cell centers
n=int(infile.readline())
#Pre-allocate memory
xu=numpy.zeros([n])
#Clear garbage line "("
infile.readline()
#read current property "xu"
for i in range(0,n,1):
number_str=infile.readline()
xu[i]=float(number_str)
else:
print "File %s could not be opened" %fname
infile.close();
return n,xu
#This function reads an OpenFOAM raw file for a vector and extracts a table of the data
def readfVector(fname):
infile = open(fname,'r')
if (infile!=0):
#Clear garbage lines
for i in range(0,20):
infile.readline()
#Read number of cell centers
n=int(infile.readline())
#Pre-allocate memory
x=numpy.zeros([n])
y=numpy.zeros([n])
z=numpy.zeros([n])
#Clear garbage line "("
infile.readline()
#read current property "xu"
for i in range(0,n,1):
number_str=infile.readline()
number2_str=number_str.split("(")
number3_str=number2_str[1].split(")")
number4_str=number3_str[0].split()
x[i]=float(number4_str[0])
y[i]=float(number4_str[1])
z[i]=float(number4_str[2])
else:
print "File %s could not be opened" %fname
infile.close();
return n,x,y,z
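# Illustrative sketch of the raw file layout the two readers above expect
# (inferred from the parsing code, not from OpenFOAM documentation):
#   <20 header lines>
#   4096            <- number of cell centres
#   (
#   (0.1 0.2 0.0)
#   (0.1 0.2 0.1)
#   ...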
#======================
# MAIN
#======================
# Check if the destination folder exists
if not os.path.isdir("./U"):
print "********** Abort **********"
print "The folder particlesInfo does not exist, you must create it manually in the working folder"
#Name of the files to be considered
inname= ['ccx', 'ccy','ccz','p','U','cellVolumes']
if readPseudo:
inname.append('pseudoEq')
elif readShear:
inname.append('shearRate')
os.chdir(sys.argv[1]) # go to directory
nt=int((tf-t0)/dT)+1
t=t0
for i in range(0,nt):
#Current case
print "Post-processing time ", t
#Go to the directory corresponding to the timestep
if (t==0) : os.chdir("0")
elif ((numpy.abs(numpy.mod(t+0.00001,1)))<0.01): os.chdir(str(int(t)))
else :os.chdir(str(t))
[n,x] = readfScalar(inname[0])
[n,y] = readfScalar(inname[1])
if readZ :[n,z] = readfScalar(inname[2])
else : z=numpy.zeros([numpy.size(x)])
[n, p] = readfScalar(inname[3])
[n,u,v, w] = readfVector(inname[4])
[n, V] = readfScalar(inname[5])
if (readShear):
[n, shear] = readfScalar(inname[6])
#Create output file back in main folder
outname="../../U/U_%s" %str(i)
outfile=open(outname,'w')
for j in range(0,n):
if readShear:
outfile.write("%5.5e %5.5e %5.5e %5.5e %5.5e %5.5e %5.5e %5.5e %5.5e \n" %(x[j],y[j],z[j],u[j],v[j],w[j],p[j],V[j],shear[j]))
else:
outfile.write("%5.5e %5.5e %5.5e %5.5e %5.5e %5.5e %5.5e %5.5e\n" %(x[j],y[j],z[j],u[j],v[j],w[j],p[j],V[j]))
outfile.close()
t += dT
#Go back to CFD directory
os.chdir("..") #
print "Post-processing over"
| lgpl-3.0 | 1,780,018,183,711,861,800 | 23.148387 | 137 | 0.597115 | false | 2.850724 | false | false | false |
h01ger/voctomix | voctogui/lib/studioclock.py | 1 | 3272 | import math
import time
import cairo
from gi.repository import Gtk, GLib
# studio clock that displays a clock like mentioned in:
# https://masterbase.at/studioclock/#C3CD2D
class StudioClock(Gtk.ToolItem):
__gtype_name__ = 'StudioClock'
# set resolution of the update timer in seconds
timer_resolution = 0.1
# init widget
def __init__(self):
super().__init__()
# suggest size of widget
self.set_size_request(130, 50)
        # remember last drawn time
self.time = time.localtime(0)
# set up timeout for periodic redraw
GLib.timeout_add_seconds(self.timer_resolution, self.do_timeout)
def do_timeout(self):
# get current time
t = time.localtime(time.time())
# if time did not change since last redraw
if self.time != t:
self.time = t
self.queue_draw()
        # schedule the next tick
GLib.timeout_add_seconds(self.timer_resolution, self.do_timeout)
# override drawing of the widget
def do_draw(self, cr):
# get actual widget size
width = self.get_allocated_width()
height = self.get_allocated_height()
# calculate center and radius of the clock
center = (width / 2, height / 2)
radius = min(center)
# setup gradients for clock background to get a smooth border
bg_lg = cairo.RadialGradient(
center[0], center[1], 0, center[0], center[1], radius)
bg_lg.add_color_stop_rgba(0.0, 0, 0, 0, 1.0)
bg_lg.add_color_stop_rgba(0.9, 0, 0, 0, 1.0)
bg_lg.add_color_stop_rgba(1.0, 0, 0, 0, 0.0)
# paint background
cr.set_source(bg_lg)
cr.arc(center[0], center[1], radius, 0, 2 * math.pi)
cr.fill()
# draw ticks for every second
for tick in range(0, 60):
# fade out seconds in future and highlight past seconds
if tick > self.time.tm_sec:
cr.set_source_rgb(0.2, 0.3, 0.01)
else:
cr.set_source_rgb(0.764, 0.804, 0.176)
# calculate tick position
angle = tick * math.pi / 30
pos = (center[0] + math.sin(angle) * radius * 0.8,
center[1] - math.cos(angle) * radius * 0.8)
# draw tick
cr.arc(pos[0], pos[1], radius / 40, 0, 2 * math.pi)
cr.fill()
        # draw persistent ticks every five seconds
cr.set_source_rgb(0.764, 0.804, 0.176)
for tick in range(0, 12):
# calculate tick position
angle = tick * math.pi / 6
pos = (center[0] + math.sin(angle) * radius * 0.9,
center[1] - math.cos(angle) * radius * 0.9)
# draw tick
cr.arc(pos[0], pos[1], radius / 40, 0, 2 * math.pi)
cr.fill()
# set a reasonable font size
cr.set_font_size(cr.user_to_device_distance(0, height / 5)[1])
# format time into a string
text = time.strftime("%H:%M")
# get text drawing extents
(xbearing, ybearing,
textwidth, textheight,
xadvance, yadvance) = cr.text_extents(text)
# draw time
cr.move_to(center[0] - textwidth / 2, center[1] + textheight / 2)
cr.show_text(text)
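# Usage sketch (illustrative, not part of the original widget): StudioClock is
# a Gtk.ToolItem, so it can simply be appended to an existing toolbar, e.g.
# toolbar.insert(StudioClock(), -1)
# toolbar.show_all()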
| mit | -424,392,922,117,431,940 | 36.181818 | 73 | 0.558068 | false | 3.469777 | false | false | false |
pbanaszkiewicz/amy | amy/workshops/tests/test_diff.py | 1 | 4086 | from django.urls import reverse
from reversion import revisions as reversion
from reversion.models import Version
from reversion.revisions import create_revision
from workshops.models import Event, Person, Tag
from workshops.tests.base import TestBase
class TestRevisions(TestBase):
def setUp(self):
self._setUpUsersAndLogin()
self._setUpOrganizations()
self.tag1, _ = Tag.objects.get_or_create(pk=1)
self.tag2, _ = Tag.objects.get_or_create(pk=2)
with create_revision():
self.event = Event.objects.create(host=self.org_alpha, slug="event")
self.event.tags.add(self.tag1)
self.event.save()
with create_revision():
self.event.slug = "better-event"
self.event.host = self.org_beta
self.event.tags.add(self.tag2)
self.event.save()
# load versions
versions = Version.objects.get_for_object(self.event)
assert len(versions) == 2
self.newer, self.older = versions
def test_showing_diff_event(self):
# get newer revision page
rv = self.client.get(reverse("object_changes", args=[self.newer.pk]))
self.assertEqual(rv.status_code, 200)
assert rv.context["version1"] == self.older
assert rv.context["version2"] == self.newer
assert rv.context["revision"] == self.newer.revision
assert rv.context["object"] == self.event
def test_diff_shows_coloured_labels(self):
# get newer revision page
rv = self.client.get(reverse("object_changes", args=[self.newer.pk]))
# Red label for removed host
self.assertContains(
rv,
'<a class="label label-danger" href="{}">-{}</a>'.format(
self.org_alpha.get_absolute_url(), self.org_alpha
),
html=True,
)
# Green label for assigned host
self.assertContains(
rv,
'<a class="label label-success" href="{}">+{}</a>'.format(
self.org_beta.get_absolute_url(), self.org_beta
),
html=True,
)
# Grey label for pre-assigned tag
self.assertContains(
rv,
'<a class="label label-default" href="#">{}</a>'.format(self.tag1),
html=True,
)
# Green label for additionally assigned tag
self.assertContains(
rv,
'<a class="label label-success" href="#">+{}</a>'.format(self.tag2),
html=True,
)
def test_diff_shows_PK_for_deleted_relationships(self):
# Delete the tag
self.tag1.delete()
self.tag2.delete()
# get newer revision page
rv = self.client.get(reverse("object_changes", args=[self.newer.pk]))
self.assertContains(
rv, '<a class="label label-default" href="#">1</a>', html=True
)
self.assertContains(
rv, '<a class="label label-success" href="#">+2</a>', html=True
)
class TestRegression1083(TestBase):
def setUp(self):
self._setUpUsersAndLogin()
def test_regression_1083(self):
with reversion.create_revision():
alice = Person.objects.create_user(
username="alice",
personal="Alice",
family="Jones",
email="alice@jones.pl",
)
with reversion.create_revision():
bob = Person.objects.create_user(
username="bob", personal="Bob", family="Smith", email="bob@smith.pl"
)
with reversion.create_revision():
alice.family = "Williams"
alice.save()
bob.family = "Brown"
bob.save()
res = self.app.get(reverse("person_details", args=[bob.pk]), user="admin")
revision = res.click("Last modified on")
self.assertIn("Smith", revision)
self.assertIn("Brown", revision)
back_to_person_view = revision.click("View newest")
self.assertIn("Brown", back_to_person_view)
| mit | -2,616,457,965,333,948,000 | 33.05 | 84 | 0.569995 | false | 3.925072 | true | false | false |
ieguiguren/menu | scripts/get_menu.py | 1 | 2134 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
hostname = os.uname()[1]
if hostname == "server1":
prepath = "/opt"
elif hostname == "octopussy":
prepath = "/home/xir/dev"
import sys, urllib, os,cStringIO
try:
import pycurl
except:
print "Intentando instalar pycurl"
try:
os.system('sudo apt-get install -y python-pycurl')
import pycurl
except:
print "No ha sido posible instalar la libreria necesaria *pycurl*"
print "Intentalo a mano"
sys.exit(254)
try:
from BeautifulSoup import BeautifulSoup
except:
print "Intentando instalar BeautifulSoap"
try:
os.system('sudo apt-get install -y python-beautifulsoup')
from BeautifulSoup import BeautifulSoup
except:
print "No ha sido posible instalar la libreria necesaria *BeautifulSoap*"
print "Intentalo a mano"
sys.exit(254)
sys.path.append(os.path.abspath(prepath + "/menu/conf/"))
from menuconf import *
sys.path.append(os.path.abspath(prepath + "/menu/lib/"))
from images import *
mes = [ 'zero','enero', 'febrero', 'marzo', 'abril', 'mayo', 'junio', 'julio', 'agosto', 'septiembre', 'octubre', 'noviembre','diciembre' ]
if int(today) > 27:
mesADescargar = mes[int(month) + 1]
else:
mesADescargar = mes[int(month)]
def get_image_url( url ):
buf = cStringIO.StringIO()
d = pycurl.Curl()
d.setopt(d.URL, url)
d.setopt(d.WRITEFUNCTION, buf.write)
d.perform()
menu = False
encontrado = False
for p in buf.getvalue().split('>'):
if "Men" in p:
if mesADescargar in p.lower():
menu = True
if menu and not encontrado:
if "imageanchor" in p:
encontrado = True
img = p.split(' ')[1][6:-1]
buf.close()
try:
return img
except:
return ""
# if dir exists, don't download again
if os.path.isfile(datapath + str(descargado)):
sys.exit()
else:
url = get_image_url(rss)
if url != "":
urllib.urlretrieve(url, tpath + filename)
create_images()
f = open (datapath + str(descargado), 'w')
f.close()
| gpl-3.0 | -8,420,493,370,423,797,000 | 26.358974 | 139 | 0.614339 | false | 3.175595 | false | false | false |
machinalis/eff | eff_site/urls.py | 1 | 7722 | # Copyright 2009 - 2011 Machinalis: http://www.machinalis.com/
#
# This file is part of Eff.
#
# Eff is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eff is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Eff. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls.defaults import *
from django.contrib.auth.views import login, logout
from django.views.generic.simple import redirect_to
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
from eff_site.settings import CURRENT_ABS_DIR
from eff_site.eff.views import (update_hours, eff, eff_check_perms,
eff_previous_week, eff_current_week, eff_current_month, eff_horas_extras,
eff_chart, eff_next, eff_prev, eff_charts, eff_report, eff_update_db,
eff_administration, eff_client_report, eff_client_reports_admin,
UserProfileForm, eff_last_month, eff_admin_add_user,
eff_admin_change_profile, profile_detail, eff_dump_csv_upload,
eff_fixed_price_client_reports, eff_admin_users_association, eff_home,
eff_client_home, index, eff_client_projects, eff_client_summary,
eff_client_summary_period, add_attachment_custom, delete_attachment_custom)
from os.path import join
jscalendar_dir = join(CURRENT_ABS_DIR, 'addons/jscalendar-1.0/')
js_dir = join(CURRENT_ABS_DIR, 'addons/js/')
jscalendar_lang_dir = join(CURRENT_ABS_DIR, 'addons/jscalendar-1.0/lang/')
calendar_dir = join(CURRENT_ABS_DIR, 'addons/simple-calendar/')
sortable_dir = join(CURRENT_ABS_DIR, 'addons/sortable-table/')
templates_dir = join(CURRENT_ABS_DIR, 'templates/')
images_dir = join(CURRENT_ABS_DIR, 'templates/images/')
urlpatterns = patterns('',
url(r'^$', index, name='root'),
url(r'^clients/home/$', eff_client_home, name='client_home'),
url(r'^clients/projects/$', eff_client_projects, name='client_projects'),
url(r'^clients/summary/period/$', eff_client_summary_period,
name='client_summary_period'),
url(r'^clients/summary/$', eff_client_summary,
name='client_summary'),
# django-profiles
url(r'^accounts/login/$', login, {'template_name': 'login.html'},
name='login'),
url(r'^accounts/logout/$', logout, {'template_name': 'logout.html'},
name='logout'),
url(r'^accounts/profile/$', eff_home, name='eff_home'),
url(r'^login/$', redirect_to, {'url': '/accounts/login/'},
name='redir_login'),
url(r'^logout/$', redirect_to, {'url': '/accounts/logout/'},
name='redir_logout'),
url(r'^checkperms/([A-Za-z_0-9]*)/$', eff_check_perms, name='checkperms'),
url(r'^profiles/edit', 'eff.views.edit_profile',
{'form_class': UserProfileForm, }, name='profiles_edit'),
url(r'^profiles/(?P<username>[\w\._-]+)/$', profile_detail,
name='profiles_detail'),
url(r'^profiles/', include('profiles.urls'), name='profiles'),
# password reset
url(r'^accounts/password_reset/$',
'django.contrib.auth.views.password_reset',
{'template_name': 'password_reset.html',
'email_template_name': 'password_reset_email.html'},
name='password_reset'),
url(r'^password_reset/$', redirect_to,
{'url': '/accounts/password_reset/'}, name='redir_password_reset'),
url(r'^accounts/password_reset/done/$',
'django.contrib.auth.views.password_reset_done',
{'template_name': 'password_reset_done.html'},
name='password_reset_done'),
url(r'^accounts/reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$',
'django.contrib.auth.views.password_reset_confirm',
{'template_name': 'password_reset_confirm.html'},
name='password_reset_confirm'),
url(r'^reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$',
redirect_to,
{'url': '/accounts/reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/'},
name='redir_password_reset_confirm'),
url(r'^accounts/reset/done/$',
'django.contrib.auth.views.password_reset_complete',
{'template_name': 'password_reset_complete.html'},
name='password_reset_complete'),
# password change
url(r'^accounts/change_password/$',
'django.contrib.auth.views.password_change',
{'template_name': 'password_change.html',
'post_change_redirect': '/accounts/change_password/done/'},
name='password_change'),
url(r'^accounts/change_password/done/$',
'django.contrib.auth.views.password_change_done',
{'template_name': 'password_change_done.html'},
name='password_change_done'),
url(r'^password_change/$', redirect_to,
{'url': '/accounts/password_change/'},
name='redir_password_change'),
url(r'^updatehours/([A-Za-z_0-9]*)/$', update_hours, name='update_hours'),
url(r'^efi/$', eff, name='eff'),
url(r'^efi/semanaanterior/$', eff_previous_week, name='eff_previous_week'),
url(r'^efi/semanaactual/$', eff_current_week, name='eff_current_week'),
url(r'^efi/mesactual/$', eff_current_month, name='eff_current_month'),
url(r'^efi/mespasado/$', eff_last_month, name='eff_last_month'),
url(r'^efi/horasextras/$', eff_horas_extras, name='eff_extra_hours'),
url(r'^efi/next/$', eff_next, name='eff_next'),
url(r'^efi/prev/$', eff_prev, name='eff_prev'),
url(r'^efi/chart/([A-Za-z_0-9]*)/$', eff_chart, name='eff_chart'),
url(r'^efi/charts/$', eff_charts, name='eff_charts'),
url(r'^efi/reporte/([A-Za-z_0-9]*)/$', eff_report, name='eff_report'),
url(r'^efi/update-db/$', eff_update_db, name='eff_update_db'),
url(r'^efi/administration/users_password/$', eff_administration,
name='eff_administration'),
url(r'^efi/administration/users_profile/$', eff_admin_change_profile,
name='eff_admin_change_profile'),
url(r'^efi/administration/add_user/$', eff_admin_add_user,
name='eff_admin_add_user'),
url(r'^efi/administration/client_reports/$', eff_client_reports_admin,
name='eff_client_reports_admin'),
url(r'^efi/administration/fixed_price_client_reports/$',
eff_fixed_price_client_reports, name='eff_fixed_price_client_reports'),
url(r'^efi/administration/dump-csv-upload/$', eff_dump_csv_upload,
name='eff_dump_csv_upload'),
url(r'^efi/reporte_cliente/([-\w]+)/$', eff_client_report,
name='eff_client_report'),
url(r'^efi/administration/users_association/$',
eff_admin_users_association, name='eff_admin_users_association'),
url(r'^efi/administration/client_summary/$',
eff_client_summary_period,
name='eff_client_summary_period'),
url(r'^efi/administration/client_summary/([-\w]+)/$',
eff_client_summary,
name='eff_client_summary'),
url(r'^admin/', include(admin.site.urls)),
url(r'^comments/', include('django.contrib.comments.urls')),
url(r'^attachments/add-for/(?P<app_label>[\w\-]+)/(?P<module_name>[\w\-]+)/(?P<pk>\d+)/$',
add_attachment_custom, name="add_attachment_custom"),
url(r'^attachments/delete/(?P<attachment_pk>\d+)/$',
delete_attachment_custom, name="delete_attachment_custom"),
url(r'^attachments/', include('attachments.urls')),
)
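# With DEBUG enabled, media files are served by Django's static view below;
# in production the front-end web server is expected to serve MEDIA_ROOT.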
if settings.DEBUG:
urlpatterns += patterns('',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.MEDIA_ROOT}),
)
| gpl-3.0 | 3,161,027,209,666,449,400 | 48.5 | 94 | 0.654105 | false | 3.295775 | false | false | false |
ngr/sm_00 | slave/views.py | 1 | 4072 | ### Slave API Views ###
from django.db.models import F, Count
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import generics
from rest_framework import permissions
from rest_framework import pagination
from slave.models import Slave
from slave.serializers import SlaveSerializer, SlaveDetailSerializer
from slave.helpers import filter_by_attribute, filter_by_location_region
class API_SlaveList(generics.ListAPIView):
""" List Slaves. """
permission_classes = (permissions.IsAuthenticated,)
serializer_class = SlaveSerializer
def get_queryset(self):
""" Return Slaves of the current user. """
# Authorization check.
        # We assume below that slave_list already contains only the requesting
        # user's slaves, so the remaining filters can simply be stacked on it.
slave_list = Slave.objects.filter(owner=self.request.user)
        # By default only living slaves are listed.
        # The condition is negated because living slaves are requested far more often.
        if 'dead' not in self.request.query_params:
slave_list = slave_list.filter(date_death__isnull=True)
else:
slave_list = slave_list.filter(date_death__isnull=False)
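            # e.g. ?dead=1 lists deceased slaves instead; only the presence of
            # the parameter matters, its value is ignored.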
# Filter by valid attributes
valid_params = ['location', 'sex']
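        # e.g. ?location=<id>&sex=<value> narrows the list on those attributes.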
for attr in valid_params:
if attr in self.request.query_params:
slave_list = filter_by_attribute(slave_list,\
attribute_name=attr,\
attribute=self.request.query_params.get(attr))
# Filter by Region
if 'region' in self.request.query_params:
slave_list = filter_by_location_region(slave_list, self.request.query_params.get('region'))
# Filter free Slaves
if 'free' in self.request.query_params:
            # FIXME: this is clumsy. We compare the total number of assignments
            # with the number of released ones; if the two counts are equal,
            # nothing is currently running. Filtering directly on an annotation
            # of NON-released assignments has not worked yet.
slave_list = slave_list.annotate(assgns=Count('assignments')).\
annotate(rel_assgns=Count('assignments__date_released')).\
filter(assgns=F('rel_assgns'))
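            # A possibly simpler alternative (untested against this schema)
            # would be to exclude slaves that still have an unreleased
            # assignment:
            #     slave_list.exclude(assignments__date_released__isnull=True)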
# Order By
# Should one day get the ordering from request.
slave_list = slave_list.order_by('location__region', 'date_birth')
# Paginate
        # FIXME: the built-in "LimitOffsetPagination" didn't work,
        # so limit/offset handling is done directly in the view.
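        # Example (assumed request shape): ?limit=20&offset=40 returns the
        # 41st-60th slaves of the ordered queryset.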
if any(q for q in self.request.query_params if q in ['limit', 'offset']):
if 'limit' in self.request.query_params:
limit = int(self.request.query_params.get('limit'))
offset = int(self.request.query_params.get('offset'))\
if 'offset' in self.request.query_params else 0
if 'limit' in locals():
slave_list = slave_list[offset:limit+offset]
else:
slave_list = slave_list[offset:]
return slave_list
class API_SlaveDetail(APIView):
""" Slave Details. """
permission_classes = (permissions.IsAuthenticated,)
serializer_class = SlaveDetailSerializer
def get_object(self, pk):
""" Get already authorized Item."""
s = Slave.objects.get(pk=pk, owner=self.request.user)
# This updates available skills for the next time
s.get_available_skills()
return s
def get(self, request, pk, format=None):
# Get authorized Slave
try:
slave = self.get_object(pk)
except Slave.DoesNotExist:
return Response("Authorization error or wrong Slave id.",
status=status.HTTP_404_NOT_FOUND)
        print(slave)
return Response(self.serializer_class(slave).data) | mit | 1,533,044,917,512,156,700 | 40.4375 | 103 | 0.618124 | false | 4.299894 | false | false | false |
mistergone/college-costs | paying_for_college/tests/test_models.py | 1 | 6953 | #!/usr/bin/env python
# -*- coding: utf8 -*-
import json
import dateutil.parser
import mock
from django.test import TestCase
from paying_for_college.models import School, Contact, Program, Alias, Nickname
from paying_for_college.models import ConstantCap, ConstantRate, Disclosure
from paying_for_college.models import Notification, print_vals
from paying_for_college.models import get_region
class SchoolRegionTest(TestCase):
def test_get_region(self):
school = School(school_id='123456', state='NE')
self.assertTrue(get_region(school) == 'MW')
def test_get_region_failure(self):
school = School(school_id='123456', state='')
self.assertTrue(get_region(school) == '')
class SchoolModelsTest(TestCase):
def create_school(self, ID=999999,
data_json='',
accreditor="Almighty Wizard",
city="Emerald City",
degrees_highest="3",
state="OZ",
ope6=5555,
ope8=555500):
return School.objects.create(school_id=ID,
data_json=data_json,
accreditor=accreditor,
degrees_highest=degrees_highest,
degrees_predominant=degrees_highest,
city=city,
state=state,
ope6_id=ope6,
ope8_id=ope8)
def create_alias(self, alias, school):
return Alias.objects.create(alias=alias,
is_primary=True,
institution=school)
def create_contact(self):
return Contact.objects.create(contact='hackey@school.edu',
name='Hackey Sack')
def create_nickname(self, school):
return Nickname.objects.create(institution=school,
nickname='Hackers')
def create_program(self, school):
return Program.objects.create(institution=school,
program_name='Hacking',
level='3')
def create_disclosure(self, school):
return Disclosure.objects.create(institution=school,
name='Regional transferability',
text="Your credits won't transfer")
def create_notification(self,
school,
oid='f38283b5b7c939a058889f997949efa566c616c5',
time='2016-01-13T20:06:18.913112+00:00'):
return Notification.objects.create(institution=school,
oid=oid,
timestamp=dateutil.parser.parse(time),
errors='none')
def test_school_related_models(self):
s = self.create_school()
self.assertTrue(isinstance(s, School))
self.assertEqual(s.primary_alias, "Not Available")
d = self.create_disclosure(s)
self.assertTrue(isinstance(d, Disclosure))
self.assertTrue(d.name in d.__unicode__())
a = self.create_alias('Wizard U', s)
self.assertTrue(isinstance(a, Alias))
self.assertTrue(a.alias in a.__unicode__())
self.assertEqual(s.primary_alias, a.alias)
self.assertEqual(s.__unicode__(), a.alias + u" (%s)" % s.school_id)
c = self.create_contact()
self.assertTrue(isinstance(c, Contact))
self.assertTrue(c.contact in c.__unicode__())
n = self.create_nickname(s)
self.assertTrue(isinstance(n, Nickname))
self.assertTrue(n.nickname in n.__unicode__())
p = self.create_program(s)
self.assertTrue(isinstance(p, Program))
self.assertTrue(p.program_name in p.__unicode__())
self.assertTrue(p.program_name in p.as_json())
self.assertTrue('Bachelor' in p.get_level())
noti = self.create_notification(s)
self.assertTrue(isinstance(noti, Notification))
self.assertTrue(noti.oid in noti.__unicode__())
self.assertTrue(print_vals(s) is None)
self.assertTrue("Emerald City" in print_vals(s, val_list=True))
self.assertTrue("Emerald City" in print_vals(s, val_dict=True)['city'])
self.assertTrue("Emerald City" in print_vals(s, noprint=True))
self.assertTrue(s.convert_ope6() == '005555')
self.assertTrue(s.convert_ope8() == '00555500')
self.assertTrue('Bachelor' in s.get_highest_degree())
s.ope6_id = 555555
s.ope8_id = 55555500
self.assertTrue(s.convert_ope6() == '555555')
self.assertTrue(s.convert_ope8() == '55555500')
s.ope6_id = None
s.ope8_id = None
self.assertTrue(s.convert_ope6() == '')
self.assertTrue(s.convert_ope8() == '')
def test_constant_models(self):
cr = ConstantRate(name='cr test', slug='crTest', value='0.1')
self.assertTrue(cr.__unicode__() == u'cr test (crTest), updated None')
cc = ConstantCap(name='cc test', slug='ccTest', value='0')
self.assertTrue(cc.__unicode__() == u'cc test (ccTest), updated None')
@mock.patch('paying_for_college.models.send_mail')
def test_email_notification(self, mock_mail):
skul = self.create_school()
noti = self.create_notification(skul)
msg = noti.notify_school()
self.assertTrue('failed' in msg)
contact = self.create_contact()
skul.contact = contact
skul.save()
noti2 = self.create_notification(skul)
msg1 = noti2.notify_school()
self.assertTrue(mock_mail.call_count == 1)
self.assertTrue('email' in msg1)
@mock.patch('paying_for_college.models.requests.post')
def test_endpoint_notification(self, mock_post):
skul = self.create_school()
contact = self.create_contact()
contact.endpoint = 'fake-api.fakeschool.edu'
contact.save()
skul.contact = contact
skul.save()
noti = self.create_notification(skul)
msg = noti.notify_school()
# print("notification mock_post.call_count is {0}".format(mock_post.call_count))
# print("endpoint notification msg is {0}".format(msg))
self.assertTrue(mock_post.call_count == 1)
self.assertTrue('endpoint' in msg)
def test_endpoint_notification_blank_contact(self):
skul = self.create_school()
contact = self.create_contact()
contact.contact = ''
contact.endpoint = ''
contact.save()
skul.contact = contact
skul.save()
noti = self.create_notification(skul)
msg = noti.notify_school()
self.assertTrue('failed' in msg)
| cc0-1.0 | 5,558,168,240,107,250,000 | 41.139394 | 88 | 0.562347 | false | 3.961823 | true | false | false |
rxcomm/qtile | libqtile/widget/memory.py | 6 | 1883 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Jörg Thalheim (Mic92)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import division
from libqtile.widget import base
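# A /proc/meminfo line looks like "MemTotal:       16308908 kB"; get_meminfo()
# keeps the numeric part and converts it from kB to (decimal) MB.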
def get_meminfo():
val = {}
    with open('/proc/meminfo') as f:
        for line in f:
key, tail = line.split(':')
uv = tail.split()
val[key] = int(uv[0]) // 1000
val['MemUsed'] = val['MemTotal'] - val['MemFree']
return val
class Memory(base.InLoopPollText):
"""Displays memory usage"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
("fmt", "{MemUsed}M/{MemTotal}M", "see /proc/meminfo for field names")
]
def __init__(self, **config):
super(Memory, self).__init__(**config)
self.add_defaults(Memory.defaults)
def poll(self):
return self.fmt.format(**get_meminfo())
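# Example usage in a qtile config (assumed, not part of this module):
#     widget.Memory(fmt="{MemUsed}M/{MemTotal}M")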
| mit | -5,031,802,817,300,626,000 | 38.208333 | 79 | 0.694474 | false | 4.029979 | false | false | false |