| repo_name (string, 6-61 chars) | path (string, 4-230 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | text (string, 1.01k-850k chars) | license (15 classes) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6-96.6) | line_max (int64, 32-939) | alpha_frac (float64, 0.26-0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62-6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
leobrowning92/arduino-lineCCD-spectrometer | plotter.py | 1 | 1293 | import serial
import time
import matplotlib.pyplot as plt
plt.interactive(True)
print 'import'
# open up dummy serial to reset the arduino with
s = serial.Serial(port='/dev/ttyUSB1')
# reset the arduino
s.flushInput()
s.setDTR(level=False)
time.sleep(0.5)
# ensure there is no stale data in the buffer
s.flushInput()
s.setDTR()
time.sleep(1)
# now open up a new serial line for communication
s = serial.Serial(baudrate=115200, port='/dev/ttyUSB1', timeout=0.01)
#initializes plotting axis
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
# initializes data
data=[]
# time for system to settle after opening serial port
time.sleep(1)
# initial read command
s.write('r')
#continuous loop that will plot the data anew each time it runs, as well as
#pass the read command to the arduino
while True:
s.write('r') #read command
#loop which iterates through the serial being read, only taking
#non-empty values and appending them to the data set
while True:
value=s.readline()
if value !='':
data.append(float(value.rstrip()))
#determines the length of the dataset to observe
if len(data)==800:
break
#plots the dataset
ax1.clear()
ax1.plot( range(len(data)), data )
plt.draw()
data=[]
| gpl-3.0 | 7,407,440,368,750,293,000 | 22.509091 | 75 | 0.681361 | false | 3.375979 | false | false | false |
pmsserrana/agenda | agenda_administrativa/apps/atividades/migrations/0001_initial.py | 1 | 9361 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-11 11:38
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='AgendaAdministrativa',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('compartilhada', models.BooleanField(choices=[(True, 'Sim'), (False, 'Não')], verbose_name='Compartilhar agenda?')),
('dt_referencia', models.DateField(verbose_name='data de referência')),
('pauta', models.TextField(verbose_name='pauta')),
('inicio_acao', models.DateField(verbose_name='ínicio da ação')),
('status', models.BooleanField(choices=[(True, 'aberta'), (False, 'encerrada')], default=True, verbose_name='status')),
('prioridade', models.IntegerField(choices=[(0, 'baixa'), (1, 'média'), (2, 'alta')], default=1, verbose_name='prioridade')),
('fim_acao', models.DateField(blank=True, null=True, verbose_name='fim da ação')),
('dt_prev_dis_agenda', models.DateField(blank=True, null=True, verbose_name='data prev. discussão da agenda')),
('dt_prev_fim_agenda', models.DateField(blank=True, null=True, verbose_name='data prev. fim agenda')),
('dt_fim_agenda', models.DateField(blank=True, null=True, verbose_name='data finalização agenda')),
],
options={
'verbose_name': 'Agenda Administrativa',
'verbose_name_plural': 'Agendas Administrativas',
'db_table': 'tb_agenda_administrativa',
},
),
migrations.CreateModel(
name='AgendaAnexos',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('descricao', models.CharField(max_length=80, verbose_name='descrição')),
('anexo', models.FileField(blank=True, help_text='anexos para agendas', max_length=200, null=True, upload_to='uploads/anexos/', verbose_name='enviar arquivo')),
('dt_atualizacao', models.DateTimeField(auto_now_add=True, verbose_name='data atualizacao')),
('agenda_administrativa', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='anexos', to='atividades.AgendaAdministrativa')),
('usuario', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='anexos', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Agenda Anexo',
'verbose_name_plural': 'Agenda Anexos',
'db_table': 'tb_agenda_anexo',
},
),
migrations.CreateModel(
name='AgendaMovimentacao',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('desc_movimentacao', models.TextField(blank=True, null=True, verbose_name='Movimentação')),
('dt_atualizacao', models.DateTimeField(auto_now_add=True, verbose_name='data atualizacao')),
('agenda_administrativa', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='atividades.AgendaAdministrativa')),
('usuario', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='movimentacao', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Agenda Movimentacao',
'verbose_name_plural': 'Agendas Movimentacao',
'db_table': 'tb_agenda_movimentacao',
},
),
migrations.CreateModel(
name='AgendaTipo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tipo', models.CharField(max_length=60, verbose_name='tipo')),
],
options={
'verbose_name': 'Agenda Tipo',
'verbose_name_plural': 'Agendas Tipo',
'db_table': 'tb_agenda_agenda_tipo',
},
),
migrations.CreateModel(
name='DepartamentoSetor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=80, verbose_name='nome')),
],
options={
'verbose_name': 'Departamento ou Setor',
'verbose_name_plural': 'Departamentos ou Setores',
'db_table': 'tb_agenda_departamento_setor',
},
),
migrations.CreateModel(
name='Esfera',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('esfera', models.CharField(max_length=60, verbose_name='esfera')),
],
options={
'verbose_name': 'Esfera',
'verbose_name_plural': 'Esfera',
'db_table': 'tb_agenda_esfera',
},
),
migrations.CreateModel(
name='OrgaoDemandante',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('orgao', models.CharField(max_length=60, verbose_name='orgão')),
('cidade', models.CharField(max_length=80, verbose_name='cidade')),
('uf', models.CharField(choices=[('AC', 'Acre'), ('AL', 'Alagoas'), ('AP', 'Amapá'), ('AM', 'Amazonas'), ('BA', 'Bahia'), ('CE', 'Ceará'), ('DF', 'Distrito Federal'), ('ES', 'Espírito Santo'), ('GO', 'Goiás'), ('MA', 'Maranhão'), ('MT', 'Mato Grosso'), ('MS', 'Mato Grosso do Sul'), ('MG', 'Minas Gerais'), ('PA', 'Pará'), ('PB', 'Paraíba'), ('PR', 'Paraná'), ('PE', 'Pernambuco'), ('PI', 'Piauí'), ('RJ', 'Rio de Janeiro'), ('RN', 'Rio Grande do Norte'), ('RS', 'Rio Grande do Sul'), ('RO', 'Rondônia'), ('RR', 'Roraima'), ('SC', 'Santa Catarina'), ('SP', 'São Paulo'), ('SE', 'Sergipe'), ('TO', 'Tocantins')], max_length=2, verbose_name='uf')),
],
options={
'verbose_name': 'Orgão demandante',
'verbose_name_plural': 'Orgãos demandantes',
'db_table': 'tb_agenda_orgao',
},
),
migrations.CreateModel(
name='PessoasEnvolvidasAgenda',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=80, unique=True, verbose_name='nome')),
('telefone', models.CharField(max_length=15, verbose_name='telefone')),
('email', models.EmailField(blank=True, max_length=254, null=True, verbose_name='email')),
('funcionario', models.BooleanField(choices=[(True, 'sim'), (False, 'não')], verbose_name='é funcionario?')),
],
options={
'verbose_name': 'Pessoa envolvida',
'verbose_name_plural': 'Pessoas envolvidas',
'db_table': 'tb_pessoa_envolvida',
},
),
migrations.AddField(
model_name='agendaadministrativa',
name='coordenador_agenda',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='coordenador', to='atividades.PessoasEnvolvidasAgenda'),
),
migrations.AddField(
model_name='agendaadministrativa',
name='dpto_setor',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='atividades.DepartamentoSetor'),
),
migrations.AddField(
model_name='agendaadministrativa',
name='esfera',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='atividades.Esfera'),
),
migrations.AddField(
model_name='agendaadministrativa',
name='orgao_demandante',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='atividades.OrgaoDemandante'),
),
migrations.AddField(
model_name='agendaadministrativa',
name='pessoas_envolvidas',
field=models.ManyToManyField(related_name='pessoas', to='atividades.PessoasEnvolvidasAgenda'),
),
migrations.AddField(
model_name='agendaadministrativa',
name='tipo_agenda',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='atividades.AgendaTipo'),
),
migrations.AddField(
model_name='agendaadministrativa',
name='usuario',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='agendas', to=settings.AUTH_USER_MODEL),
),
]
| bsd-3-clause | -6,113,654,590,307,478,000 | 53.561404 | 662 | 0.571383 | false | 3.647381 | false | false | false |
eroicaleo/LearningPython | interview/leet/36_Valid_Sudoku.py | 1 | 2017 | #!/usr/bin/env python
class Solution:
def isValidSudoku(self, board: 'List[List[str]]') -> 'bool':
# Each row
for i in range(9):
sudoku = {}
for j in range(9):
if board[i][j] == ".":
continue
if board[i][j] in sudoku:
return False
else:
sudoku[board[i][j]] = 1
# Each col
for i in range(9):
sudoku = {}
for j in range(9):
if board[j][i] == ".":
continue
if board[j][i] in sudoku:
return False
else:
sudoku[board[j][i]] = 1
# Each 3x3 square
for i in range(0,9,3):
for j in range(0,9,3):
sudoku = {}
for p in range(3):
for q in range(3):
if board[i+p][j+q] == ".":
continue
if board[i+p][j+q] in sudoku:
return False
else:
sudoku[board[i+p][j+q]] = 1
return True
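# Example board that is not a valid Sudoku: column 0 contains two "8"s.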
board = [
["8","3",".",".","7",".",".",".","."],
["6",".",".","1","9","5",".",".","."],
[".","9","8",".",".",".",".","6","."],
["8",".",".",".","6",".",".",".","3"],
["4",".",".","8",".","3",".",".","1"],
["7",".",".",".","2",".",".",".","6"],
[".","6",".",".",".",".","2","8","."],
[".",".",".","4","1","9",".",".","5"],
[".",".",".",".","8",".",".","7","9"]
]
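# Valid example board; it overwrites the invalid board above, so only this one is checked below.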
board = [
["5","3",".",".","7",".",".",".","."],
["6",".",".","1","9","5",".",".","."],
[".","9","8",".",".",".",".","6","."],
["8",".",".",".","6",".",".",".","3"],
["4",".",".","8",".","3",".",".","1"],
["7",".",".",".","2",".",".",".","6"],
[".","6",".",".",".",".","2","8","."],
[".",".",".","4","1","9",".",".","5"],
[".",".",".",".","8",".",".","7","9"]
]
sol = Solution()
print(sol.isValidSudoku(board))
| mit | -7,643,550,152,595,659,000 | 31.532258 | 64 | 0.260288 | false | 2.910534 | false | false | false |
adambreznicky/python | CountyMapbook/CountyMapbook_vJeff.py | 1 | 10744 | __file__ = 'CountyMapbook_v1'
__date__ = '6/18/2014'
__author__ = 'ABREZNIC'
import os, arcpy, datetime
from arcpy import env
#date
now = datetime.datetime.now()
curMonth = now.strftime("%m")
curDay = now.strftime("%d")
curYear = now.strftime("%Y")
today = curYear + "_" + curMonth + "_" + curDay
#variables
cofolder = "C:\\TxDOT\\CountyMapbook"
workspace = cofolder + "\\" + curYear
database = workspace + "\\Working.gdb"
comanche = "Connection to Comanche.sde"
restareas = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Travel\\TPP_GIS.APP_TPP_GIS_ADMIN.REST_AREA_PNT"
parks = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Park\\TPP_GIS.APP_TPP_GIS_ADMIN.Public_Lands_2014"
cemeteries = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Cemetery\\TPP_GIS.APP_TPP_GIS_ADMIN.Cemetery"
cemeteriesPT = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Cemetery\\TPP_GIS.APP_TPP_GIS_ADMIN.Cemetery_Points"
roads = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Roadways\\TPP_GIS.APP_TPP_GIS_ADMIN.TXDOT_Roadways"
counties = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.County\\TPP_GIS.APP_TPP_GIS_ADMIN.County"
airports = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Airport\\TPP_GIS.APP_TPP_GIS_ADMIN.Airport"
airportsPT = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Airport\\TPP_GIS.APP_TPP_GIS_ADMIN.Airport_Points"
prisons = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Base_Map_Layers\\TPP_GIS.APP_TPP_GIS_ADMIN.Prisons"
military = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Base_Map_Layers\\TPP_GIS.APP_TPP_GIS_ADMIN.Military"
schools = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Base_Map_Layers\\TPP_GIS.APP_TPP_GIS_ADMIN.Education"
cities = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.City\\TPP_GIS.APP_TPP_GIS_ADMIN.City"
citiesPT = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.City\\TPP_GIS.APP_TPP_GIS_ADMIN.City_Points"
lakes = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Water\\TPP_GIS.APP_TPP_GIS_ADMIN.Water_Bodies"
railroads = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Railroad\\TPP_GIS.APP_TPP_GIS_ADMIN.Railroads"
rivers = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Water\\TPP_GIS.APP_TPP_GIS_ADMIN.Streams"
grid = "T:\\DATAMGT\\MAPPING\\Mapping Products\\CountyMapbook\\Calendar year 2014\\District Grids\\State_Grid_120K.shp"
def preparation():
print "Creating database..."
if not os.path.exists(workspace):
os.makedirs(workspace)
else:
try:
arcpy.Delete_management(database)
except:
pass
for file in os.listdir(workspace):
thefile = os.path.join(workspace, file)
os.remove(thefile)
arcpy.CreateFileGDB_management(workspace, "Working.gdb")
print "Copying rest areas..."
arcpy.Select_analysis(restareas, database + "\\restarea", "RA_TYPE_NM = 'TIC'")
arcpy.AddField_management(database + "\\restarea", "label", "TEXT", "", "", 100)
cursor = arcpy.UpdateCursor(database + "\\restarea")
for row in cursor:
row.setValue("label", "Travel Information Center")
cursor.updateRow(row)
del cursor
del row
print "Copying parks..."
arcpy.Select_analysis(parks, database + "\\parks", "(GOVT_JURIS = '3' OR GOVT_JURIS = '4') AND LAND_NM IS NOT NULL AND LAND_NM <> ''")
print "Copying cemeteries..."
arcpy.Select_analysis(cemeteries, database + "\\cemetery", "CEMETERY_NM IS NOT NULL AND CEMETERY_NM <> ''")
arcpy.Select_analysis(cemeteriesPT, database + "\\cemetery_point", "CEMETERY_NM IS NOT NULL AND CEMETERY_NM <> ''")
print "Copying highways..."
arcpy.Select_analysis(roads, database + "\\highways", "(( RTE_CLASS = '1' OR RTE_CLASS = '6' ) AND RDBD_TYPE = 'KG' AND RTE_OPEN = 1 ) OR (RTE_NM = 'SL0008' AND RDBD_TYPE = 'KG' AND RTE_OPEN = 1 )")
print "Copying counties..."
arcpy.Copy_management(counties, database + "\\counties")
print "Copying airports..."
arcpy.Select_analysis(airports, database + "\\airports", "ARPRT_NM <> '' AND ARPRT_NM IS NOT NULL")
arcpy.Select_analysis(airportsPT, database + "\\airports_point", "DISPLAY = 'Yes'")
print "Copying county roads..."
arcpy.Select_analysis(roads, database + "\\countyroads", "RTE_CLASS = '2' AND RTE_OPEN = 1 AND RDBD_TYPE = 'KG'")
print "Copying prisons..."
arcpy.Copy_management(prisons, database + "\\prison")
print "Copying military..."
arcpy.Copy_management(military, database + "\\military")
print "Copying schools..."
arcpy.Copy_management(schools, database + "\\school")
print "Copying cities..."
arcpy.Copy_management(cities, database + "\\cities")
arcpy.Select_analysis(citiesPT, database + "\\cities_point", "INC = 'N'")
print "Copying lakes..."
arcpy.Select_analysis(lakes, database + "\\lakes", "BODY_NM IS NOT NULL AND BODY_NM <> '' AND BODY_TYPE = '1'")
print "Copying railroads..."
arcpy.Select_analysis(railroads, database + "\\railroad", "RR_TYPE = 'M' AND RR_STAT = 'A'")
print "Fixing railroad names..."
names = {}
cursor = arcpy.SearchCursor("T:\\DATAMGT\\MAPPING\\Railroad\\DomainRef.dbf")
for row in cursor:
curnum = row.domainTXT
names[curnum] = row.domainNM
del cursor
del row
arcpy.AddField_management(database + "\\railroad", "new_name", "TEXT", "", "", 100)
cursor = arcpy.UpdateCursor(database + "\\railroad")
for row in cursor:
curname = str(row.RR_NM)
if curname in names.keys():
row.setValue("new_name", names[curname])
else:
row.setValue("new_name", row.RR_NM)
cursor.updateRow(row)
del cursor
del row
print "Copying rivers..."
arcpy.Select_analysis(rivers, database + "\\rivers", "STRM_TYPE = '1' AND STRM_NM <> '' AND STRM_NM IS NOT NULL")
print "Copying federal roads..."
arcpy.Select_analysis(roads, database + "\\federal", "RTE_CLASS = '7' AND RTE_OPEN = 1 AND RDBD_TYPE = 'KG' AND FULL_ST_NM <> ''")
print "Copying grid..."
arcpy.Copy_management(grid, database + "\\grid")
# arcpy.Copy_management(grid, database + "\\grid")
# #
# print "Renumbering grid..."
# cursor = arcpy.UpdateCursor(database + "\\grid")
# for row in cursor:
# row.setValue("ID", row.ID - 66)
# row.setValue("STATE_ID", row.STATE_ID - 66)
# if row.NORTH != 0:
# row.setValue("NORTH", row.NORTH - 66)
# if row.SOUTH != 0:
# row.setValue("SOUTH", row.SOUTH - 66)
# if row.EAST != 0:
# row.setValue("EAST", row.EAST - 66)
# if row.WEST != 0:
# row.setValue("WEST", row.WEST - 66)
# cursor.updateRow(row)
# del cursor
# del row
print "Creating union..."
arcpy.Union_analysis([database + "\\grid", database + "\\counties"], database + "\\union")
cursor = arcpy.UpdateCursor(database + "\\union")
for row in cursor:
if row.CNTY_NM == "" or row.CNTY_NM is None or row.STATE_ID == 0:
cursor.deleteRow(row)
del cursor
del row
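# Intersect each copied feature class with the grid/county union, then
# summarize the results by unique grid cell, county and feature name.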
def intersects():
env.workspace = database
print "Creating field dictionary..."
dict = {}
dict["restarea"] = "label"
dict["parks"] = "LAND_NM"
dict["cemetery"] = "CEMETERY_NM"
dict["cemetery_point"] = "CEMETERY_NM"
dict["highways"] = "FULL_ST_NM"
dict["counties"] = "CNTY_NM"
dict["airports"] = "ARPRT_NM"
dict["airports_point"] = "ARPRT_NM"
dict["countyroads"] = "FULL_ST_NM"
dict["prison"] = "PRISON_NM"
dict["military"] = "BASE_NM"
dict["school"] = "SCHOOL_NM"
dict["cities"] = "CITY_NM"
dict["cities_point"] = "CITY_NM"
dict["lakes"] = "BODY_NM"
dict["railroad"] = "new_name"
dict["rivers"] = "STRM_NM"
dict["federal"] = "FULL_ST_NM"
print "Performing intersects..."
fcList = arcpy.ListFeatureClasses()
for fc in fcList:
if fc != "union" and fc != "grid":
print str(fc)
arcpy.Intersect_analysis(["union", fc], fc + "__INTERSECT")
del fcList
del fc
print "Summarizing..."
fcList = arcpy.ListFeatureClasses()
for fc in fcList:
if fc.split("__")[-1] == "INTERSECT":
dictname = fc.split("__")[0]
print dictname
field = dict[dictname]
arcpy.AddField_management(fc, "UNIQUE", "TEXT", "", "", 250)
cursor = arcpy.UpdateCursor(fc)
for row in cursor:
value = row.getValue(field)
if value is None:
value = ""
row.setValue("UNIQUE", str(row.STATE_ID) + row.CNTY_NM + value)
cursor.updateRow(row)
del cursor
del row
arcpy.Statistics_analysis(fc, dictname + "_SUMMARIZED", [["STATE_ID", "MIN"], ["CNTY_NM", "FIRST"], [dict[dictname], "FIRST"]], ["UNIQUE"])
print "Merging with point tables..."
arcpy.Merge_management(["cemetery_SUMMARIZED", "cemetery_point_SUMMARIZED"], "cemetery_all_SUMMARIZED")
arcpy.Merge_management(["airports_SUMMARIZED", "airports_point_SUMMARIZED"], "airports_all_SUMMARIZED")
arcpy.Merge_management(["cities_SUMMARIZED", "cities_point_SUMMARIZED"], "cities_all_SUMMARIZED")
print "Renaming tables..."
arcpy.Rename_management("cemetery_SUMMARIZED", "cemetery_SUMpreMERGE")
arcpy.Rename_management("cemetery_point_SUMMARIZED", "cemetery_point_SUMpreMERGE")
arcpy.Rename_management("airports_SUMMARIZED", "airports_SUMpreMERGE")
arcpy.Rename_management("airports_point_SUMMARIZED", "airports_point_SUMpreMERGE")
arcpy.Rename_management("cities_SUMMARIZED", "cities_SUMpreMERGE")
arcpy.Rename_management("cities_point_SUMMARIZED", "cities_point_SUMpreMERGE")
def merge():
env.workspace = database
env.overwriteOutput = True
print "Copying mdb..."
newDbase = "T:\\DATAMGT\\MAPPING\\Mapping Products\\CountyMapbook\\Calendar year 2014\\Feature Indicies\\Working\\2014_INDEXS_Geodatabase"+today+".mdb"
arcpy.Copy_management("T:\\DATAMGT\\MAPPING\\Mapping Products\\CountyMapbook\\Calendar year 2014\\Feature Indicies\\Working\\2014_INDEXS_Geodatabase.mdb", newDbase)
print "Overwriting tables..."
tList = arcpy.ListTables()
for table in tList:
if table.split("_")[-1] == "SUMMARIZED":
name = table.split("_")[0]
capname = name.title()
arcpy.Copy_management(table, newDbase + "\\" + capname)
preparation()
intersects()
merge()
print "That's all folks!" | mit | 4,961,575,016,184,159,000 | 47.840909 | 202 | 0.638682 | false | 3.071469 | false | false | false |
midonet/midonet-sandbox | src/tst/test_builder.py | 1 | 4618 | # Copyright 2015 Midokura SARL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import Mock
import mock
from injector import Injector, singleton, provides
import pytest
from midonet_sandbox.configuration import Config
from midonet_sandbox.logic.builder import Builder
from midonet_sandbox.logic.dispatcher import Dispatcher
from midonet_sandbox.logic.injection import SandboxModule
from midonet_sandbox.wrappers.docker_wrapper import Docker
from midonet_sandbox.logic.composer import Composer
class DockerMock(object):
def __init__(self):
self.existing_images = []
def set_existing_images(self, existing_images):
self.existing_images = existing_images
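    # Mimic Docker's image listing: return only images that have a repo
    # tag starting with the given prefix.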
def list_images(self, prefix):
filtered = list()
if prefix:
for image in self.existing_images:
if 'RepoTags' in image and image['RepoTags'] is not None:
for tag in image['RepoTags']:
if tag.startswith(prefix):
filtered.append(image)
return filtered
class BuilderMock(object):
pass
class ComposerMock(object):
def get_components_by_flavour(self, flavour):
        if flavour == 'with_external_component':
            return {'external1:master': None,
                    'sandbox/internal1:master': None}
        elif flavour == 'without_external_component':
            return {'sandbox/internal1:master': None,
                    'sandbox/internal2:master': None}
class SandboxModuleTest(SandboxModule):
def __init__(self):
super(self.__class__, self).__init__(dict())
@singleton
@provides(Config)
def configuration_provider(self):
return Config('mock')
@singleton
@provides(Composer)
def composer_provider(self):
return ComposerMock()
@singleton
@provides(Docker)
def docker_provider(self):
return DockerMock()
class TestBuilder(object):
"""
"""
def setup_method(self, method):
self.injector = Injector([SandboxModuleTest()])
self.dispatcher = self.injector.get(Dispatcher)
self.builder = self.injector.get(Builder)
self._composer = self.injector.get(Composer)
self._docker = self.injector.get(Docker)
self._build = Mock()
self.builder.build = self._build
self.builder._composer = self._composer
def test_build_not_sandbox_image(self):
options = {
'<flavour>': 'with_external_component',
'--force': False
}
self.dispatcher.build_all(options)
self._build.assert_called_once_with(u'internal1:master')
def test_existing_image_not_build(self):
exists = [{'RepoTags': ['sandbox/internal1:master']}]
options = {
'<flavour>': 'without_external_component',
'--force': False
}
self._docker.set_existing_images(exists)
self.dispatcher.build_all(options)
self._build.assert_called_once_with(u'internal2:master')
def test_existing_image_not_build_with_extra_tag(self):
exists = [{'RepoTags': ['sandbox/internal1:master',
'repo/sandbox/internal1:master']}]
options = {
'<flavour>': 'without_external_component',
'--force': False
}
self._docker.set_existing_images(exists)
self.dispatcher.build_all(options)
self._build.assert_called_once_with(u'internal2:master')
def test_force_build_existing_image(self):
exists = [{'RepoTags': ['sandbox/internal1:master',
'repo/sandbox/internal1:master']}]
options = {
'<flavour>': 'without_external_component',
'--force': True
}
self._docker.set_existing_images(exists)
self.dispatcher.build_all(options)
self._build.assert_has_calls([mock.call(u'internal1:master'),
mock.call(u'internal2:master')],
any_order=True)
if __name__ == '__main__':
pytest.main()
| apache-2.0 | -3,875,891,980,378,285,600 | 29.993289 | 74 | 0.622347 | false | 4.123214 | true | false | false |
mandeeps708/scripts | JSON-Exporter.py | 1 | 1861 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
* File Name : JSON-Exporter.py
* Purpose : Writing JSON output by checking the chapters and images for the
"https://gitlab.com/greatdeveloper/kitab" project.
* Creation Date : 15-08-2016 (Independence Day Special Hackathon)
* Copyright (c) 2016 Mandeep Singh <mandeeps708@gmail.com>
"""
from __future__ import print_function
import os
import re
import json
def sort_nicely(targetList):
""" Sorts the given list with natural sort.
"""
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted(targetList, key=alphanum_key)
# Directory in which chapters are added.
directory = "/home/mandeep/work/json-export/book"
# Declaration.
bookName = "Design Aids"
totalPages = 0
storage = {}
# Get total number of chapters and pages available.
for root, dirs, files in os.walk(directory):
# print root, dirs, files
if root == directory:
chapters = sorted(dirs, key=str)
else:
storage[root] = sort_nicely(files)
"""if root != directory:
print("Files:", root, files)
"""
# Calculates total number of pages available.
totalPages = totalPages + len(files)
print("\nTotal number of Pages:", totalPages)
print("\nTotal number of chapters:", chapters)
print("\nStorage:", storage)
# Basic list structure to be exported as json.
data = {"Book": {"Name": bookName, "totalPages": totalPages,
"info": {}}}
# Updating the json list to contain the images.
for item in chapters:
data['Book']['info'][item] = storage[os.path.join(directory, item)]
print("\nFinal Output:", data)
# Writing data as json format to a file.
with open(os.path.join(directory, "output.json"), 'w') as outputfile:
json.dump(data, outputfile)
| mit | 6,788,678,174,458,330,000 | 26.367647 | 78 | 0.671145 | false | 3.446296 | false | false | false |
sixuanwang/SAMSaaS | wirecloud-develop/src/build/lib.linux-x86_64-2.7/wirecloud/platform/admin.py | 2 | 2808 | # -*- coding: utf-8 -*-
# Copyright (c) 2012-2014 CoNWeT Lab., Universidad Politécnica de Madrid
# This file is part of Wirecloud.
# Wirecloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Wirecloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with Wirecloud. If not, see <http://www.gnu.org/licenses/>.
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from relatives.utils import object_edit_link
from wirecloud.platform import models
admin.site.register(models.Reporter) #sixuan
admin.site.register(models.Article) #sixuan
admin.site.register(models.Constant)
admin.site.register(models.Position)
admin.site.register(models.IWidget)
admin.site.register(models.Variable)
class MarketAdmin(admin.ModelAdmin):
list_display = ('user', 'name')
ordering = ('user', 'name')
admin.site.register(models.Market, MarketAdmin)
class MarketUserDataAdmin(admin.ModelAdmin):
list_display = ('market', 'user', 'name', 'value')
ordering = ('market', 'user', 'name')
admin.site.register(models.MarketUserData, MarketUserDataAdmin)
class VariableDefInline(admin.StackedInline):
model = models.VariableDef
extra = 0
class WidgetAdmin(admin.ModelAdmin):
inlines = (VariableDefInline,)
admin.site.register(models.Widget, WidgetAdmin)
admin.site.register(models.XHTML)
admin.site.register(models.PlatformPreference)
class WorkspacePreferenceInline(admin.TabularInline):
model = models.WorkspacePreference
extra = 1
class TabPreferenceInline(admin.TabularInline):
model = models.TabPreference
extra = 1
class TabInline(admin.TabularInline):
model = models.Tab
edit_link = object_edit_link(_("Edit"))
fields = ('name', 'position', edit_link)
readonly_fields = (edit_link,)
ordering = ('position',)
extra = 1
class TabAdmin(admin.ModelAdmin):
list_display = ('workspace', 'name', 'position')
list_display_links = ('name',)
ordering = ('workspace', 'position')
inlines = (TabPreferenceInline,)
class WorkspaceAdmin(admin.ModelAdmin):
list_display = ('creator', 'name')
ordering = ('creator', 'name')
inlines = (WorkspacePreferenceInline, TabInline,)
admin.site.register(models.Workspace, WorkspaceAdmin)
admin.site.register(models.Tab, TabAdmin)
admin.site.register(models.UserWorkspace)
| gpl-2.0 | 5,498,997,079,468,502,000 | 27.353535 | 77 | 0.739936 | false | 3.703166 | false | false | false |
garygriswold/Bible.js | Versions/Release.2.x.x/pyTmp/ComparePermissionAllowed.py | 1 | 2557 | #
# ComparePermissionAllowed.py
#
# This program compares a generated file of permission requests
# (PermissionsRequest.txt) against the apiallowed.csv file provided by FCBH.
# The purpose is to identify any requested permissions that are not allowed.
#
# Layout of apiallowed.csv
# column 0 - line number
# column 1 - LangName
# column 2 - Reg NT Text DamId
# column 3 - Reg OT Text DamId
# column 4 - ND NT Text DamId
# column 5 - ND OT Text DamId
# column 6 - Reg NT Audio DamId
# column 7 - Reg OT Audio DamId
# column 8 - ND NT Audio DamId
# column 9 - ND OT Audio DamId
#
# Process
# 1. Read the csv file one line at a time
# 2. Create Set of Col 2 NT Text
# 3. Create Set of Col 3 OT Text
# 4. Create Set of Col 6 and 8 NT Audio
# 5. Create Set of Col 7 and 9 OT Audio
# 6. Read PermissionsRequest.txt, and parse records
# 7. For each row lookup bibleId in NT Text
# 8. For each text lookup textId in NT Text
# 9. For each audio lookup NT damId in NT Audio
# 10. For each audio lookup OT damId in OT Audio
# 11. Report any differences in bibleId to textId
# 12. Report any differences in damId to textId
# 13. Report any differences in language id.
import io
import os
import csv
def unicode_csv_reader(utf8_data, dialect=csv.excel, **kwargs):
csv_reader = csv.reader(utf8_data, dialect=dialect, **kwargs)
for row in csv_reader:
yield [unicode(cell, 'utf-8') for cell in row]
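# Add the first addLen characters of str to aset, skipping empty or "NA"
# values and reporting entries whose length differs from expectLen.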
def add(str, expectLen, addLen, aset):
if len(str) == 0:
return None
if str == "NA":
return None
if len(str) != expectLen:
print "Length Error", expectLen, str
return None
aset.add(str[0:addLen])
ntTextSet = set()
otTextSet = set()
ntAudioSet = set()
otAudioSet = set()
filename = os.environ['HOME'] + "/ShortSands/DBL/FCBH/apiallowed.csv"
reader = unicode_csv_reader(open(filename))
for row in reader:
add(row[2], 10, 6, ntTextSet)
add(row[3], 10, 6, otTextSet)
add(row[6], 10, 10, ntAudioSet)
add(row[8], 10, 10, ntAudioSet)
add(row[7], 10, 10, otAudioSet)
add(row[9], 10, 10, otAudioSet)
reader.close()
#print otAudioSet
input1 = io.open("PermissionsRequest.txt", mode="r", encoding="utf-8")
for line in input1:
if line.startswith('\"arn:aws:s3:::'):
row = line.split("/")
#print row[1]
if row[1] == "text":
row[4] = row[4].split("_")[0]
if row[2] not in ntTextSet and row[3] not in ntTextSet and row[4] not in ntTextSet:
print row[1], row[2], row[3], row[4]
elif row[1] == "audio":
#print row[2], row[3], row[4]
if row[3] not in ntAudioSet and row[3] not in otAudioSet:
print row[1], row[2], row[3], row[4]
| mit | 333,765,121,618,681,500 | 29.082353 | 86 | 0.68244 | false | 2.788441 | false | false | false |
unlimitedlabs/orchestra | orchestra/utils/tests/test_project_properties.py | 2 | 2109 | from unittest.mock import patch
from orchestra.models import Iteration
from orchestra.models import Project
from orchestra.tests.helpers import OrchestraTestCase
from orchestra.tests.helpers.fixtures import setup_models
from orchestra.utils.project_properties import completed_projects
from orchestra.utils.task_lifecycle import assign_task
from orchestra.utils.task_lifecycle import submit_task
class ProjectPropertiesTestCase(OrchestraTestCase):
def setUp(self):
super().setUp()
setup_models(self)
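    # A project only counts as completed after every one of its tasks has
    # been submitted (review steps are mocked out here).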
def test_completed_projects(self):
projects = Project.objects.all()
initial_task = assign_task(self.workers[6].id,
self.tasks['awaiting_processing'].id)
with patch('orchestra.utils.task_lifecycle._is_review_needed',
return_value=False):
initial_task = submit_task(initial_task.id, {},
Iteration.Status.REQUESTED_REVIEW,
self.workers[6])
self.assertEqual(completed_projects(projects).count(), 0)
next_task = assign_task(
self.workers[6].id,
initial_task.project.tasks.order_by('-start_datetime')[0].id)
with patch('orchestra.utils.task_lifecycle._is_review_needed',
return_value=False):
initial_task = submit_task(next_task.id, {},
Iteration.Status.REQUESTED_REVIEW,
self.workers[6])
self.assertEqual(completed_projects(projects).count(), 0)
next_task = assign_task(
self.workers[6].id,
initial_task.project.tasks.order_by('-start_datetime')[0].id)
with patch('orchestra.utils.task_lifecycle._is_review_needed',
return_value=False):
initial_task = submit_task(next_task.id, {},
Iteration.Status.REQUESTED_REVIEW,
self.workers[6])
self.assertEqual(completed_projects(projects).count(), 1)
| apache-2.0 | 3,096,894,390,393,699,000 | 42.9375 | 73 | 0.596491 | false | 4.321721 | true | false | false |
creatorssyn/creators_py | creators_py/creators_api.py | 1 | 8180 | #!/usr/bin/python
"""
Creators GET API interface v0.3.2
Full API docs: http://get.creators.com/docs/wiki
@author Brandon Telle <btelle@creators.com>
@copyright (c) 2015 Creators <www.creators.com>
"""
import subprocess, shlex, re, urllib, os.path
# Python 3+ puts urlencode in urllib.parse
try:
from urllib import parse
use_parse = True
except:
use_parse = False
# We need some way to parse JSON responses
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
raise ImportError("A JSON library is required to use Creators_API")
# Try to use pycURL instead of system calls
try:
import pycurl
from io import BytesIO
use_pycurl = True
except ImportError:
use_pycurl = False
# User's API Key
api_key = ""
# API url
api_url = "http://get.creators.com/"
# API version
api_version = 0.32
# API key length
api_key_length = 40
# Make an API request
# @param endpoint string API url
# @param parse_json bool if True, parse the result as JSOn and return the parsed object
# @throws ApiError if an error code is returned by the API
# @return parsed JSON object, or raw return string
def __api_request(endpoint, parse_json=True, post_data={}, destination=''):
if api_key == "" and len(post_data) == 0:
raise ApiError('API key must be set')
data = ''
if len(post_data) > 0:
try:
data = urllib.urlencode(post_data)
except:
try:
data = urllib.parse.urlencode(post_data)
except:
raise ApiError('Cannot parse post string')
if use_pycurl:
c = pycurl.Curl()
c.setopt(c.URL, api_url+endpoint)
if data != '':
c.setopt(c.POSTFIELDS, data)
c.setopt(c.HTTPHEADER, ['X_API_KEY: '+api_key, 'X_API_VERSION: '+str(api_version)])
c.setopt(c.FOLLOWLOCATION, True)
buffer = BytesIO()
c.setopt(c.WRITEDATA, buffer)
c.perform()
c.close()
ret = buffer.getvalue()
try:
ret = ret.decode('UTF-8')
except:
if destination != '':
f = open(destination, 'wb')
f.write(ret)
f.close()
ret = True
else:
raise ApiError('Cannot parse API response')
else:
cmd = 'curl --silent -L --header "X_API_KEY: '+api_key+\
'" --header "X_API_VERSION: '+str(api_version)+'" '
if data != '':
cmd += '-X POST --data "'+data+'" '
cmd += api_url+endpoint
ret = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE).stdout.read()
# Check for HTTP error messages
if type(ret) is str:
err = re.search('Error ([0-9]+): (.*)', ret)
if err != None:
raise ApiError(err.group(2), err.group(1))
# Parse JSON if required
if parse_json:
try:
ret = json.loads(ret)
except:
pass
# Check for API-generated error messages, throw exception
try:
if type(ret) is dict and ret['error'] > 0:
raise ApiError(ret['message'], ret['error'])
except KeyError:
pass
return ret
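# Authenticate against the API and store the returned API key
# @param username string account username
# @param password string account password
# @return bool True if authentication succeeded and the key was stored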
def authenticate(username, password):
try:
login = {'username':username, 'password':password}
ret = __api_request('api/users/auth', post_data=login)
except:
return False
if type(ret) is dict and type(ret['api_key']) is str and len(ret['api_key']) == api_key_length:
global api_key
api_key = ret['api_key']
return True
return False
# SYN the server
# @return string "ack"
def syn():
return __api_request('api/etc/syn')
# Get a list of available active features
# @param limit int number of results to return
# @param get_all bool if true, results will include inactive features
# @return list of features
def get_features(limit=1000, get_all=False):
return __api_request('api/features/get_list/json/NULL/'+str(limit)+'/0?get_all='+str(int(get_all)))
# Get details on a feature
# @param filecode string unique filecode for the feature
# @return dict feature info
def get_feature_details(filecode):
return __api_request('api/features/details/json/'+str(filecode))
# Get a list of releases for a feature
# @param filecode string unique filecode for a feature
# @param offset int offset, default 0
# @param limit int limit, default 10
# @param start_date string start date: YYYY-MM-DD, default none
# @param end_date string end date: YYYY-MM-DD, default none
# @return list of releases
def get_releases(filecode, offset=0, limit=10, start_date='', end_date=''):
return __api_request('api/features/get_list/json/'+str(filecode)+"/"+str(limit)+"/"+str(offset)+"?start_date="+str(start_date)+"&end_date="+str(end_date))
# Download a file
# @param url string URL string provided in the files section of a release result
# @param destination string path to the location the file should be saved to
# @throws ApiError if destination is not a writable file location or url is unavailable
# @return bool True if file is downloaded successfully
def download_file(url, destination):
if not os.path.isdir(destination):
try:
contents = __api_request(url, parse_json=False, destination=destination)
if type(contents) is bool:
return contents
f = open(destination, 'w')
if len(contents) and contents[0] == '{': # Poor man's JSON check
contents = json.loads(contents)
try:
if type(contents) is dict and contents['error'] > 0:
f.close()
raise ApiError(contents['message'], contents['error'])
except:
f.close()
raise ApiError("Unexpected content type: JSON")
f.write(contents)
f.close()
return True
except IOError:
raise ApiError("Destination is unavailable or unwriteable")
except ApiError:
raise
else:
raise ApiError("Destination is a directory")
# Download a zip archive of the entire contents of a release
# @param release_id int the unique ID of the release to download
# @param destination string path to the location the file should be saved to
# @throws ApiError if destination is not a writeable file or release is not found
# @return bool True if file is downloaded successfully
def download_zip(release_id, destination):
if not os.path.isdir(destination):
try:
contents = __api_request('/api/files/zip/'+str(release_id), parse_json=False, destination=destination)
if type(contents) is bool:
return contents
f = open(destination, 'w')
if len(contents) > 0 and contents[0] == '{': # Poor man's JSON check
contents = json.loads(contents)
try:
if type(contents) is dict and contents['error'] > 0:
f.close()
raise ApiError(contents['message'], contents['error'])
except:
f.close()
raise ApiError("Unexpected content type: JSON")
f.write(contents)
f.close()
return True
except IOError:
raise ApiError("Destination is unavailable or unwriteable")
except ApiError:
raise
else:
raise ApiError("Destination is a directory")
# API Exception class
class ApiError(Exception):
def __init__(self, value, errno=0):
self.value = value
self.errno = errno
def __str__(self):
val = ''
if self.errno > 0:
val += '[Errno '+str(self.errno)+'] '
val += self.value
return repr(val) | mit | -5,183,268,034,446,239,000 | 31.464286 | 158 | 0.57665 | false | 4.096144 | false | false | false |
open-keychain/SafeSlinger-AppEngine | safeslinger-messenger/python/cleanup_reg.py | 2 | 2617 | # The MIT License (MIT)
#
# Copyright (c) 2010-2015 Carnegie Mellon University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
from google.appengine.ext import db, webapp
from google.appengine.ext.webapp import util
import registration
class CleanUpReg(webapp.RequestHandler):
def get(self):
# execute only when request comes from appengine.com
if self.request.headers.get('X-AppEngine-Cron') == 'true':
query = registration.Registration.all().order('key_id').order('-inserted')
num = 0
dup_regs = []
keys = set()
# find duplicated entries in all registrations, keeping all unique most recent ones.
duplicate = 0
lastKeyId = None
lastRegId = None
for r in query:
num += 1
keys.add(r.key_id)
if r.registration_id == lastRegId:
if r.key_id == lastKeyId:
dup_regs.append(r)
duplicate += 1
lastRegId = r.registration_id
lastKeyId = r.key_id
# remove duplicates, record our action
db.delete(dup_regs)
logging.info('cleanup: duplicate reg=%i (total: %i regs, %i keys)' % (duplicate, num, keys.__len__()))
def main():
application = webapp.WSGIApplication([('/cron/cleanup_reg', CleanUpReg)],
debug=True)
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
| mit | 6,194,062,037,424,786,000 | 36.927536 | 114 | 0.631257 | false | 4.420608 | false | false | false |
mophahr/wikipediaGame | wikigame/get_given_names.py | 1 | 4094 | # -*- coding: utf-8 -*-
import json
import sys
# see http://python3porting.com/noconv.html#import-errors
try:
# For Python 3.0 and later
from urllib.request import urlopen
from urllib.parse import quote
except ImportError:
# Fall back to Python 2's urllib
from urllib import urlopen, quote
def get_links(start_page, wikipedia_language='de'):
print('get_links(%s)' % start_page)
# parameters for building a string later:
# pllimit limits the number of links to return (max is 500 | 5000 for bots see http://en.wikipedia.org/w/api.php )
# the plcontinue value returned from the api can be used to continue of this is exceeded
# plnamespace: see here: http://en.wikipedia.org/wiki/Wikipedia:Namespace
def get_more_links(more_parameters=()):
parameters = {"format": "json",
"action": "query",
"prop": "links",
"pllimit": 500,
"plnamespace": 0,
"continue" : "",
"titles": quote(start_page.encode("utf8"))}
parameters.update(more_parameters)
queryString = "&".join("%s=%s" % (k, v) for k, v in parameters.items())
# This ensures that redirects are followed automatically, documented here:
# http://www.mediawiki.org/wiki/API:Query#Resolving_redirects
queryString = queryString+"&redirects"
url = "http://%s.wikipedia.org/w/api.php?%s" % (wikipedia_language, queryString)
#get json data and make a dictionary out of it:
request = urlopen(url)
try:
encoding = request.headers.getparam('charset')
except:
encoding = request.headers.get_param('charset')
jsonData = request.read().decode(encoding)
data = json.loads(jsonData)
pageId = list(data['query']['pages'])[0]
if int(pageId)<=0:
sys.exit("Page doesn't exist.")
link_list = data['query']['pages'][str(pageId)]['links']
return [entry["title"] for entry in link_list], data
all_links, data = get_more_links()
# Each time we get the dictionary we need to check if "query-continue"
# exists amd then repeat the stuff from before:
while 'continue' in data.keys():
continue_dict=dict()
for key in list(data['continue'].keys()):
if key == 'continue':
continue_dict.update({key: data['continue'][key]})
else:
val= "|".join([quote(e) for e in data['continue'][key].split('|')])
continue_dict.update({key: val})
new_links, data=get_more_links(continue_dict)
all_links+=new_links
return all_links
raw_names=get_links("Liste von Vornamen")
print(len(raw_names))
to_remove=["Liste albanischer Vornamen",
"Birmanischer Name",
"Chinesischer Name",
"Liste tibetischer Namen und Titel",
"Liste der bairischen Vornamen",
"Liste deutscher Vornamen aus der Bibel",
"Liste deutscher Vornamen germanischer Herkunft",
"Ostfriesischer Vorname",
"Obersorbische Vornamen",
"Liste finnischer Vornamen",
"Gambische Personennamen",
"Akan-Vorname",
"Liste griechischer Vornamen",
"Indischer Name",
"Römische Vornamen",
"Japanischer Name",
"Koreanischer Name",
"Liste der Namenstage in Lettland",
"Malaysischer Name",
"Personennamen der Sherpa",
"Polnischer Name",
"Spanischer Name",
"Liste türkischer Vornamen",
"Liste kurdischer Vornamen",
"Schreibung vietnamesischer Namen",
"Arabischer Name",
"Jüdischer Name",
"Slawische Vornamen"]
# remove this ^^^ ballast
names_only=set(raw_names)-set(to_remove)
#remove ' (Name)' and ' (Vorname)':
names=[entry.split(" ")[0] for entry in names_only]
with open('given_names.tsv', 'w') as namesfile:
for name in names:
namesfile.write(name+'\n')
| gpl-2.0 | -4,292,398,555,988,735,500 | 34.573913 | 118 | 0.592276 | false | 3.642921 | false | false | false |
aristotelis-metsinis/ArcadeGames | memory.py | 1 | 16152 | #
# Mini-project # 5: "Memory".
#
# Author: Aristotelis Metsinis
# Email: aristotelis.metsinis@gmail.com
# Mini-project # 5: An Introduction to Interactive Programming in Python
# @ https://www.coursera.org/course/interactivepython
# Date: 26 Oct 2014
# Version: 10.0
#
# Implementation of card game: "Memory".
#
# Two game "modes": play with "textual numbers" or
# "images.
#
#---------------------------------------------------------
# Import the "simple gui" module.
import simplegui
# Import module, which contains functions that involve
# randomness.
import random
# Import module that contains additional mathematical
# operations.
import math
#---------------------------------------------------------
# Define and initialize global constants.
# Initialize global constants that will hold the "width"
# and "height" of the "canvas" ("deck of cards" - grid of
# 16 "cards").
CANVAS_WIDTH = 800
CANVAS_HEIGHT = 140
# "Memory" game of 16 "cards" (as global constant).
CARDS_NUMBER = 16
# Compute the "width" of a single cell of this grid;
# "placeholder" for a single "card" (cells distributed
# evently).
CARD_PLACEHOLDER_WIDTH = (CANVAS_WIDTH // CARDS_NUMBER)
# Set general "draw" properties.
FONT_SIZE = 50
FONT_FACE = 'sans-serif'
FONT_COLOR = 'White'
MARGIN_Y = 19
# Compute the (global constant) "vertical" position to
# draw a "card", presenting a "textual number" and taking
# into consideration the height of the "deck of cards"
# plus a "margin".
CARD_VALUE_POINT_Y = (CANVAS_HEIGHT // 2) + MARGIN_Y
# More general "draw" properties.
CARD_PLACEHOLDER_LINE_COLOR = 'Black'
CARD_PLACEHOLDER_FILL_COLOR = 'Green'
CARD_PLACEHOLDER_LINE_WIDTH = 2
# Initialize a "dictionary" as global constant, mapping
# numbers from 0-7 (acting as "keys") to "urls" (acting
# as "values"). In practice, the business logic of the
# program models generally the "deck of cards" as a
# "shuffled" list consisting of 16 numbers with each
# number lying in the range [0,8) and appearing twice.
# The following "urls" (links to images)
# are just being used at the "presentation" layer,
# drawing the proper "image" instead of "number" (text).
IMAGES = {}
IMAGES[0] = simplegui.load_image('http://aristotelis-metsinis.github.io/img/riemann.jpg')
IMAGES[1] = simplegui.load_image('http://aristotelis-metsinis.github.io/img/aristotle.jpg')
IMAGES[2] = simplegui.load_image('http://aristotelis-metsinis.github.io/img/euler.jpg')
IMAGES[3] = simplegui.load_image('http://aristotelis-metsinis.github.io/img/gauss.jpg')
IMAGES[4] = simplegui.load_image('http://aristotelis-metsinis.github.io/img/newton.jpg')
IMAGES[5] = simplegui.load_image('http://aristotelis-metsinis.github.io/img/einstein.jpg')
IMAGES[6] = simplegui.load_image('http://aristotelis-metsinis.github.io/img/hilbert.jpg')
IMAGES[7] = simplegui.load_image('http://aristotelis-metsinis.github.io/img/lagrange.jpg')
#---------------------------------------------------------
# Define and initialize global variables.
# Boolean flag: play the game with "images" (True) or
# with "textual numbers" (False).
play_with_images = False
#---------------------------------------------------------
def new_game():
"""
Helper function that starts and restarts the
game, initializing global variables; reshuffle the
"cards", reset the "turn" counter and restart the
game. All "cards" should start the game hidden.
"""
# Initialize global variable that will hold the "deck
# of cards"; we model the "deck of cards" as a list
# consisting of 16 numbers with each number lying in
# the range [0,8) and appearing twice. The list is
# created by concatenating two lists with range [0,8)
# together. Although Player can play the game with
# "textual numbers" or "images", the above mentioned
# technique is being used modeling the game in both
# game "modes".
global deck_of_cards
deck_of_cards = range(CARDS_NUMBER // 2) + range(CARDS_NUMBER // 2)
# Shuffle the "deck".
random.shuffle(deck_of_cards)
# Remove comment if in DEBUG mode.
#print deck_of_cards
# Initialize global variable that will hold the a list,
# with size equal to the size of the "deck of cards"
# consisting of boolean values. The boolean value
# at a certain list index indicates whether the "card"
# is "exposed" (True) or not (False). Particularly,
# the ith entry should be "True" if the ith card is
# face up and its value is visible or "False" if the
# ith card is face down and it's value is hidden.
global deck_of_cards_exposed
deck_of_cards_exposed = [False] * CARDS_NUMBER
# Initialize global variable that will hold the game
# state (0,1 and 2), i.e. beginning of the game, single
# "exposed" unpaired "card" and end of a "turn"
# respectively (have a look at the comments of
# "mouseclick()" for a detailed description
# concerning this variable).
global state
state = 0
# Initialize global variable that will hold the number
# of "turns" playing the game.
global turn
turn = 0
label.set_text("Turns = " + str(turn))
# Initialize global variable that will hold a "helper"
# list, keeping the index of the cards "exposed" in
# a single "turn".
global index_of_cards_exposed_in_a_turn
index_of_cards_exposed_in_a_turn = [-1, -1]
return None
#---------------------------------------------------------
def mouseclick(pos):
"""
Define "mouse click" event handler; implements game
"state" logic. It receives a parameter; pair of screen
coordinates, i.e. a tuple of two non-negative integers
- the position of the mouse click.
"""
# User clicks on a "card" of the "deck" (grid of
# evenly distributed cells - cards placeholders).
# Compute the index of this "card", i.e. determine
# which card have been clicked on with the mouse.
# Recall that the sequence of cards entirely fills
# the "canvas".
clicked_card_index = int(math.floor(float(pos[0]) / CARD_PLACEHOLDER_WIDTH))
# If user clicks on a card already "exposed"; ignore
# event and "return" function immediately.
if deck_of_cards_exposed[clicked_card_index]:
return None
# The counter of "turns" playing the game will be
# updated as a global variable.
global turn
# The following block implements the game logic for
# selecting two "cards" and determining if they match.
# State 0 corresponds to the start of the game.
# In state 0, if you click on a card, that card is
# exposed, and you switch to state 1.
# State 1 corresponds to a single exposed unpaired
# card.
# In state 1, if you click on an unexposed card, that
# card is exposed and you switch to state 2.
# State 2 corresponds to the end of a turn.
# In state 2, if you click on an unexposed card, that
# card is exposed and you switch to state 1.
global state
if state == 0:
# Set the "status" of the clicked "card"
# as "exposed".
deck_of_cards_exposed[clicked_card_index] = True
# Store the "index" of the "exposed" card.
# This is the first card "exposed" in this "turn"
# of the game.
index_of_cards_exposed_in_a_turn[0] = clicked_card_index
# Update "turn" counter; incremented after the
# first "card" is flipped during a turn.
turn += 1
label.set_text("Turns = " + str(turn))
# Switch to the next game "state".
state = 1
elif state == 1:
# Set the "status" of the clicked "card"
# as "exposed".
deck_of_cards_exposed[clicked_card_index] = True
# Store the "index" of the "exposed" card.
# This is the second card "exposed" in this "turn"
# of the game.
index_of_cards_exposed_in_a_turn[1] = clicked_card_index
# Switch to the next game "state".
state = 2
else:
# Set the "status" of the clicked "card"
# as "exposed".
deck_of_cards_exposed[clicked_card_index] = True
# Get the value of the cards exposed in the previous
# "turn" of the game (taking advantage of the
# "indexes" stored). Then determine if the previous
# two "exposed" cards are paired or unpaired.
# If unpaired then switch the "status" of these
# cards back to "unexposed"; i.e. flip them back
# over so that they are hidden before moving to
# state 1.
if deck_of_cards[index_of_cards_exposed_in_a_turn[0]] != deck_of_cards[index_of_cards_exposed_in_a_turn[1]]:
deck_of_cards_exposed[index_of_cards_exposed_in_a_turn[0]] = False
deck_of_cards_exposed[index_of_cards_exposed_in_a_turn[1]] = False
# Store the "index" of the "exposed" card.
# This is the first card "exposed" in this "turn"
# of the game, i.e. replace the "index" of the
# first card "exposed" in the previous "turn" of
# the game.
index_of_cards_exposed_in_a_turn[0] = clicked_card_index
# Update "turn" counter; incremented after the
# first "card" is flipped during a turn.
turn += 1
label.set_text("Turns = " + str(turn))
# Switch to the next game "state".
state = 1
return None
#---------------------------------------------------------
def draw(canvas):
"""
Event handler that is responsible for all drawing.
It receives the "canvas" object and draws the "deck of
cards" (grid) as a horizontal sequence of 16 evently
distributed cells - "card" placeholders. It also draws
the "exposed" cards (if any) taking into consideration
the "mode" of the game, i.e either drawing "textual
numbers" or "images" in the "cells" of the "exposed"
cards (placeholders). "Cards" are logically 50 x 140
pixels in size based on the configurations set for
the purposes of this program.
"""
# Iterate through the "Memory deck" and draw all 16
# "card" placeholders.
for index in range(CARDS_NUMBER):
# Store the position of the left and right border
# of this cell (card placeholder).
card_placeholder_left_x = CARD_PLACEHOLDER_WIDTH * index
card_placeholder_right_x = CARD_PLACEHOLDER_WIDTH * (index + 1)
# Check if the "card" of this cell has an "exposed"
# (already) status.
if deck_of_cards_exposed[index]:
# Compute the position at the middle of this
# cell.
card_placeholder_middle_x = (card_placeholder_right_x + card_placeholder_left_x) // 2
# Play the game with "textual numbers" instead
# of "images".
if not play_with_images:
# Use the "index" of this "cell" as the
# "index" in the list of the "deck of
# cards" extracting the "card value".
# Get the width of the "card value" text
# in pixels; useful in (later) computing
# the position to draw the "card value"
# text - centered justified in the "cell"
# of each "card" (placeholder).
card_value_textwidth_in_px = frame.get_canvas_textwidth(str(deck_of_cards[index]),
FONT_SIZE, FONT_FACE)
card_value_point_x = card_placeholder_middle_x - (card_value_textwidth_in_px // 2)
# Draw the "textual number" associated
# with each "card" on the "canvas".
canvas.draw_text(str(deck_of_cards[index]), (card_value_point_x, CARD_VALUE_POINT_Y),
FONT_SIZE, FONT_COLOR, FONT_FACE)
# Play the game with "images" in place of
# "textual numbers".
else:
# Use the "index" of this "cell" as the
# "index" in the list of the "deck of
# cards" extracting the "card value".
# Later use this "card value" as the "key"
# loading the corresponding "image".
image = IMAGES[deck_of_cards[index]]
# Draw the "image" associated with each
# "card" on the "canvas".
canvas.draw_image(image,
(image.get_width() // 2,image.get_height() // 2),
(image.get_width(), image.get_height()),
(card_placeholder_middle_x, CANVAS_HEIGHT // 2),
(image.get_width(), image.get_height()))
# "Card" of this cell is not "exposed" (already);
# simply draw a cell ("card" placeholder).
else:
card_placeholder_points = [[card_placeholder_left_x, 0],
[card_placeholder_right_x, 0],
[card_placeholder_right_x, CANVAS_HEIGHT],
[card_placeholder_left_x, CANVAS_HEIGHT]]
# Just draw a blank green rectangle.
canvas.draw_polygon(card_placeholder_points,
CARD_PLACEHOLDER_LINE_WIDTH,
CARD_PLACEHOLDER_LINE_COLOR,
CARD_PLACEHOLDER_FILL_COLOR)
return None
#---------------------------------------------------------
def switch_game_mode():
"""
Button event handler that updates properly the boolean
flag, which "keeps" the "mode" of the game. The game
has two modes: play with "textual numbers" (False)
or "images" (True). Each time button is pressed the
value of this variable changes from "True" to "False"
and vice versa. The button text is updated
accordingly.
"""
# The boolean flag will be updated as a global
# variable. If already "True", will be "False" (and
# vice versa).
global play_with_images
play_with_images = not play_with_images
if play_with_images:
# User will play this game with "images". Update
# button text informing the user that he/she will
# reset the on-going game and play the next
# game with "textual numbers".
switch_game_mode_button.set_text("Reset and Play with numbers")
else:
# User will play this game with "textual numbers".
# Update button text informing the user that
# he/she will reset the on-going game and play
# the next game with "images".
switch_game_mode_button.set_text("Reset and Play with images")
# Reset on-going game.
new_game()
return None
#---------------------------------------------------------
# Create frame.
frame = simplegui.create_frame("Memory", CANVAS_WIDTH,
CANVAS_HEIGHT)
# Register event handlers for "control" elements and
# frame buttons to "restart" and if necessary "switch"
# the mode of the game. Once the game is over, you should
# hit the "Reset" button to restart the game.
frame.add_button("Reset", new_game)
frame.add_label("")
label = frame.add_label("Turns = 0")
frame.add_label("")
switch_game_mode_button = frame.add_button("Reset and Play with images",
switch_game_mode, 200)
# Register "event handler" that is responsible for the
# management of the mouse clicks on the "canvas".
frame.set_mouseclick_handler(mouseclick)
# Register the "event handler" that is responsible
# for all drawing.
frame.set_draw_handler(draw)
# Call "new_game()" ensuring that all variables are
# always initialized when the program starts running.
new_game()
# Start frame.
frame.start()
#--------------------------------------------------------- | mit | -5,470,520,956,982,380,000 | 39.687657 | 116 | 0.596459 | false | 3.853053 | false | false | false |
astrorafael/emadb | emadb/mqttsubscriber.py | 1 | 8155 | # ----------------------------------------------------------------------
# Copyright (c) 2015 Rafael Gonzalez.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ----------------------------------------------------------------------
# ========================== DESIGN NOTES ==============================
# A generic MQTT subscriber client to be used by my tiny server framework
# Must be subclassed a least do customize the onMessage() method.
#
# This class inherits from Lazy to periodically execute a work() procedure
# responsible for:
# 1. Managing connection to MQTT Broker. No disconnections are ever requested.
# 2. Managing subscriptions.
# 3. Delivering data to backend objects like databases
#
# The work() procedure executes twice as fast as
# the keepalive timeout specidied to the client MQTT library.
#
# ======================================================================
import logging
import paho.mqtt.client as paho
import socket
import datetime
from abc import ABCMeta, abstractmethod
from server import Lazy
import utils
# MQTT Connection Status
NOT_CONNECTED = 0
CONNECTING = 1
CONNECTED = 2
FAILED = 3
DISCONNECTING = 4
# Default QoS
QOS = 1
log = logging.getLogger('mqtt')
# Callback when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
userdata.onConnect(flags, rc)
def on_disconnect(client, userdata, rc):
userdata.onDisconnect(rc)
# Callback when a PUBLISH message is received from the server.
# The default message callback
def on_message(client, userdata, msg):
userdata.onMessage(msg, datetime.datetime.utcnow())
# Callback subscriptions
def on_subscribe(client, userdata, mid, granted_qos):
userdata.onSubscribe(mid, granted_qos)
def on_unsubscribe(client, userdata, mid):
userdata.onUnsubscribe(mid)
class MQTTGenericSubscriber(Lazy):
# Maximun retry period
MAX_PERIOD = 2*60*60
def __init__(self, srv, parser):
Lazy.__init__(self, 60)
self.__parser = parser
self.srv = srv
self.__state = NOT_CONNECTED
self.__topics = []
srv.addLazy(self)
# We do not allow to reconfigure an existing connection
# to a broker as we would loose incoming data
self.id = parser.get("MQTT", "mqtt_id")
self.__host = parser.get("MQTT", "mqtt_host")
self.__port = parser.getint("MQTT", "mqtt_port")
self.paho = paho.Client(client_id=self.id+'@'+ socket.gethostname(),
clean_session=False, userdata=self)
self.paho.on_connect = on_connect
self.paho.on_disconnect = on_disconnect
self.paho.on_message = on_message
self.paho.on_subscribe = on_subscribe
self.paho.on_unsubscribe = on_unsubscribe
self.reload()
log.info("MQTT client created")
# we only allow to reconfigure the topic list and keepalive period
def reload(self):
'''Reloads and reconfigures itself'''
parser = self.__parser # shortcut
lvl = parser.get("MQTT", "mqtt_log")
log.setLevel(lvl)
self.__keepalive = parser.getint("MQTT", "mqtt_period")
self.__initial_T = self.__keepalive / 2
self.__period = self.__initial_T
self.setPeriod(self.__initial_T )
topics = utils.chop(parser.get("MQTT", "mqtt_topics"),',')
self.__newtopics = [ (topic, QOS) for topic in topics ]
if self.__state == CONNECTED:
self.subscribe()
log.debug("Reload complete")
# ----------------------------------------
# Implement MQTT Callbacks
# -----------------------------------------
def onConnect(self, flags, rc):
'''Send the initial event and set last will on unexpected diconnection'''
if rc == 0:
self.__state = CONNECTED
self.__period = self.__initial_T
self.setPeriod(self.__initial_T)
log.info("Connected successfully")
self.subscribe()
else:
self.handleConnErrors()
def onDisconnect(self, rc):
log.warning("Unexpected disconnection, rc =%d", rc)
self.__state = NOT_CONNECTED
self.__topics = []
try:
self.srv.delReadable(self)
except ValueError as e:
log.warning("Recovered from mqtt library 'double disconnection' bug")
@abstractmethod
def onMessage(self, msg, tstamp):
'''
Process incoming messages from subscribed topics.
Typically will pass the message to a backend object via
the parent server object
'''
pass
def onSubscribe(self, mid, granted_qos):
log.info("Subscriptions ok with MID = %s, granted QoS = %s",
mid, granted_qos)
def onUnsubscribe(self, mid):
log.info("Unsubscribe ok with MID = %s", mid)
# ---------------------------------
# Implement the Event I/O Interface
# ---------------------------------
def onInput(self):
'''
Read from message buffer and notify handlers if message complete.
Called from Server object
'''
self.paho.loop_read()
def fileno(self):
'''Implement this interface to be added in select() system call'''
return self.paho.socket().fileno()
# ----------------------------------------
# Implement The Lazy interface
# -----------------------------------------
def work(self):
'''
Called periodically from a Server object.
Write blocking behaviour.
'''
log.debug("work()")
if self.__state == NOT_CONNECTED:
self.connect()
return
self.paho.loop_misc()
# --------------
# Helper methods
# --------------
def subscribe(self):
'''Subscribe smartly to a list of topics'''
# Unsubscribe first if necessary
topics = [ t[0] for t in (set(self.__topics) - set(self.__newtopics)) ]
if len(topics):
self.paho.unsubscribe(topics)
log.info("Unsubscribing from topics %s", topics)
else:
log.info("no need to unsubscribe")
# Now subscribe
topics = [ t for t in (set(self.__newtopics) - set(self.__topics)) ]
if len(topics):
log.info("Subscribing to topics %s", topics)
self.paho.subscribe(topics)
else:
log.info("no need to subscribe")
self.__topics = self.__newtopics
def connect(self):
'''
Connect to MQTT Broker with parameters passed at creation time.
Add MQTT library to the (external) EMA I/O event loop.
'''
try:
log.info("Connecting to MQTT Broker %s:%s", self.__host, self.__port)
self.__state = CONNECTING
self.paho.connect(self.__host, self.__port, self.__keepalive)
self.srv.addReadable(self)
except IOError as e:
log.error("%s",e)
self.handleConnErrors()
def handleConnErrors(self):
self.__state = NOT_CONNECTED
self.__period *= 2
self.__period = min(self.__period, MQTTGenericSubscriber.MAX_PERIOD)
self.setPeriod(self.__period)
log.info("Connection failed, next try in %d sec.", self.__period)
| mit | 4,258,326,531,601,589,000 | 31.751004 | 80 | 0.605395 | false | 4.065304 | false | false | false |
moertle/pyaas | pyaas/module.py | 1 | 2566 |
import collections
import logging
import pyaas
moduleImports = dict(
auth = 'pyaas.web.auth',
storage = 'pyaas.storage.engines',
cache = 'pyaas.storage.cache',
)
def load():
if not pyaas.config.has_section('modules'):
logging.debug('No modules defined')
return
modules = dict(pyaas.config.items('modules'))
for module in modules:
try:
modulePath = pyaas.module.moduleImports[module]
except KeyError:
logging.error('Unknown module: %s', module)
continue
# on import PyaasModules register themselves
__import__(pyaas.module.moduleImports[module])
for module, moduleClass in pyaas.module.PyaasModule.registry.items():
moduleClass.load()
return
class RegisterModuleMeta(type):
def __init__(cls, name, bases, dct):
if not hasattr(cls, 'registry'):
# this is the base class. Create an empty registry
cls.registry = {}
else:
# this is a derived class. Add cls to the registry
cls.registry[name] = cls
super(RegisterModuleMeta, cls).__init__(name, bases, dct)
class PyaasModule(object):
PKG_PATH = 'pyaas'
CLASSES = collections.defaultdict(dict)
__metaclass__ = RegisterModuleMeta
@classmethod
def load(cls):
raise NotImplementedError
@classmethod
def loadModule(cls, moduleName):
classes = cls.CLASSES[cls.__name__]
try:
return classes[moduleName]
except KeyError:
# try to load the module
pass
# then try loading a pyaas module first
try:
path = cls.PKG_PATH + '.' + moduleName
module = __import__(path)
except ImportError:
# try loading a user-supplied module next
try:
path = moduleName
module = __import__(path)
except ImportError:
raise pyaas.error('Unknown module: %s', moduleName)
subPackageName = path
for subPackageName in subPackageName.split('.')[1:]:
module = getattr(module, subPackageName)
classname = subPackageName.capitalize()
moduleClass = getattr(module, classname, None)
if moduleClass is None:
try:
moduleClass = getattr(module, 'Database')
except AttributeError:
raise pyaas.error('Bad module: %s', moduleName)
classes[moduleName] = moduleClass
return moduleClass
| mit | 3,384,338,324,429,189,000 | 26.010526 | 73 | 0.588465 | false | 4.37884 | false | false | false |
iw3hxn/LibrERP | crm_lead_correct/models/sale_order.py | 1 | 3937 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2013-2014 Didotech SRL (info at didotech.com)
# All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from openerp.osv import orm, fields
class SaleOrder(orm.Model):
_inherit = 'sale.order'
def _get_connected_sale_order(self, cr, uid, ids, field_name, model_name, context=None):
context = context or self.pool['res.users'].context_get(cr, uid)
result = {}
order_id = context.get('own_sale_id')
for sale in self.browse(cr, uid, ids, context):
result[sale.id] = False
if sale.id == order_id:
result[sale.id] = True
if self.pool['sale.order']._columns.get('sale_version_id', False):
if sale.sale_version_id and sale.sale_version_id.id == order_id:
result[sale.id] = True
return result
_columns = {
'connected_sale_order': fields.function(_get_connected_sale_order, string='Own Sale', type='boolean'),
'contact_id': fields.many2one('res.partner.address.contact', 'Contact'),
}
def onchange_partner_id(self, cr, uid, ids, part):
res = super(SaleOrder, self).onchange_partner_id(cr, uid, ids, part)
res['value']['contact_id'] = False
return res
def hook_sale_state(self, cr, uid, orders, vals, context):
crm_model = self.pool['crm.lead']
crm_sale_stage_model = self.pool['crm.sale.stage']
state = vals.get('state', False)
for order in orders:
lead_ids = crm_model.search(cr, uid, [('sale_order_id', '=', order.id)], context=context)
if context.get('active_model', '') == 'crm.lead':
lead_ids.append(context.get('active_id'))
lead_ids = list(set(lead_ids))
if lead_ids:
crm_sale_stage_ids = crm_sale_stage_model.search(cr, uid, [('shop_id', '=', order.shop_id.id), ('name', '=', state)], context=context)
if crm_sale_stage_ids:
crm_sale_stage = crm_sale_stage_model.browse(cr, uid, crm_sale_stage_ids[0], context)
stage_id = crm_sale_stage.stage_id.id
crm_value = {'stage_id': stage_id}
crm_value.update(crm_model.onchange_stage_id(cr, uid, lead_ids, stage_id)['value'])
if crm_sale_stage.update_amount:
crm_value.update({
'planned_revenue': order.amount_untaxed
})
crm_model.write(cr, uid, lead_ids, crm_value, context.update({'force_stage_id': True}))
return super(SaleOrder, self).hook_sale_state(cr, uid, orders, vals, context)
| agpl-3.0 | 8,658,856,917,558,177,000 | 48.2125 | 150 | 0.596139 | false | 3.82233 | false | false | false |
crossroadchurch/paul | openlp/core/lib/searchedit.py | 1 | 8204 | # -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2015 OpenLP Developers #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
import logging
from PyQt4 import QtCore, QtGui
from openlp.core.lib import build_icon
from openlp.core.lib.ui import create_widget_action
log = logging.getLogger(__name__)
class SearchEdit(QtGui.QLineEdit):
"""
This is a specialised QLineEdit with a "clear" button inside for searches.
"""
def __init__(self, parent):
"""
Constructor.
"""
super(SearchEdit, self).__init__(parent)
self._current_search_type = -1
self.clear_button = QtGui.QToolButton(self)
self.clear_button.setIcon(build_icon(':/system/clear_shortcut.png'))
self.clear_button.setCursor(QtCore.Qt.ArrowCursor)
self.clear_button.setStyleSheet('QToolButton { border: none; padding: 0px; }')
self.clear_button.resize(18, 18)
self.clear_button.hide()
self.clear_button.clicked.connect(self._on_clear_button_clicked)
self.textChanged.connect(self._on_search_edit_text_changed)
self._update_style_sheet()
self.setAcceptDrops(False)
def _update_style_sheet(self):
"""
Internal method to update the stylesheet depending on which widgets are available and visible.
"""
frame_width = self.style().pixelMetric(QtGui.QStyle.PM_DefaultFrameWidth)
right_padding = self.clear_button.width() + frame_width
if hasattr(self, 'menu_button'):
left_padding = self.menu_button.width()
stylesheet = 'QLineEdit { padding-left: %spx; padding-right: %spx; } ' % (left_padding, right_padding)
else:
stylesheet = 'QLineEdit { padding-right: %spx; } ' % right_padding
self.setStyleSheet(stylesheet)
msz = self.minimumSizeHint()
self.setMinimumSize(max(msz.width(), self.clear_button.width() + (frame_width * 2) + 2),
max(msz.height(), self.clear_button.height() + (frame_width * 2) + 2))
def resizeEvent(self, event):
"""
Reimplemented method to react to resizing of the widget.
:param event: The event that happened.
"""
size = self.clear_button.size()
frame_width = self.style().pixelMetric(QtGui.QStyle.PM_DefaultFrameWidth)
self.clear_button.move(self.rect().right() - frame_width - size.width(),
(self.rect().bottom() + 1 - size.height()) // 2)
if hasattr(self, 'menu_button'):
size = self.menu_button.size()
self.menu_button.move(self.rect().left() + frame_width + 2, (self.rect().bottom() + 1 - size.height()) // 2)
def current_search_type(self):
"""
Readonly property to return the current search type.
"""
return self._current_search_type
def set_current_search_type(self, identifier):
"""
Set a new current search type.
:param identifier: The search type identifier (int).
"""
menu = self.menu_button.menu()
for action in menu.actions():
if identifier == action.data():
# setPlaceholderText has been implemented in Qt 4.7 and in at least PyQt 4.9 (I am not sure, if it was
# implemented in PyQt 4.8).
try:
self.setPlaceholderText(action.placeholder_text)
except AttributeError:
pass
self.menu_button.setDefaultAction(action)
self._current_search_type = identifier
self.emit(QtCore.SIGNAL('searchTypeChanged(int)'), identifier)
return True
def set_search_types(self, items):
"""
A list of tuples to be used in the search type menu. The first item in the list will be preselected as the
default.
:param items: The list of tuples to use. The tuples should contain an integer identifier, an icon (QIcon
instance or string) and a title for the item in the menu. In short, they should look like this::
(<identifier>, <icon>, <title>, <place holder text>)
For instance::
(1, <QIcon instance>, "Titles", "Search Song Titles...")
Or::
(2, ":/songs/authors.png", "Authors", "Search Authors...")
"""
menu = QtGui.QMenu(self)
first = None
for identifier, icon, title, placeholder in items:
action = create_widget_action(
menu, text=title, icon=icon, data=identifier, triggers=self._on_menu_action_triggered)
action.placeholder_text = placeholder
if first is None:
first = action
self._current_search_type = identifier
if not hasattr(self, 'menu_button'):
self.menu_button = QtGui.QToolButton(self)
self.menu_button.setIcon(build_icon(':/system/clear_shortcut.png'))
self.menu_button.setCursor(QtCore.Qt.ArrowCursor)
self.menu_button.setPopupMode(QtGui.QToolButton.InstantPopup)
self.menu_button.setStyleSheet('QToolButton { border: none; padding: 0px 10px 0px 0px; }')
self.menu_button.resize(QtCore.QSize(28, 18))
self.menu_button.setMenu(menu)
self.menu_button.setDefaultAction(first)
self.menu_button.show()
self._update_style_sheet()
def _on_search_edit_text_changed(self, text):
"""
Internally implemented slot to react to when the text in the line edit has changed so that we can show or hide
the clear button.
:param text: A :class:`~PyQt4.QtCore.QString` instance which represents the text in the line edit.
"""
self.clear_button.setVisible(bool(text))
def _on_clear_button_clicked(self):
"""
Internally implemented slot to react to the clear button being clicked to clear the line edit. Once it has
cleared the line edit, it emits the ``cleared()`` signal so that an application can react to the clearing of the
line edit.
"""
self.clear()
self.emit(QtCore.SIGNAL('cleared()'))
def _on_menu_action_triggered(self):
"""
Internally implemented slot to react to the select of one of the search types in the menu. Once it has set the
correct action on the button, and set the current search type (using the list of identifiers provided by the
developer), the ``searchTypeChanged(int)`` signal is emitted with the identifier.
"""
for action in self.menu_button.menu().actions():
# Why is this needed?
action.setChecked(False)
self.set_current_search_type(self.sender().data())
| gpl-2.0 | -7,612,010,848,661,411,000 | 45.350282 | 120 | 0.573257 | false | 4.368477 | false | false | false |
agry/NGECore2 | scripts/mobiles/corellia/diseased_vrelt_matriarch.py | 2 | 1677 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('diseased_vrelt_matriarch')
mobileTemplate.setLevel(6)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(True)
mobileTemplate.setScale(1)
mobileTemplate.setMeatType("Carnivore Meat")
mobileTemplate.setMeatAmount(10)
mobileTemplate.setHideType("Bristly Hide")
mobileTemplate.setHideAmount(10)
mobileTemplate.setBoneType("Animal Bones")
mobileTemplate.setBoneAmount(10)
mobileTemplate.setSocialGroup("vrelt")
mobileTemplate.setAssistRange(10)
mobileTemplate.setStalker(False)
mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_vrelt.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_bite_2')
attacks.add('bm_bolster_armor_2')
attacks.add('bm_enfeeble_2')
mobileTemplate.setDefaultAttack('creatureRangedAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('diseased_vrelt_matriarch', mobileTemplate)
return | lgpl-3.0 | -2,400,141,543,173,543,000 | 33.244898 | 129 | 0.826476 | false | 3.2818 | false | false | false |
richard-taylor/auxo | auxo/test/test_executor.py | 1 | 1467 |
import logging
logging.basicConfig(filename='/tmp/unittest')
import auxo.executor
import auxo.test.mocks
import unittest
class TestExecutor(unittest.TestCase):
def testNoAgents(self):
r = auxo.executor.run([])
self.assertEqual(len(r), 0)
def testGoodAgents(self):
agents = [
auxo.test.mocks.mockAgent("A", "hello"),
auxo.test.mocks.mockAgent("B", "apple"),
auxo.test.mocks.mockAgent("C", "orange")
]
r = auxo.executor.run(agents)
self.assertEqual(len(r), 3)
self.assertEqual(r[0].name, 'A')
self.assertEqual(r[0].text, 'hello')
self.assertEqual(r[1].name, 'B')
self.assertEqual(r[1].text, 'apple')
self.assertEqual(r[2].name, 'C')
self.assertEqual(r[2].text, 'orange')
def testBadAgents(self):
agents = [
auxo.test.mocks.mockAgent("A", None),
auxo.test.mocks.mockAgent("B", "apple"),
auxo.test.mocks.mockAgent("C", None)
]
r = auxo.executor.run(agents)
self.assertEqual(len(r), 3)
self.assertEqual(r[0].name, 'A')
self.assertEqual(r[0].text, 'Failed to complete.\n')
self.assertEqual(r[1].name, 'B')
self.assertEqual(r[1].text, 'apple')
self.assertEqual(r[2].name, 'C')
self.assertEqual(r[2].text, 'Failed to complete.\n')
| gpl-3.0 | 2,895,575,492,069,478,000 | 28.938776 | 60 | 0.549421 | false | 3.501193 | true | false | false |
raiden-network/raiden | raiden/tests/utils/protocol.py | 1 | 7352 | from collections import defaultdict
from unittest.mock import patch
import structlog
from gevent.event import AsyncResult
from raiden.message_handler import MessageHandler
from raiden.messages.abstract import Message
from raiden.raiden_event_handler import EventHandler
from raiden.raiden_service import RaidenService
from raiden.tests.utils.events import check_nested_attrs
from raiden.transfer.architecture import Event as RaidenEvent
from raiden.transfer.mediated_transfer.events import SendSecretRequest, SendUnlock
from raiden.transfer.state import ChainState
from raiden.utils.formatting import to_checksum_address
from raiden.utils.typing import Callable, Dict, List, NamedTuple, SecretHash, Set
log = structlog.get_logger(__name__)
class MessageWaiting(NamedTuple):
attributes: dict
message_type: type
async_result: AsyncResult
class HoldWait(NamedTuple):
event_type: type
async_result: AsyncResult
attributes: Dict
class Holding(NamedTuple):
event: RaidenEvent
chain_state: ChainState
event_type: type
async_result: AsyncResult
attributes: Dict
class WaitForMessage(MessageHandler):
def __init__(self):
self.waiting: Dict[type, list] = defaultdict(list)
def wait_for_message(self, message_type: type, attributes: dict) -> AsyncResult:
assert not any(attributes == waiting.attributes for waiting in self.waiting[message_type])
waiting = MessageWaiting(
attributes=attributes, message_type=Message, async_result=AsyncResult()
)
self.waiting[message_type].append(waiting)
return waiting.async_result
def on_messages(self, raiden: RaidenService, messages: List[Message]) -> None:
# First handle the message, and then set the events, to ensure the
# expected side-effects of the message are applied
super().on_messages(raiden, messages)
for message in messages:
for waiting in self.waiting[type(message)]:
if check_nested_attrs(message, waiting.attributes):
waiting.async_result.set(message)
class HoldRaidenEventHandler(EventHandler):
"""Use this handler to stop the node from processing an event.
This is useful:
- Simulate network communication problems, by delaying when protocol
messages are sent.
- Simulate blockchain congestion, by delaying transactions.
- Wait for a given state of the protocol, by waiting for an event to be
available.
"""
def __init__(self, wrapped_handler: EventHandler):
self.wrapped = wrapped_handler
self.eventtype_to_waitingholds: Dict[type, List[HoldWait]] = defaultdict(list)
self.eventtype_to_holdings: Dict[type, List[Holding]] = defaultdict(list)
self.pre_hooks: Set[Callable] = set()
def on_raiden_events(
self, raiden: RaidenService, chain_state: ChainState, events: List[RaidenEvent]
):
events_to_dispatch = list()
for event in events:
for hook in self.pre_hooks:
hook(event)
event_type = type(event)
# First check that there are no overlapping holds, otherwise the test
# is likely flaky. It should either reuse the hold for the same event
# or different holds must match a unique event.
for hold in self.eventtype_to_holdings[event_type]:
if check_nested_attrs(event, hold.attributes):
msg = (
f"Matching event of type {event.__class__.__name__} emitted "
f"twice, this should not happen. Either there is a bug in the "
f"state machine or the hold.attributes is too generic and "
f"multiple different events are matching. Event: {event} "
f"Attributes: {hold.attributes}"
)
raise RuntimeError(msg)
waitingholds = self.eventtype_to_waitingholds[event_type]
for pos, waiting_hold in enumerate(waitingholds):
# If it is a match:
# - Delete the waiting hold and add it to the holding
# - Do not dispatch the event
# - Notify the test by setting the async_result
if check_nested_attrs(event, waiting_hold.attributes):
holding = Holding(
event=event,
chain_state=chain_state,
event_type=waiting_hold.event_type,
async_result=waiting_hold.async_result,
attributes=waiting_hold.attributes,
)
del self.eventtype_to_waitingholds[event_type][pos]
self.eventtype_to_holdings[event_type].append(holding)
waiting_hold.async_result.set(event)
break
else:
# Only dispatch the event if it didn't match any of the holds
events_to_dispatch.append(event)
if events_to_dispatch:
self.wrapped.on_raiden_events(raiden, chain_state, events_to_dispatch)
def hold(self, event_type: type, attributes: Dict) -> AsyncResult:
hold = HoldWait(event_type=event_type, async_result=AsyncResult(), attributes=attributes)
self.eventtype_to_waitingholds[event_type].append(hold)
log.debug(f"Hold for {event_type.__name__} with {attributes} created.")
return hold.async_result
def release(self, raiden: RaidenService, event: RaidenEvent):
holds = self.eventtype_to_holdings[type(event)]
found = None
for pos, hold in enumerate(holds):
if hold.event == event:
found = (pos, hold)
break
msg = (
"Cannot release unknown event. "
"Either it was never held, or the event was not emitted yet, "
"or it was released twice."
)
assert found is not None, msg
hold = holds.pop(found[0])
self.wrapped.on_raiden_events(raiden, hold.chain_state, [event])
log.debug(f"{event} released.", node=to_checksum_address(raiden.address))
def hold_secretrequest_for(self, secrethash: SecretHash) -> AsyncResult:
return self.hold(SendSecretRequest, {"secrethash": secrethash})
def hold_unlock_for(self, secrethash: SecretHash):
return self.hold(SendUnlock, {"secrethash": secrethash})
def release_secretrequest_for(self, raiden: RaidenService, secrethash: SecretHash):
for hold in self.eventtype_to_holdings[SendSecretRequest]:
if hold.attributes["secrethash"] == secrethash:
self.release(raiden, hold.event)
def release_unlock_for(self, raiden: RaidenService, secrethash: SecretHash):
for hold in self.eventtype_to_holdings[SendUnlock]:
if hold.attributes["secrethash"] == secrethash:
self.release(raiden, hold.event)
def dont_handle_lock_expired_mock(app):
"""Takes in a raiden app and returns a mock context where lock_expired is not processed"""
def do_nothing(raiden, message): # pylint: disable=unused-argument
return []
return patch.object(app.message_handler, "handle_message_lockexpired", side_effect=do_nothing)
| mit | -5,539,801,465,720,107,000 | 39.844444 | 98 | 0.641866 | false | 4.153672 | false | false | false |
SHDShim/pytheos | pytheos/fit_electronic.py | 1 | 1172 | import lmfit
from .eqn_electronic import zharkov_pel
class ZharkovElecModel(lmfit.Model):
"""
lmfit Model class for Zharkov electronic contribution fitting
"""
def __init__(self, n, z, independent_vars=['v', 'temp'],
param_names=['v0', 'e0', 'g'],
prefix='', missing=None, name=None, **kwargs):
"""
:param n: number of elements in a chemical formula
:param z: number of formula unit in a unit cell
:param independent_vars: define independent variables for lmfit
unit-cell volume in A^3 and temperature in K
:param param_names: define parameter names, v0, e0, g
:param prefix: see lmfit
:param missing: see lmfit
:param name: see lmfit
:param kwargs: see lmfit
"""
kwargs.update({'prefix': prefix, 'missing': missing,
'independent_vars': independent_vars,
'param_names': param_names})
super(ZharkovElecModel, self).__init__(zharkov_pel, n=n, z=z, **kwargs)
self.set_param_hint('v0', min=0.)
self.set_param_hint('e0')
self.set_param_hint('g')
| apache-2.0 | -5,509,651,354,509,060,000 | 38.066667 | 79 | 0.581911 | false | 3.720635 | false | false | false |
gopchandani/ryu | ryu/contrib/ovs/db/types.py | 50 | 21268 | # Copyright (c) 2009, 2010, 2011, 2012 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import uuid
from ovs.db import error
import ovs.db.parser
import ovs.db.data
import ovs.ovsuuid
class AtomicType(object):
def __init__(self, name, default, python_types):
self.name = name
self.default = default
self.python_types = python_types
@staticmethod
def from_string(s):
if s != "void":
for atomic_type in ATOMIC_TYPES:
if s == atomic_type.name:
return atomic_type
raise error.Error('"%s" is not an atomic-type' % s, s)
@staticmethod
def from_json(json):
if type(json) not in [str, unicode]:
raise error.Error("atomic-type expected", json)
else:
return AtomicType.from_string(json)
def __str__(self):
return self.name
def to_string(self):
return self.name
def to_json(self):
return self.name
def default_atom(self):
return ovs.db.data.Atom(self, self.default)
VoidType = AtomicType("void", None, ())
IntegerType = AtomicType("integer", 0, (int, long))
RealType = AtomicType("real", 0.0, (int, long, float))
BooleanType = AtomicType("boolean", False, (bool,))
StringType = AtomicType("string", "", (str, unicode))
UuidType = AtomicType("uuid", ovs.ovsuuid.zero(), (uuid.UUID,))
ATOMIC_TYPES = [VoidType, IntegerType, RealType, BooleanType, StringType,
UuidType]
def escapeCString(src):
dst = ""
for c in src:
if c in "\\\"":
dst += "\\" + c
elif ord(c) < 32:
if c == '\n':
dst += '\\n'
elif c == '\r':
dst += '\\r'
elif c == '\a':
dst += '\\a'
elif c == '\b':
dst += '\\b'
elif c == '\f':
dst += '\\f'
elif c == '\t':
dst += '\\t'
elif c == '\v':
dst += '\\v'
else:
dst += '\\%03o' % ord(c)
else:
dst += c
return dst
def commafy(x):
"""Returns integer x formatted in decimal with thousands set off by
commas."""
return _commafy("%d" % x)
def _commafy(s):
if s.startswith('-'):
return '-' + _commafy(s[1:])
elif len(s) <= 3:
return s
else:
return _commafy(s[:-3]) + ',' + _commafy(s[-3:])
def returnUnchanged(x):
return x
class BaseType(object):
def __init__(self, type_, enum=None, min=None, max=None,
min_length=0, max_length=sys.maxint, ref_table_name=None):
assert isinstance(type_, AtomicType)
self.type = type_
self.enum = enum
self.min = min
self.max = max
self.min_length = min_length
self.max_length = max_length
self.ref_table_name = ref_table_name
if ref_table_name:
self.ref_type = 'strong'
else:
self.ref_type = None
self.ref_table = None
def default(self):
return ovs.db.data.Atom.default(self.type)
def __eq__(self, other):
if not isinstance(other, BaseType):
return NotImplemented
return (self.type == other.type and self.enum == other.enum and
self.min == other.min and self.max == other.max and
self.min_length == other.min_length and
self.max_length == other.max_length and
self.ref_table_name == other.ref_table_name)
def __ne__(self, other):
if not isinstance(other, BaseType):
return NotImplemented
else:
return not (self == other)
@staticmethod
def __parse_uint(parser, name, default):
value = parser.get_optional(name, [int, long])
if value is None:
value = default
else:
max_value = 2 ** 32 - 1
if not (0 <= value <= max_value):
raise error.Error("%s out of valid range 0 to %d"
% (name, max_value), value)
return value
@staticmethod
def from_json(json):
if type(json) in [str, unicode]:
return BaseType(AtomicType.from_json(json))
parser = ovs.db.parser.Parser(json, "ovsdb type")
atomic_type = AtomicType.from_json(parser.get("type", [str, unicode]))
base = BaseType(atomic_type)
enum = parser.get_optional("enum", [])
if enum is not None:
base.enum = ovs.db.data.Datum.from_json(
BaseType.get_enum_type(base.type), enum)
elif base.type == IntegerType:
base.min = parser.get_optional("minInteger", [int, long])
base.max = parser.get_optional("maxInteger", [int, long])
if (base.min is not None and base.max is not None
and base.min > base.max):
raise error.Error("minInteger exceeds maxInteger", json)
elif base.type == RealType:
base.min = parser.get_optional("minReal", [int, long, float])
base.max = parser.get_optional("maxReal", [int, long, float])
if (base.min is not None and base.max is not None
and base.min > base.max):
raise error.Error("minReal exceeds maxReal", json)
elif base.type == StringType:
base.min_length = BaseType.__parse_uint(parser, "minLength", 0)
base.max_length = BaseType.__parse_uint(parser, "maxLength",
sys.maxint)
if base.min_length > base.max_length:
raise error.Error("minLength exceeds maxLength", json)
elif base.type == UuidType:
base.ref_table_name = parser.get_optional("refTable", ['id'])
if base.ref_table_name:
base.ref_type = parser.get_optional("refType", [str, unicode],
"strong")
if base.ref_type not in ['strong', 'weak']:
raise error.Error('refType must be "strong" or "weak" '
'(not "%s")' % base.ref_type)
parser.finish()
return base
def to_json(self):
if not self.has_constraints():
return self.type.to_json()
json = {'type': self.type.to_json()}
if self.enum:
json['enum'] = self.enum.to_json()
if self.type == IntegerType:
if self.min is not None:
json['minInteger'] = self.min
if self.max is not None:
json['maxInteger'] = self.max
elif self.type == RealType:
if self.min is not None:
json['minReal'] = self.min
if self.max is not None:
json['maxReal'] = self.max
elif self.type == StringType:
if self.min_length != 0:
json['minLength'] = self.min_length
if self.max_length != sys.maxint:
json['maxLength'] = self.max_length
elif self.type == UuidType:
if self.ref_table_name:
json['refTable'] = self.ref_table_name
if self.ref_type != 'strong':
json['refType'] = self.ref_type
return json
def copy(self):
base = BaseType(self.type, self.enum.copy(), self.min, self.max,
self.min_length, self.max_length, self.ref_table_name)
base.ref_table = self.ref_table
return base
def is_valid(self):
if self.type in (VoidType, BooleanType, UuidType):
return True
elif self.type in (IntegerType, RealType):
return self.min is None or self.max is None or self.min <= self.max
elif self.type == StringType:
return self.min_length <= self.max_length
else:
return False
def has_constraints(self):
return (self.enum is not None or self.min is not None or
self.max is not None or
self.min_length != 0 or self.max_length != sys.maxint or
self.ref_table_name is not None)
def without_constraints(self):
return BaseType(self.type)
@staticmethod
def get_enum_type(atomic_type):
"""Returns the type of the 'enum' member for a BaseType whose
'type' is 'atomic_type'."""
return Type(BaseType(atomic_type), None, 1, sys.maxint)
def is_ref(self):
return self.type == UuidType and self.ref_table_name is not None
def is_strong_ref(self):
return self.is_ref() and self.ref_type == 'strong'
def is_weak_ref(self):
return self.is_ref() and self.ref_type == 'weak'
def toEnglish(self, escapeLiteral=returnUnchanged):
if self.type == UuidType and self.ref_table_name:
s = escapeLiteral(self.ref_table_name)
if self.ref_type == 'weak':
s = "weak reference to " + s
return s
else:
return self.type.to_string()
def constraintsToEnglish(self, escapeLiteral=returnUnchanged,
escapeNumber=returnUnchanged):
if self.enum:
literals = [value.toEnglish(escapeLiteral)
for value in self.enum.values]
if len(literals) == 2:
english = 'either %s or %s' % (literals[0], literals[1])
else:
english = 'one of %s, %s, or %s' % (literals[0],
', '.join(literals[1:-1]),
literals[-1])
elif self.min is not None and self.max is not None:
if self.type == IntegerType:
english = 'in range %s to %s' % (
escapeNumber(commafy(self.min)),
escapeNumber(commafy(self.max)))
else:
english = 'in range %s to %s' % (
escapeNumber("%g" % self.min),
escapeNumber("%g" % self.max))
elif self.min is not None:
if self.type == IntegerType:
english = 'at least %s' % escapeNumber(commafy(self.min))
else:
english = 'at least %s' % escapeNumber("%g" % self.min)
elif self.max is not None:
if self.type == IntegerType:
english = 'at most %s' % escapeNumber(commafy(self.max))
else:
english = 'at most %s' % escapeNumber("%g" % self.max)
elif self.min_length != 0 and self.max_length != sys.maxint:
if self.min_length == self.max_length:
english = ('exactly %s characters long'
% commafy(self.min_length))
else:
english = ('between %s and %s characters long'
% (commafy(self.min_length),
commafy(self.max_length)))
elif self.min_length != 0:
return 'at least %s characters long' % commafy(self.min_length)
elif self.max_length != sys.maxint:
english = 'at most %s characters long' % commafy(self.max_length)
else:
english = ''
return english
def toCType(self, prefix):
if self.ref_table_name:
return "struct %s%s *" % (prefix, self.ref_table_name.lower())
else:
return {IntegerType: 'int64_t ',
RealType: 'double ',
UuidType: 'struct uuid ',
BooleanType: 'bool ',
StringType: 'char *'}[self.type]
def toAtomicType(self):
return "OVSDB_TYPE_%s" % self.type.to_string().upper()
def copyCValue(self, dst, src):
args = {'dst': dst, 'src': src}
if self.ref_table_name:
return ("%(dst)s = %(src)s->header_.uuid;") % args
elif self.type == StringType:
return "%(dst)s = xstrdup(%(src)s);" % args
else:
return "%(dst)s = %(src)s;" % args
def initCDefault(self, var, is_optional):
if self.ref_table_name:
return "%s = NULL;" % var
elif self.type == StringType and not is_optional:
return '%s = "";' % var
else:
pattern = {IntegerType: '%s = 0;',
RealType: '%s = 0.0;',
UuidType: 'uuid_zero(&%s);',
BooleanType: '%s = false;',
StringType: '%s = NULL;'}[self.type]
return pattern % var
def cInitBaseType(self, indent, var):
stmts = []
stmts.append('ovsdb_base_type_init(&%s, %s);' % (
var, self.toAtomicType()))
if self.enum:
stmts.append("%s.enum_ = xmalloc(sizeof *%s.enum_);"
% (var, var))
stmts += self.enum.cInitDatum("%s.enum_" % var)
if self.type == IntegerType:
if self.min is not None:
stmts.append('%s.u.integer.min = INT64_C(%d);'
% (var, self.min))
if self.max is not None:
stmts.append('%s.u.integer.max = INT64_C(%d);'
% (var, self.max))
elif self.type == RealType:
if self.min is not None:
stmts.append('%s.u.real.min = %d;' % (var, self.min))
if self.max is not None:
stmts.append('%s.u.real.max = %d;' % (var, self.max))
elif self.type == StringType:
if self.min_length is not None:
stmts.append('%s.u.string.minLen = %d;'
% (var, self.min_length))
if self.max_length != sys.maxint:
stmts.append('%s.u.string.maxLen = %d;'
% (var, self.max_length))
elif self.type == UuidType:
if self.ref_table_name is not None:
stmts.append('%s.u.uuid.refTableName = "%s";'
% (var, escapeCString(self.ref_table_name)))
stmts.append('%s.u.uuid.refType = OVSDB_REF_%s;'
% (var, self.ref_type.upper()))
return '\n'.join([indent + stmt for stmt in stmts])
class Type(object):
DEFAULT_MIN = 1
DEFAULT_MAX = 1
def __init__(self, key, value=None, n_min=DEFAULT_MIN, n_max=DEFAULT_MAX):
self.key = key
self.value = value
self.n_min = n_min
self.n_max = n_max
def copy(self):
if self.value is None:
value = None
else:
value = self.value.copy()
return Type(self.key.copy(), value, self.n_min, self.n_max)
def __eq__(self, other):
if not isinstance(other, Type):
return NotImplemented
return (self.key == other.key and self.value == other.value and
self.n_min == other.n_min and self.n_max == other.n_max)
def __ne__(self, other):
if not isinstance(other, Type):
return NotImplemented
else:
return not (self == other)
def is_valid(self):
return (self.key.type != VoidType and self.key.is_valid() and
(self.value is None or
(self.value.type != VoidType and self.value.is_valid())) and
self.n_min <= 1 <= self.n_max)
def is_scalar(self):
return self.n_min == 1 and self.n_max == 1 and not self.value
def is_optional(self):
return self.n_min == 0 and self.n_max == 1
def is_composite(self):
return self.n_max > 1
def is_set(self):
return self.value is None and (self.n_min != 1 or self.n_max != 1)
def is_map(self):
return self.value is not None
def is_smap(self):
return (self.is_map()
and self.key.type == StringType
and self.value.type == StringType)
def is_optional_pointer(self):
return (self.is_optional() and not self.value
and (self.key.type == StringType or self.key.ref_table_name))
@staticmethod
def __n_from_json(json, default):
if json is None:
return default
elif type(json) == int and 0 <= json <= sys.maxint:
return json
else:
raise error.Error("bad min or max value", json)
@staticmethod
def from_json(json):
if type(json) in [str, unicode]:
return Type(BaseType.from_json(json))
parser = ovs.db.parser.Parser(json, "ovsdb type")
key_json = parser.get("key", [dict, str, unicode])
value_json = parser.get_optional("value", [dict, str, unicode])
min_json = parser.get_optional("min", [int])
max_json = parser.get_optional("max", [int, str, unicode])
parser.finish()
key = BaseType.from_json(key_json)
if value_json:
value = BaseType.from_json(value_json)
else:
value = None
n_min = Type.__n_from_json(min_json, Type.DEFAULT_MIN)
if max_json == 'unlimited':
n_max = sys.maxint
else:
n_max = Type.__n_from_json(max_json, Type.DEFAULT_MAX)
type_ = Type(key, value, n_min, n_max)
if not type_.is_valid():
raise error.Error("ovsdb type fails constraint checks", json)
return type_
def to_json(self):
if self.is_scalar() and not self.key.has_constraints():
return self.key.to_json()
json = {"key": self.key.to_json()}
if self.value is not None:
json["value"] = self.value.to_json()
if self.n_min != Type.DEFAULT_MIN:
json["min"] = self.n_min
if self.n_max == sys.maxint:
json["max"] = "unlimited"
elif self.n_max != Type.DEFAULT_MAX:
json["max"] = self.n_max
return json
def toEnglish(self, escapeLiteral=returnUnchanged):
keyName = self.key.toEnglish(escapeLiteral)
if self.value:
valueName = self.value.toEnglish(escapeLiteral)
if self.is_scalar():
return keyName
elif self.is_optional():
if self.value:
return "optional %s-%s pair" % (keyName, valueName)
else:
return "optional %s" % keyName
else:
if self.n_max == sys.maxint:
if self.n_min:
quantity = "%s or more " % commafy(self.n_min)
else:
quantity = ""
elif self.n_min:
quantity = "%s to %s " % (commafy(self.n_min),
commafy(self.n_max))
else:
quantity = "up to %s " % commafy(self.n_max)
if self.value:
return "map of %s%s-%s pairs" % (quantity, keyName, valueName)
else:
if keyName.endswith('s'):
plural = keyName + "es"
else:
plural = keyName + "s"
return "set of %s%s" % (quantity, plural)
def constraintsToEnglish(self, escapeLiteral=returnUnchanged,
escapeNumber=returnUnchanged):
constraints = []
keyConstraints = self.key.constraintsToEnglish(escapeLiteral,
escapeNumber)
if keyConstraints:
if self.value:
constraints.append('key %s' % keyConstraints)
else:
constraints.append(keyConstraints)
if self.value:
valueConstraints = self.value.constraintsToEnglish(escapeLiteral,
escapeNumber)
if valueConstraints:
constraints.append('value %s' % valueConstraints)
return ', '.join(constraints)
def cDeclComment(self):
if self.n_min == 1 and self.n_max == 1 and self.key.type == StringType:
return "\t/* Always nonnull. */"
else:
return ""
def cInitType(self, indent, var):
initKey = self.key.cInitBaseType(indent, "%s.key" % var)
if self.value:
initValue = self.value.cInitBaseType(indent, "%s.value" % var)
else:
initValue = ('%sovsdb_base_type_init(&%s.value, '
'OVSDB_TYPE_VOID);' % (indent, var))
initMin = "%s%s.n_min = %s;" % (indent, var, self.n_min)
if self.n_max == sys.maxint:
n_max = "UINT_MAX"
else:
n_max = self.n_max
initMax = "%s%s.n_max = %s;" % (indent, var, n_max)
return "\n".join((initKey, initValue, initMin, initMax))
| apache-2.0 | 1,829,824,586,550,318,300 | 35.231687 | 79 | 0.516927 | false | 3.866206 | false | false | false |
AdvenamTacet/EMP | prog/plots.py | 1 | 2560 | import plotly as py
import plotly.graph_objs as graph
def choose_color(entropy):
""" Calculates color for bare with given entropy """
red = round(255 * entropy**5)
green = round(255 * ((1 - (entropy - 0.5)**2 )/8)**0.6 )
blue = round(255 * (1 - 0.8 * entropy**0.05))
alpha = 0.6 + round(0.3 * entropy * 100)/100.0
return (red, green, blue, alpha)
def create_annotations(entropy_data, block_descriptions, file_parts):
""" Returns annotations - block labels - in plotly style """
return [ dict (
x=block_descriptions[i][0],
y=entropy_data[i][0],
xref='x',
yref='y',
text=str(file_parts[i]),
showarrow=True,
arrowhead=4,
ax=0,
ay=-40
) for i in range(len(entropy_data))]
def create_plot(entropy_data, block_descriptions):
""" Returns colored plotly bar with entropy data """
entropy_data = sum(entropy_data, [])
return graph.Bar(
y=entropy_data,
x=sum(block_descriptions, []),
marker=dict(
color=['rgba' + str(choose_color(x)) for x in entropy_data]),
)
def plot_page(bar, title, name, annotations = []):
""" Converts plotly bar to .html page """
layout = graph.Layout(title=title, annotations = annotations, )
figure = graph.Figure(data=[bar], layout=layout)
# Runs plot page
py.offline.plot(figure, filename=(name + '.html'))
def save_to_file(bar, title, name, width=1024, height=640):
""" Converts plotly bar to page downloading .png file """
layout = graph.Layout(title=title, width=width, height=height)
figure = graph.Figure(data=[bar], layout=layout)
# Usability of this function is low as it runs plotly page
# We should consider writing alternative solution...
# in the distant future. (And in R.)
py.offline.plot(figure, image='png')
if __name__ == "__main__":
""" Part to check if plots creating works fine """
example_data = [ [1 - float(i)/x for i in range(x)] for x in [30, 150, 40] ]
example_desc = [ [i + (30 if x==150 else (180 if x==40 else 0)) for i in range(x)]
for x in [30, 150, 40] ]
example_parts= ['Start', 'Mid', 'End']
annotations = create_annotations(example_data, example_desc, example_parts)
bar = create_plot(example_data, example_desc)
save_to_file(bar, 'Example file title', 'examplename')
print("Page downloading .png file created!")
plot_page(bar, 'Example title', 'examplename', annotations)
print(".html file created - should open in browser autamaticly.")
| mit | -239,573,664,620,935,200 | 33.594595 | 86 | 0.622266 | false | 3.487738 | false | false | false |
avara1986/gozokia | gozokia/i_o/io.py | 1 | 2873 | # encoding: utf-8
"""
I/O configurations
"""
from __future__ import absolute_import, print_function, unicode_literals
import importlib
from gozokia.i_o.exceptions import GozokiaIoError
from gozokia.conf import settings
class Io(object):
_VALUE = 0
_TXT = 1
_VOICE = 2
_TXT_VOICE = 3
_METHOD_DEFAULT = "terminal_txt"
"""
INPUT
"""
_INPUT_METHODS = {"value": _VALUE, "terminal_txt": _TXT, "terminal_voice": _VOICE}
_INPUT_SELECTED = 0
"""
OUTPUT
"""
_OUTPUT_METHODS = {"value": _VALUE, "terminal_txt": _TXT, "terminal_voice": _VOICE, "terminal_txtvoice": _TXT_VOICE}
_OUTPUT_SELECTED = 0
# System program to play sounds
# _AUDIO_PLAYER = "mpg123"
def __init__(self, *args, **kwargs):
self.set_input_method(kwargs.get('input_type', settings.GOZOKIA_INPUT_TYPE))
self.set_output_method(kwargs.get('output_type', settings.GOZOKIA_OUTPUT_TYPE))
def set_input_method(self, input_type):
"""
Input configuration
"""
try:
self._INPUT_SELECTED = self._INPUT_METHODS[input_type]
except KeyError:
raise GozokiaIoError(self.__class__.__name__ + ": Input method {} not exist".format(input_type))
# Initialize the input method
input_module = importlib.import_module('gozokia.i_o.input')
if self._INPUT_SELECTED == self._VALUE:
self.input = input_module.InputValue()
elif self._INPUT_SELECTED == self._TXT:
self.input = input_module.InputTerminalText()
elif self._INPUT_SELECTED == self._VOICE:
self.input = input_module.InputTerminalVoice()
def get_input_method(self):
return self._INPUT_SELECTED
def listen(self, *args, **kwargs):
return self.input.listen(*args, **kwargs)
def set_output_method(self, output_type):
"""
Output configuration
"""
try:
self._OUTPUT_SELECTED = self._OUTPUT_METHODS[output_type]
except KeyError:
raise GozokiaIoError(self.__class__.__name__ + ": Output method {} not exist".format(output_type))
output_module = importlib.import_module('gozokia.i_o.output')
if self._OUTPUT_SELECTED == self._VALUE:
self.output = output_module.OutputValue()
elif self._OUTPUT_SELECTED == self._TXT:
self.output = output_module.OutputTerminalText()
elif self._OUTPUT_SELECTED == self._VOICE:
self.output = output_module.OutputTerminalVoice()
else:
raise GozokiaIoError(self.__class__.__name__ + ": No Output method for [{}] {}".format(self._OUTPUT_SELECTED, output_type))
def get_output_method(self):
return self._OUTPUT_SELECTED
def response(self, text, *args, **kwargs):
return self.output.response(response=text, *args, **kwargs)
| mit | 1,665,085,800,163,650,000 | 34.9125 | 135 | 0.614688 | false | 3.711886 | false | false | false |
tosmun/AdventOfCode | solutions/day18/p2/main.py | 1 | 1353 |
x_len = 100
y_len = 100
steps = 100
def new_grid():
ng = [ ]
for x in range(x_len):
ng.append([])
for y in range(y_len):
ng[x].append(0)
#Force corners to be on
for (x,y) in [(0,0),(0,y_len-1),(x_len-1,0),(x_len-1,y_len-1)]:
ng[x][y] = 1
return ng
def main():
g = new_grid()
with open('../input.txt', 'r') as fp:
for x in range(x_len):
for y in range(y_len):
c = None
while c not in ['#','.']:
c = fp.read(1)
if c is None or c == '':
raise Exception("Not enough input")
g[x][y] = (1 if c == '#' else 0)
for i in range(steps):
new_g = new_grid()
for x in range(0, x_len):
for y in range(0, y_len):
#Ignore corners
if (x,y) in [(0,0),(0,y_len-1),(x_len-1,0),(x_len-1,y_len-1)]:
continue
count = 0
for n_x in range(x-1,x+2):
for n_y in range(y-1,y+2):
#Skip ourselves
if n_x == x and n_y == y:
continue
#Skip out of bounds
elif n_x < 0 or n_x >= x_len or n_y < 0 or n_y >= y_len:
continue
count += g[n_x][n_y]
#If on
if g[x][y] == 1:
value = 1 if count == 2 or count == 3 else 0
#If off
else:
value = 1 if count == 3 else 0
new_g[x][y] = value
g = new_g
#Count on
count = 0
for x in range(x_len):
for y in range(y_len):
count += g[x][y]
print count
if __name__ == "__main__":
main()
| apache-2.0 | 6,195,058,034,031,673,000 | 21.180328 | 66 | 0.504804 | false | 2.30494 | false | false | false |
drtchops/temper | examples/django_example/django_app/utils.py | 1 | 1178 | import sys
from django.conf.urls import patterns
from django.conf.urls import url as django_url
def url(*regexes, **kwargs):
caller_filename = sys._getframe(1).f_code.co_filename
for m in sys.modules.values():
if (m and '__file__' in m.__dict__ and
m.__file__.startswith(caller_filename)):
module = m
break
def _wrapper(cls):
if module:
if 'urlpatterns' not in module.__dict__:
module.urlpatterns = []
view = cls.as_view()
view_name = kwargs.get('name') or cls.__name__
url_kwargs = dict(kwargs)
url_kwargs['name'] = view_name
for regex in regexes:
module.urlpatterns += patterns(
'', django_url(regex, view, **url_kwargs))
return cls
return _wrapper
def select(t, name, options=[], value=None):
with t.select():
for o in options:
with t.option(checked=o[0] == value, value=o[0]):
t(o[1])
def css_link(t, href):
t.link(rel='stylesheet', href=href)
def js_link(t, href):
t.script(type='text/javascript', src=href)()
| mit | 5,668,326,754,291,013,000 | 26.395349 | 62 | 0.541596 | false | 3.812298 | false | false | false |
grollins/foldkin | foldkin/kings/contact_order_model.py | 1 | 2148 | from foldkin.base.model_factory import ModelFactory
from foldkin.base.model import Model
from foldkin.zam_protein import create_zam_protein_from_pdb_id
class ContactOrderModelFactory(ModelFactory):
"""docstring for ContactOrderModelFactory"""
def __init__(self):
super(ContactOrderModelFactory, self).__init__()
def create_model(self, pdb_id, parameter_set):
new_model = ContactOrderModel(pdb_id, parameter_set)
return new_model
class ContactOrderModel(Model):
"""docstring for ContactOrderModel"""
def __init__(self, pdb_id, parameter_set):
super(ContactOrderModel, self).__init__()
self.pdb_id = pdb_id
self.zam_protein = create_zam_protein_from_pdb_id(pdb_id)
self.parameter_set = parameter_set
def get_id(self):
return self.pdb_id
def get_parameter(self, parameter_name):
return self.parameter_set.get_parameter(parameter_name)
def get_contact_list(self):
contact_list = self.zam_protein.get_contact_list()
one_letter_sequence = self.zam_protein.get_sequence()
new_contact_list = []
for c in contact_list:
residue1_number = c[0]
residue2_number = c[1]
residue1_name = one_letter_sequence[residue1_number]
residue2_name = one_letter_sequence[residue2_number]
new_contact = Contact(residue1_name, residue2_name, residue1_number,
residue2_number)
new_contact_list.append(new_contact)
return new_contact_list
class Contact(object):
"""docstring for Contact"""
def __init__(self, residue1_name, residue2_name, residue1_number,
residue2_number):
super(Contact, self).__init__()
self.residue1_name = residue1_name
self.residue2_name = residue2_name
self.residue1_number = residue1_number
self.residue2_number = residue2_number
def get_sequence_separation(self):
return self.residue2_number - self.residue1_number
def get_residue_names_as_letters(self):
return [self.residue1_name, self.residue2_name]
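# Minimal usage sketch, not part of the original module: the PDB id is an
# arbitrary example and parameter_set is assumed to provide get_parameter()
# as expected by ContactOrderModel above.
#
#   factory = ContactOrderModelFactory()
#   model = factory.create_model('1UBQ', parameter_set)
#   for contact in model.get_contact_list():
#       print contact.get_sequence_separation(), contact.get_residue_names_as_letters()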
| bsd-2-clause | -4,462,443,247,395,210,000 | 36.034483 | 80 | 0.651304 | false | 3.532895 | false | false | false |
mkdubik/multinet-evaluation | plot/t6.py | 1 | 5291 | import sys
from os import listdir
import operator
import collections
from pprint import pprint
from itertools import count
import matplotlib.pyplot as plt
import networkx as nx
import multinetx as mx
import numpy as np
from t5 import collect_data
def collect_community(path):
files = filter(lambda x: '_community' in x, listdir(path))
data = {
'lart':{}, 'glouvain':{}, 'pmm':{},
}
for file in files:
params = file.split('_')
method = params[0]
if method == 'pmm' or method == 'glouvain':
params = [params[1], params[2]]
if method == 'lart':
params = [params[1]]
with open(path + file) as fd:
d = fd.read()
data[method]['_'.join(['%.1f' % (float(p)) for p in params])] = d
return data
def main():
data = collect_data('results/t6/')
comm = collect_community('results/t6/')
def find_best(data, method):
key = max(data[method].iteritems(), key=operator.itemgetter(1))[0]
y = data[method][key]
return y, key, method
print 'LART', data['lart']['9.0_1.0_1.0']
print 'PMM', data['pmm']['30.0_140.0']
print 'Glouvain', data['glouvain']['1.0_1.0']
gl = find_best(data, 'glouvain')
ll = find_best(data, 'lart')
pl = find_best(data, 'pmm')
print 'LART', ll
print 'PMM', pl
print 'Glouvain', gl
best = max([gl, ll, pl], key=operator.itemgetter(0))
best_comm = {}
for b in comm[best[2]][best[1]].split('\n'):
if b:
a,l,c = b.split(',')
best_comm['%s-%s' % (a, l)] = int(c)
layers = {
'RT': nx.Graph(), 'ff': nx.Graph(), 'Re': nx.Graph()
}
ids = {}
counter = 0
groups = []
with open('data/t6/dk', 'r') as fd:
for l in fd.readlines():
a1,a2, layer = l.replace('\n', '').split(",")
if a1 not in ids:
ids[a1] = counter
counter = counter + 1
if a2 not in ids:
ids[a2] = counter
counter = counter + 1
groups.append(best_comm['%s-%s' % (a1, layer)])
groups.append(best_comm['%s-%s' % (a2, layer)])
for k,v in layers.iteritems():
v.add_node(ids[a1], label = best_comm['%s-%s' % (a1, layer)])
v.add_node(ids[a2], label = best_comm['%s-%s' % (a2, layer)])
layers[layer].add_edge(ids[a1], ids[a2])
truth = {}
with open('data/t6/dk_truth', 'r') as fd:
for l in fd.readlines():
actor, party = l.replace('\n', '').split(',')
truth[actor] = party
mapping = dict(zip(sorted(groups), count()))
N, L = len(layers['ff'].nodes()), len(layers.keys())
adj_block = mx.lil_matrix(np.zeros((N * L, N * L)))
for i in xrange(L):
for j in xrange(L):
if i < j:
adj_block[N * i: N * (i + 1), N * j: (j+1) * N] = np.identity(N)
adj_block += adj_block.T
mg = mx.MultilayerGraph(list_of_layers=[v for k, v in layers.items()])
#inter_adjacency_matrix = adj_block)
mg.set_edges_weights(intra_layer_edges_weight=1)
#inter_layer_edges_weight=2)
fig = plt.figure(figsize=(16, 16))
plt.title('Twitter data of the Danish 2015 election')
stats = collections.defaultdict(list)
for k, v in best_comm.items():
stats[v].append(k)
stats2 = collections.defaultdict(dict)
for k,v in stats.items():
for e in v:
actor,_ = e.split('-')
if truth[actor] in stats2[k]:
stats2[k][truth[actor]] += 1
else:
stats2[k][truth[actor]] = 1
left = [
'Dansk Folkeparti',
'Venstre',
'Liberal Alliance',
'Det Konservative Folkeparti',
'KristenDemokraterne',
]
right = [
'Socialistisk Folkeparti',
'Radikale Venstre',
'Socialdemokratiet',
'Alternativet',
'Enhedslisten'
]
out = 'Udenfor partierne'
for k,v in stats2.items():
total = 0
for k1,v1 in v.items():
total += v1
pscore = 0
for k1,v1 in v.items():
if k1 in left:
pscore += (stats2[k][k1] * 1)
if k1 in right:
pscore -= (stats2[k][k1] * 1)
stats2[k][k1] = round(float(v1) / float(total), 2)
stats2[k]['nodes'] = filter(lambda x, i = mg, k = k: i.node[x]['label'] == k, mg.node)
stats2[k]['pscore'] = pscore / float(total)
stats2 = dict(stats2)
if len(sys.argv) > 1 and sys.argv[1] == 'heat':
cmap = plt.get_cmap('RdBu_r')
colors = [stats2[mg.node[n]['label']]['pscore'] for n in mg.nodes()]
pos = mx.get_position(mg, nx.spring_layout(layers[layers.keys()[2]],
weight ='pscore'),
layer_vertical_shift=0.2,
layer_horizontal_shift=0.0,
proj_angle=47)
for key, val in stats2.items():
mx.draw_networkx_nodes(mg, pos=pos,node_size=100, with_labels=False,
nodelist = val['nodes'],
label = key,
				node_color = [colors[n] for n in val['nodes']],
cmap = cmap)
else:
val_map = {
0: 'k',
1: 'r',
2: 'g',
3: 'b',
4: 'c',
5: 'm',
6: 'y',
7: '0.75',
8: 'w',
}
colors = [val_map[mg.node[n]['label']] for n in mg.nodes()]
pos = mx.get_position(mg, nx.spring_layout(layers[layers.keys()[2]]),
layer_vertical_shift=0.2,
layer_horizontal_shift=0.0,
proj_angle=47)
for k, v in stats2.items():
mx.draw_networkx_nodes(mg, pos=pos,node_size=100, with_labels=False,
nodelist = v['nodes'],
label = k,
node_color = [colors[n] for n in v['nodes']],
cmap=plt.get_cmap('Set2'))
mx.draw_networkx_edges(mg, pos=pos, edge_color = 'b')
fig.tight_layout()
plt.legend(numpoints=1, loc=1)
plt.xticks([])
plt.yticks([])
plt.show()
if __name__ == "__main__":
main()
| gpl-2.0 | 8,117,371,479,766,188,000 | 22.20614 | 88 | 0.592327 | false | 2.529159 | false | false | false |
HelloLily/hellolily | lily/notes/api/views.py | 1 | 1200 | from django_filters import CharFilter, NumberFilter
from django_filters import rest_framework as filters
from rest_framework.filters import OrderingFilter
from rest_framework.viewsets import ModelViewSet
from lily.notes.api.serializers import NoteSerializer
from lily.notes.models import Note
class NoteFilter(filters.FilterSet):
content_type = CharFilter(name='gfk_content_type__model')
object_id = NumberFilter(name='gfk_object_id')
class Meta:
model = Note
fields = ('is_pinned', )
class NoteViewSet(ModelViewSet):
"""
This viewset contains all possible ways to manipulate a Note.
"""
model = Note
queryset = Note.objects # Without .all() this filters on the tenant
serializer_class = NoteSerializer
# Set all filter backends that this viewset uses.
filter_backends = (OrderingFilter, filters.DjangoFilterBackend)
# OrderingFilter: set all possible fields to order by.
ordering_fields = ('created',)
# DjangoFilterBackend: set the possible fields to filter on.
filter_class = NoteFilter
def get_queryset(self, *args, **kwargs):
return super(NoteViewSet, self).get_queryset().filter(is_deleted=False)
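# Illustrative request sketch; the /api/notes/ route is an assumption (it depends
# on how this viewset is registered with a router elsewhere in the project):
#
#   GET /api/notes/?content_type=case&object_id=12&is_pinned=True&ordering=-created
#
# resolves through NoteFilter to gfk_content_type__model and gfk_object_id
# lookups, ordered by creation date, over non-deleted notes only.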
| agpl-3.0 | -7,865,470,299,694,292,000 | 33.285714 | 79 | 0.729167 | false | 4.095563 | false | false | false |
ramineni/my_congress | congress/datasources/monasca_driver.py | 1 | 6504 | # Copyright (c) 2015 Cisco.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import datetime
import keystoneclient.v3.client as ksclient
from monascaclient import client as monasca_client
from oslo_log import log as logging
from oslo_utils import timeutils
from congress.datasources import constants
from congress.datasources import datasource_driver
from congress.datasources import datasource_utils as ds_utils
LOG = logging.getLogger(__name__)
# TODO(thinrichs): figure out how to move even more of this boilerplate
# into DataSourceDriver. E.g. change all the classes to Driver instead of
# NeutronDriver, CeilometerDriver, etc. and move the d6instantiate function
# to DataSourceDriver.
class MonascaDriver(datasource_driver.PollingDataSourceDriver,
datasource_driver.ExecutionDriver):
METRICS = "metrics"
DIMENSIONS = "dimensions"
STATISTICS = "statistics"
DATA = "statistics.data"
# TODO(fabiog): add events and logs when fully supported in Monasca
# EVENTS = "events"
# LOGS = "logs"
value_trans = {'translation-type': 'VALUE'}
metric_translator = {
'translation-type': 'HDICT',
'table-name': METRICS,
'selector-type': 'DICT_SELECTOR',
'field-translators':
({'fieldname': 'id', 'translator': value_trans},
{'fieldname': 'name', 'translator': value_trans},
{'fieldname': 'dimensions',
'translator': {'translation-type': 'VDICT',
'table-name': DIMENSIONS,
'id-col': 'id',
'key-col': 'key', 'val-col': 'value',
'translator': value_trans}})
}
statistics_translator = {
'translation-type': 'HDICT',
'table-name': STATISTICS,
'selector-type': 'DICT_SELECTOR',
'field-translators':
({'fieldname': 'name', 'translator': value_trans},
{'fieldname': 'statistics',
'translator': {'translation-type': 'LIST',
'table-name': DATA,
'id-col': 'name',
'val-col': 'value_col',
'translator': value_trans}})
}
TRANSLATORS = [metric_translator, statistics_translator]
def __init__(self, name='', args=None):
super(MonascaDriver, self).__init__(name, args=args)
datasource_driver.ExecutionDriver.__init__(self)
self.creds = args
if not self.creds.get('project_name'):
self.creds['project_name'] = self.creds['tenant_name']
if not self.creds.get('poll_time'):
# set default polling time to 1hr
self.creds['poll_time'] = 3600
# Monasca uses Keystone V3
self.creds['auth_url'] = self.creds['auth_url'].replace("v2.0", "v3")
self.keystone = ksclient.Client(**self.creds)
self.creds['token'] = self.keystone.auth_token
if not self.creds.get('endpoint'):
# if the endpoint not defined retrieved it from keystone catalog
self.creds['endpoint'] = self.keystone.service_catalog.url_for(
service_type='monitoring', endpoint_type='publicURL')
self.monasca = monasca_client.Client('2_0', **self.creds)
self.add_executable_client_methods(self.monasca, 'monascaclient.')
self.initialize_update_methods()
self._init_end_start_poll()
@staticmethod
def get_datasource_info():
result = {}
result['id'] = 'monasca'
result['description'] = ('Datasource driver that interfaces with '
'monasca.')
result['config'] = ds_utils.get_openstack_required_config()
result['config']['lazy_tables'] = constants.OPTIONAL
result['secret'] = ['password']
return result
def initialize_update_methods(self):
metrics_method = lambda: self._translate_metric(
self.monasca.metrics.list())
self.add_update_method(metrics_method, self.metric_translator)
statistics_method = self.update_statistics
self.add_update_method(statistics_method, self.statistics_translator)
def update_statistics(self):
today = datetime.datetime.now()
yesterday = datetime.timedelta(hours=24)
start_from = timeutils.isotime(today-yesterday)
for metric in self.monasca.metrics.list_names():
LOG.debug("Monasca statistics for metric %s", metric['name'])
_query_args = dict(
start_time=start_from,
name=metric['name'],
statistics='avg',
period=int(self.creds['poll_time']),
merge_metrics='true')
statistics = self.monasca.metrics.list_statistics(
**_query_args)
self._translate_statistics(statistics)
@ds_utils.update_state_on_changed(METRICS)
def _translate_metric(self, obj):
"""Translate the metrics represented by OBJ into tables."""
LOG.debug("METRIC: %s", str(obj))
row_data = MonascaDriver.convert_objs(obj,
self.metric_translator)
return row_data
@ds_utils.update_state_on_changed(STATISTICS)
def _translate_statistics(self, obj):
"""Translate the metrics represented by OBJ into tables."""
LOG.debug("STATISTICS: %s", str(obj))
row_data = MonascaDriver.convert_objs(obj,
self.statistics_translator)
return row_data
def execute(self, action, action_args):
"""Overwrite ExecutionDriver.execute()."""
# action can be written as a method or an API call.
func = getattr(self, action, None)
if func and self.is_executable(func):
func(action_args)
else:
self._execute_api(self.monasca, action, action_args)
| apache-2.0 | 6,159,058,696,092,591,000 | 38.180723 | 78 | 0.602399 | false | 4.140038 | false | false | false |
Rdbaker/Rank | rank/user/models.py | 2 | 3277 | # -*- coding: utf-8 -*-
import datetime as dt
from flask_login import UserMixin
from sqlalchemy.orm import relationship, backref
from rank.api.models import UserRequest
from rank.core.models import (
DB as db,
SurrogatePK,
ReferenceCol,
CRUDMixin
)
from rank.extensions import bcrypt
class Role(SurrogatePK, db.Model):
__tablename__ = 'roles'
name = db.Column(db.String(80), nullable=False)
user_id = ReferenceCol('users', nullable=True)
user = db.relationship('User', backref='roles')
def __init__(self, name, **kwargs):
db.Model.__init__(self, name=name, **kwargs)
def __repr__(self):
return '<Role({name})>'.format(name=self.name)
class User(UserMixin, CRUDMixin, SurrogatePK, db.Model):
__tablename__ = 'users'
username = db.Column(db.String(80), unique=True, nullable=False)
#: The hashed password
password = db.Column(db.String(128), nullable=True)
created_at = db.Column(db.DateTime, nullable=False, default=dt.datetime.utcnow)
active = db.Column(db.Boolean(), default=False)
is_admin = db.Column(db.Boolean(), default=False, nullable=False)
game_id = db.Column(db.Integer, db.ForeignKey("games.id"))
game = relationship("Game", backref=backref("user", uselist=False), cascade="delete")
def __init__(self, username, password=None, **kwargs):
db.Model.__init__(self, username=username, **kwargs)
if password:
self.set_password(password)
else:
self.password = None
def set_password(self, password):
self.password = bcrypt.generate_password_hash(password)
def check_password(self, value):
return bcrypt.check_password_hash(self.password, value)
def requests_today(self):
midnight = dt.datetime.combine(dt.date.today(), dt.time())
if not self.is_admin:
return UserRequest.query.filter(
UserRequest.game_id == self.game.id,
UserRequest.time_requested > midnight).order_by(UserRequest.time_requested.desc())
else:
return UserRequest.query.filter(
UserRequest.time_requested > midnight).order_by(UserRequest.time_requested.desc())
def requests_this_week(self):
midnight = dt.datetime.combine(dt.date.today(), dt.time())
seven_days_ago = midnight - dt.timedelta(days=7)
if not self.is_admin:
return UserRequest.query.filter(
UserRequest.game_id == self.game.id,
UserRequest.time_requested > seven_days_ago).order_by(UserRequest.time_requested.desc())
else:
return UserRequest.query.filter(
UserRequest.time_requested > seven_days_ago).order_by(UserRequest.time_requested.desc())
def request_count_today(self):
midnight = dt.datetime.combine(dt.date.today(), dt.time())
if not self.is_admin:
return UserRequest.query.filter(
UserRequest.game_id == self.game.id,
UserRequest.time_requested > midnight).count()
else:
return UserRequest.query.filter(
UserRequest.time_requested > midnight).count()
def __repr__(self):
return '<User({username!r})>'.format(username=self.username)
| mit | 9,021,309,524,885,959,000 | 37.104651 | 104 | 0.638694 | false | 3.753723 | false | false | false |
hassaanm/business-articles | nytimesScraper/nytimesScraper/spiders/nytimes_spider.py | 1 | 2778 | from scrapy.spider import BaseSpider
from scrapy.exceptions import DropItem
from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from nytimesScraper.items import Article
import re
import json
class NYTSpider(CrawlSpider):
name = 'nytimes'
allowed_domains = ['nytimes.com']
f = open('All_URLs.json')
company = json.load(f)
f.close()
start_urls = company.keys()
rules = [Rule(SgmlLinkExtractor(allow=r'pagewanted=\d+',tags='//a[@class="next"]'), 'parse_link')]
def parse_link(self, response):
x = HtmlXPathSelector(response)
article = Article()
article['url'] = response.url
article['title'] = x.select('//title/text()').extract()
article['company'] = NYTSpider.company[self.baseURL(response.url)] if self.baseURL(response.url) in NYTSpider.company else ""
article['text'] = self.extractText(x.select('//div[@class="articleBody"]//text()').extract()) \
+ self.extractText(x.select('//div[@id="articleBody"]//text()').extract()) \
+ self.extractText(x.select('string(//div[@class="entry-content"])').extract())
article['date'] = self.extractDate(x.select('//meta[@name="pdate"]').extract())
if len(article['company']) == 0 or len(article['text']) == 0:
raise DropItem('Missing company and/or text: %s' % article)
return article
def parse_start_url(self, response):
return self.parse_link(response)
def baseURL(self, url):
url = re.sub('\?pagewanted=\d+', '', url)
url = re.sub('\?_r=\d', '', url)
url = re.sub('&pagewanted=\d+', '', url)
url = re.sub('&_r=\d', '', url)
url = re.sub('pagewanted=\d+', '', url)
url = re.sub('_r=\d', '', url)
return url
def extractText(self, body):
texts = []
for text in body:
'''cleanText = text
while '<' in cleanText:
openTag = cleanText.find('<')
closeTag = cleanText.find('>')
cleanText = cleanText[:openTag] + cleanText[closeTag+1:]
cleanText = cleanText.strip()
if len(cleanText) > 0:
texts.append(cleanText)'''
if len(text.strip()) > 100:
texts.append(text.strip())
return ' '.join(texts)
def extractDate(self, dateTags):
for dateTag in dateTags:
if 'content=' in dateTag:
spot = dateTag.find('content=') + 9
date = dateTag[spot:spot+8]
date = date[:4] + '-' + date[4:6] + '-' + date[6:]
return date
return '2013-01-01'
| apache-2.0 | -4,896,144,330,404,601,000 | 36.540541 | 133 | 0.568754 | false | 3.708945 | false | false | false |
djaodjin/djaodjin-signup | signup/decorators.py | 1 | 10574 | # Copyright (c) 2021, Djaodjin Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Decorators that check a User has a verified email address.
"""
from __future__ import unicode_literals
from functools import wraps
from django.contrib import messages
from django.contrib.auth import (REDIRECT_FIELD_NAME, logout as auth_logout)
from django.contrib.auth.views import redirect_to_login
from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext_lazy as _
from . import settings, signals
from .auth import validate_redirect
from .compat import available_attrs, is_authenticated, reverse, six
from .models import Contact
from .utils import has_invalid_password, get_accept_list
def _insert_url(request, redirect_field_name=REDIRECT_FIELD_NAME,
inserted_url=None):
    '''Redirects to the *inserted_url* before going to the original
request path.'''
# This code is pretty much straightforward
# from contrib.auth.user_passes_test
path = request.build_absolute_uri()
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = six.moves.urllib.parse.urlparse(
inserted_url)[:2]
current_scheme, current_netloc = six.moves.urllib.parse.urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = request.get_full_path()
return redirect_to_login(path, inserted_url, redirect_field_name)
def redirect_or_denied(request, inserted_url,
redirect_field_name=REDIRECT_FIELD_NAME, descr=None):
http_accepts = get_accept_list(request)
if ('text/html' in http_accepts
and isinstance(inserted_url, six.string_types)):
return _insert_url(request, redirect_field_name=redirect_field_name,
inserted_url=inserted_url)
if descr is None:
descr = ""
raise PermissionDenied(descr)
def send_verification_email(contact, request,
next_url=None,
redirect_field_name=REDIRECT_FIELD_NAME):
"""
Send an email to the user to verify her email address.
    The email embeds a link to a verification url and a redirect to the page
the verification email was sent from so that the user stays on her
workflow once verification is completed.
"""
back_url = request.build_absolute_uri(reverse('registration_activate',
args=(contact.email_verification_key,)))
if next_url:
back_url += '?%s=%s' % (redirect_field_name, next_url)
signals.user_verification.send(
sender=__name__, user=contact.user, request=request,
back_url=back_url, expiration_days=settings.KEY_EXPIRATION)
def send_verification_phone(contact, request,
next_url=None,
redirect_field_name=REDIRECT_FIELD_NAME):
"""
Send a text message to the user to verify her phone number.
    The email embeds a link to a verification url and a redirect to the page
the verification email was sent from so that the user stays on her
workflow once verification is completed.
"""
# XXX needs to send phone text message instead of e-mail!!!
back_url = request.build_absolute_uri(reverse('registration_activate',
args=(contact.email_verification_key,)))
if next_url:
back_url += '?%s=%s' % (redirect_field_name, next_url)
signals.user_verification.send(
sender=__name__, user=contact.user, request=request,
back_url=back_url, expiration_days=settings.KEY_EXPIRATION)
# The user we are looking to activate might be different from
# the request.user (which can be Anonymous)
def check_has_credentials(request, user,
redirect_field_name=REDIRECT_FIELD_NAME,
next_url=None):
"""
Checks that a *user* has set login credentials (i.e. password).
"""
if has_invalid_password(user):
# Let's send e-mail again.
#pylint:disable=unused-variable
contact, created = Contact.objects.prepare_email_verification(
user, user.email)
if not next_url:
next_url = validate_redirect(request)
send_verification_email(
contact, request, next_url=next_url,
redirect_field_name=redirect_field_name)
return False
return True
def check_email_verified(request, user,
redirect_field_name=REDIRECT_FIELD_NAME,
next_url=None):
"""
Checks that a *user*'s e-mail has been verified.
"""
#pylint:disable=unused-variable
if Contact.objects.is_reachable_by_email(user):
return True
contact, created = Contact.objects.prepare_email_verification(
user, user.email)
# Let's send e-mail again.
if not next_url:
next_url = validate_redirect(request)
send_verification_email(
contact, request, next_url=next_url,
redirect_field_name=redirect_field_name)
return False
def check_phone_verified(request, user,
redirect_field_name=REDIRECT_FIELD_NAME,
next_url=None):
"""
    Checks that a *user*'s phone number has been verified.
"""
#pylint:disable=unused-variable
if Contact.objects.is_reachable_by_phone(user):
return True
contact, created = Contact.objects.prepare_phone_verification(
user, user.phone) # XXX
# Let's send e-mail again.
if not next_url:
next_url = validate_redirect(request)
send_verification_phone(
contact, request, next_url=next_url,
redirect_field_name=redirect_field_name)
return False
def fail_authenticated(request):
"""
Authenticated
"""
if not is_authenticated(request):
return str(settings.LOGIN_URL)
return False
def fail_registered(request):
"""
Registered
"""
if not is_authenticated(request):
return str(reverse('registration_register'))
return False
def fail_active(request):
"""
Active with valid credentials
"""
if not check_has_credentials(request, request.user):
return str(settings.LOGIN_URL)
return False
def fail_verified_email(request):
"""
Active with a verified e-mail address
"""
if not check_email_verified(request, request.user):
return str(settings.LOGIN_URL)
return False
def fail_verified_phone(request):
"""
Active with a verified phone number
"""
if not check_phone_verified(request, request.user):
return str(settings.LOGIN_URL)
return False
def active_required(function=None,
redirect_field_name=REDIRECT_FIELD_NAME,
login_url=None):
"""
Decorator for views that checks that the user is active. We won't
    activate the account of a user until we have checked the email address
is valid.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
redirect_url = login_url or str(settings.LOGIN_URL)
if is_authenticated(request):
redirect_url = fail_active(request)
if not redirect_url:
return view_func(request, *args, **kwargs)
# User is logged in but her email has not been verified yet.
http_accepts = get_accept_list(request)
if 'text/html' in http_accepts:
messages.info(request, _(
"You should now secure and activate your account following the instructions"\
" we just emailed you. Thank you."))
auth_logout(request)
return redirect_or_denied(request, redirect_url,
redirect_field_name=redirect_field_name)
return _wrapped_view
if function:
return decorator(function)
return decorator
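# Hedged usage sketch; the dashboard view below is hypothetical and not part of
# this module:
#
#   @active_required
#   def dashboard(request):
#       ...
#
# A request from a user whose account still lacks valid credentials is logged
# out and redirected to settings.LOGIN_URL (HTML clients), or answered with a
# 403 PermissionDenied (non-HTML clients), after a fresh verification e-mail
# has been sent via fail_active/check_has_credentials.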
def verified_email_required(function=None,
redirect_field_name=REDIRECT_FIELD_NAME,
login_url=None):
"""
Decorator for views that checks that the user has a verified e-mail address.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
redirect_url = login_url or str(settings.LOGIN_URL)
if is_authenticated(request):
redirect_url = fail_verified_email(request)
if not redirect_url:
return view_func(request, *args, **kwargs)
# User is logged in but her email has not been verified yet.
http_accepts = get_accept_list(request)
if 'text/html' in http_accepts:
messages.info(request, _(
"You should now secure and activate your account following the instructions"\
" we just emailed you. Thank you."))
auth_logout(request)
return redirect_or_denied(request, redirect_url,
redirect_field_name=redirect_field_name)
return _wrapped_view
if function:
return decorator(function)
return decorator
| bsd-2-clause | 1,229,249,765,050,495,000 | 36.496454 | 80 | 0.657178 | false | 4.232986 | false | false | false |
starshipfactory/sfblog | events/models.py | 1 | 1165 | from django.db import models
class Location(models.Model):
name = models.CharField(max_length=128)
page = models.ForeignKey("simplecms.Page", blank=True, null=True)
def __unicode__(self):
return self.name
class EventDescription(models.Model):
cost = models.TextField(default="", blank=True)
page = models.ForeignKey(
"simplecms.Page", blank=True, null=True,
limit_choices_to={"parent__isnull": False,
'locale': 'de'})
post = models.ForeignKey("zinnia.Entry", blank=True, null=True)
location = models.ForeignKey(Location)
instructor = models.CharField(max_length=128)
event = models.OneToOneField("schedule.Event")
def __unicode__(self):
return self.event.title
def get_absolute_url(self):
try:
return self.post.get_absolute_url()
except:
pass
try:
return self.page.get_absolute_url()
except:
pass
"""
list(rrule(dateutil.rrule.MONTHLY, count=5,
byweekday=dateutil.rrule.FR,
dtstart=datetime.date(2015,4,3),
bysetpos=1))
=>
byweekday:4;bysetpos:1
"""
| bsd-3-clause | 3,132,088,787,468,325,400 | 25.477273 | 69 | 0.614592 | false | 3.698413 | false | false | false |
dhouck/effulgence2epub | src/new_toc.py | 1 | 1132 | #!/usr/bin/python
"""Takes the TOC, this time the raw HTML, and produces an ebook xhtml TOC with
rewritten local links.
We're producing this directly from the html so that we can keep the extra
multi-level chapter structure without parsing the entire thing into some
hierarchical tree.
"""
from bs4 import BeautifulSoup
import common
from string import Template
import pkg_resources
toc_template = Template(
pkg_resources.resource_string(__name__, "toc_template.xhtml"))
if __name__== "__main__":
soup = BeautifulSoup(open(
"web_cache/edgeofyourseat.dreamwidth.org/2121.html"))
the_toc_html = soup.select(".entry-content")[0]
# Remove the "how to read" link.
the_toc_html.find_all("center")[0].extract()
# As for the others, parse them & replace them with the appropriate internal
# links.
common.replace_links_with_internal(the_toc_html)
toc_string = the_toc_html.decode_contents(formatter="html")
toc_html_string = toc_template.substitute(toc_entries=toc_string)
with open("global_lists/toc.xhtml", mode="w") as f:
f.write(toc_html_string.encode('utf-8'))
| agpl-3.0 | 1,078,908,016,396,541,400 | 27.3 | 80 | 0.706714 | false | 3.5375 | false | false | false |
Neuvoo/legacy-portage | pym/_emerge/MetadataRegen.py | 1 | 4363 | # Copyright 1999-2010 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import portage
from portage import os
from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
from _emerge.PollScheduler import PollScheduler
class MetadataRegen(PollScheduler):
def __init__(self, portdb, cp_iter=None, consumer=None,
max_jobs=None, max_load=None):
PollScheduler.__init__(self)
self._portdb = portdb
self._global_cleanse = False
if cp_iter is None:
cp_iter = self._iter_every_cp()
# We can globally cleanse stale cache only if we
# iterate over every single cp.
self._global_cleanse = True
self._cp_iter = cp_iter
self._consumer = consumer
if max_jobs is None:
max_jobs = 1
self._max_jobs = max_jobs
self._max_load = max_load
self._valid_pkgs = set()
self._cp_set = set()
self._process_iter = self._iter_metadata_processes()
self.returncode = os.EX_OK
self._error_count = 0
def _iter_every_cp(self):
every_cp = self._portdb.cp_all()
every_cp.sort(reverse=True)
try:
while True:
yield every_cp.pop()
except IndexError:
pass
def _iter_metadata_processes(self):
portdb = self._portdb
valid_pkgs = self._valid_pkgs
cp_set = self._cp_set
consumer = self._consumer
for cp in self._cp_iter:
cp_set.add(cp)
portage.writemsg_stdout("Processing %s\n" % cp)
cpv_list = portdb.cp_list(cp)
for cpv in cpv_list:
valid_pkgs.add(cpv)
ebuild_path, repo_path = portdb.findname2(cpv)
if ebuild_path is None:
raise AssertionError("ebuild not found for '%s'" % cpv)
metadata, st, emtime = portdb._pull_valid_cache(
cpv, ebuild_path, repo_path)
if metadata is not None:
if consumer is not None:
consumer(cpv, ebuild_path,
repo_path, metadata)
continue
yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
ebuild_mtime=emtime,
metadata_callback=portdb._metadata_callback,
portdb=portdb, repo_path=repo_path,
settings=portdb.doebuild_settings)
def run(self):
portdb = self._portdb
from portage.cache.cache_errors import CacheError
dead_nodes = {}
while self._schedule():
self._poll_loop()
while self._jobs:
self._poll_loop()
if self._global_cleanse:
for mytree in portdb.porttrees:
try:
dead_nodes[mytree] = set(portdb.auxdb[mytree])
except CacheError as e:
portage.writemsg("Error listing cache entries for " + \
"'%s': %s, continuing...\n" % (mytree, e),
noiselevel=-1)
del e
dead_nodes = None
break
else:
cp_set = self._cp_set
cpv_getkey = portage.cpv_getkey
for mytree in portdb.porttrees:
try:
dead_nodes[mytree] = set(cpv for cpv in \
portdb.auxdb[mytree] \
if cpv_getkey(cpv) in cp_set)
except CacheError as e:
portage.writemsg("Error listing cache entries for " + \
"'%s': %s, continuing...\n" % (mytree, e),
noiselevel=-1)
del e
dead_nodes = None
break
if dead_nodes:
for y in self._valid_pkgs:
for mytree in portdb.porttrees:
if portdb.findname2(y, mytree=mytree)[0]:
dead_nodes[mytree].discard(y)
for mytree, nodes in dead_nodes.items():
auxdb = portdb.auxdb[mytree]
for y in nodes:
try:
del auxdb[y]
except (KeyError, CacheError):
pass
def _schedule_tasks(self):
"""
@rtype: bool
@returns: True if there may be remaining tasks to schedule,
False otherwise.
"""
while self._can_add_job():
try:
metadata_process = next(self._process_iter)
except StopIteration:
return False
self._jobs += 1
metadata_process.scheduler = self.sched_iface
metadata_process.addExitListener(self._metadata_exit)
metadata_process.start()
return True
def _metadata_exit(self, metadata_process):
self._jobs -= 1
if metadata_process.returncode != os.EX_OK:
self.returncode = 1
self._error_count += 1
self._valid_pkgs.discard(metadata_process.cpv)
portage.writemsg("Error processing %s, continuing...\n" % \
(metadata_process.cpv,), noiselevel=-1)
if self._consumer is not None:
# On failure, still notify the consumer (in this case the metadata
# argument is None).
self._consumer(metadata_process.cpv,
metadata_process.ebuild_path,
metadata_process.repo_path,
metadata_process.metadata)
self._schedule()
| gpl-2.0 | -7,811,879,227,615,180,000 | 25.766871 | 69 | 0.667201 | false | 2.978157 | false | false | false |
sanjeevtripurari/hue | desktop/core/ext-py/thrift-0.9.1/src/server/TProcessPoolServer.py | 94 | 3966 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
from multiprocessing import Process, Value, Condition, reduction
from TServer import TServer
from thrift.transport.TTransport import TTransportException
class TProcessPoolServer(TServer):
"""Server with a fixed size pool of worker subprocesses to service requests
Note that if you need shared state between the handlers - it's up to you!
Written by Dvir Volk, doat.com
"""
def __init__(self, *args):
TServer.__init__(self, *args)
self.numWorkers = 10
self.workers = []
self.isRunning = Value('b', False)
self.stopCondition = Condition()
self.postForkCallback = None
def setPostForkCallback(self, callback):
if not callable(callback):
raise TypeError("This is not a callback!")
self.postForkCallback = callback
def setNumWorkers(self, num):
"""Set the number of worker threads that should be created"""
self.numWorkers = num
def workerProcess(self):
"""Loop getting clients from the shared queue and process them"""
if self.postForkCallback:
self.postForkCallback()
while self.isRunning.value:
try:
client = self.serverTransport.accept()
self.serveClient(client)
except (KeyboardInterrupt, SystemExit):
return 0
except Exception, x:
logging.exception(x)
def serveClient(self, client):
"""Process input/output from a client for as long as possible"""
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
oprot = self.outputProtocolFactory.getProtocol(otrans)
try:
while True:
self.processor.process(iprot, oprot)
except TTransportException, tx:
pass
except Exception, x:
logging.exception(x)
itrans.close()
otrans.close()
def serve(self):
"""Start workers and put into queue"""
# this is a shared state that can tell the workers to exit when False
self.isRunning.value = True
# first bind and listen to the port
self.serverTransport.listen()
# fork the children
for i in range(self.numWorkers):
try:
w = Process(target=self.workerProcess)
w.daemon = True
w.start()
self.workers.append(w)
except Exception, x:
logging.exception(x)
# wait until the condition is set by stop()
while True:
self.stopCondition.acquire()
try:
self.stopCondition.wait()
break
except (SystemExit, KeyboardInterrupt):
break
except Exception, x:
logging.exception(x)
self.isRunning.value = False
def stop(self):
self.isRunning.value = False
self.stopCondition.acquire()
self.stopCondition.notify()
self.stopCondition.release()
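# Hedged usage sketch; MyService, MyHandler and the factories follow the usual
# Thrift generated-code conventions and are assumptions here, not part of this
# module:
#
#   processor = MyService.Processor(MyHandler())
#   transport = TSocket.TServerSocket(port=9090)
#   server = TProcessPoolServer(processor, transport,
#                               TTransport.TBufferedTransportFactory(),
#                               TBinaryProtocol.TBinaryProtocolFactory())
#   server.setNumWorkers(4)
#   server.serve()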
| apache-2.0 | -7,950,177,908,395,596,000 | 32.610169 | 79 | 0.634644 | false | 4.600928 | false | false | false |
shamangeorge/beets | beets/dbcore/db.py | 4 | 35898 | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""The central Model and Database constructs for DBCore.
"""
from __future__ import division, absolute_import, print_function
import time
import os
import re
from collections import defaultdict
import threading
import sqlite3
import contextlib
import beets
from beets.util import functemplate
from beets.util import py3_path
from beets.dbcore import types
from .query import MatchQuery, NullSort, TrueQuery
import six
if six.PY2:
from collections import Mapping
else:
from collections.abc import Mapping
class DBAccessError(Exception):
"""The SQLite database became inaccessible.
This can happen when trying to read or write the database when, for
example, the database file is deleted or otherwise disappears. There
is probably no way to recover from this error.
"""
class FormattedMapping(Mapping):
"""A `dict`-like formatted view of a model.
The accessor `mapping[key]` returns the formatted version of
`model[key]` as a unicode string.
If `for_path` is true, all path separators in the formatted values
are replaced.
"""
def __init__(self, model, for_path=False):
self.for_path = for_path
self.model = model
self.model_keys = model.keys(True)
def __getitem__(self, key):
if key in self.model_keys:
return self._get_formatted(self.model, key)
else:
raise KeyError(key)
def __iter__(self):
return iter(self.model_keys)
def __len__(self):
return len(self.model_keys)
def get(self, key, default=None):
if default is None:
default = self.model._type(key).format(None)
return super(FormattedMapping, self).get(key, default)
def _get_formatted(self, model, key):
value = model._type(key).format(model.get(key))
if isinstance(value, bytes):
value = value.decode('utf-8', 'ignore')
if self.for_path:
sep_repl = beets.config['path_sep_replace'].as_str()
sep_drive = beets.config['drive_sep_replace'].as_str()
if re.match(r'^\w:', value):
value = re.sub(r'(?<=^\w):', sep_drive, value)
for sep in (os.path.sep, os.path.altsep):
if sep:
value = value.replace(sep, sep_repl)
return value
class LazyConvertDict(object):
"""Lazily convert types for attributes fetched from the database
"""
def __init__(self, model_cls):
"""Initialize the object empty
"""
self.data = {}
self.model_cls = model_cls
self._converted = {}
def init(self, data):
"""Set the base data that should be lazily converted
"""
self.data = data
def _convert(self, key, value):
"""Convert the attribute type according the the SQL type
"""
return self.model_cls._type(key).from_sql(value)
def __setitem__(self, key, value):
"""Set an attribute value, assume it's already converted
"""
self._converted[key] = value
def __getitem__(self, key):
"""Get an attribute value, converting the type on demand
if needed
"""
if key in self._converted:
return self._converted[key]
elif key in self.data:
value = self._convert(key, self.data[key])
self._converted[key] = value
return value
def __delitem__(self, key):
"""Delete both converted and base data
"""
if key in self._converted:
del self._converted[key]
if key in self.data:
del self.data[key]
def keys(self):
"""Get a list of available field names for this object.
"""
return list(self._converted.keys()) + list(self.data.keys())
def copy(self):
"""Create a copy of the object.
"""
new = self.__class__(self.model_cls)
new.data = self.data.copy()
new._converted = self._converted.copy()
return new
# Act like a dictionary.
def update(self, values):
"""Assign all values in the given dict.
"""
for key, value in values.items():
self[key] = value
def items(self):
"""Iterate over (key, value) pairs that this object contains.
Computed fields are not included.
"""
for key in self:
yield key, self[key]
def get(self, key, default=None):
"""Get the value for a given key or `default` if it does not
exist.
"""
if key in self:
return self[key]
else:
return default
def __contains__(self, key):
"""Determine whether `key` is an attribute on this object.
"""
return key in self.keys()
def __iter__(self):
"""Iterate over the available field names (excluding computed
fields).
"""
return iter(self.keys())
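# Illustrative note, not part of upstream beets: for a model class whose _fields
# maps (say) 'bpm' to types.INTEGER, a LazyConvertDict initialised via
# d.init({'bpm': u'120'}) defers the INTEGER.from_sql() conversion until
# d['bpm'] is first read, then caches the converted value in _converted.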
# Abstract base for model classes.
class Model(object):
"""An abstract object representing an object in the database. Model
objects act like dictionaries (i.e., they allow subscript access like
``obj['field']``). The same field set is available via attribute
access as a shortcut (i.e., ``obj.field``). Three kinds of attributes are
available:
* **Fixed attributes** come from a predetermined list of field
names. These fields correspond to SQLite table columns and are
thus fast to read, write, and query.
* **Flexible attributes** are free-form and do not need to be listed
ahead of time.
* **Computed attributes** are read-only fields computed by a getter
function provided by a plugin.
Access to all three field types is uniform: ``obj.field`` works the
same regardless of whether ``field`` is fixed, flexible, or
computed.
Model objects can optionally be associated with a `Library` object,
in which case they can be loaded and stored from the database. Dirty
flags are used to track which fields need to be stored.
"""
# Abstract components (to be provided by subclasses).
_table = None
"""The main SQLite table name.
"""
_flex_table = None
"""The flex field SQLite table name.
"""
_fields = {}
"""A mapping indicating available "fixed" fields on this type. The
keys are field names and the values are `Type` objects.
"""
_search_fields = ()
"""The fields that should be queried by default by unqualified query
terms.
"""
_types = {}
"""Optional Types for non-fixed (i.e., flexible and computed) fields.
"""
_sorts = {}
"""Optional named sort criteria. The keys are strings and the values
are subclasses of `Sort`.
"""
_queries = {}
"""Named queries that use a field-like `name:value` syntax but which
do not relate to any specific field.
"""
_always_dirty = False
"""By default, fields only become "dirty" when their value actually
changes. Enabling this flag marks fields as dirty even when the new
value is the same as the old value (e.g., `o.f = o.f`).
"""
@classmethod
def _getters(cls):
"""Return a mapping from field names to getter functions.
"""
# We could cache this if it becomes a performance problem to
# gather the getter mapping every time.
raise NotImplementedError()
def _template_funcs(self):
"""Return a mapping from function names to text-transformer
functions.
"""
# As above: we could consider caching this result.
raise NotImplementedError()
# Basic operation.
def __init__(self, db=None, **values):
"""Create a new object with an optional Database association and
initial field values.
"""
self._db = db
self._dirty = set()
self._values_fixed = LazyConvertDict(self)
self._values_flex = LazyConvertDict(self)
# Initial contents.
self.update(values)
self.clear_dirty()
@classmethod
def _awaken(cls, db=None, fixed_values={}, flex_values={}):
"""Create an object with values drawn from the database.
This is a performance optimization: the checks involved with
ordinary construction are bypassed.
"""
obj = cls(db)
obj._values_fixed.init(fixed_values)
obj._values_flex.init(flex_values)
return obj
def __repr__(self):
return '{0}({1})'.format(
type(self).__name__,
', '.join('{0}={1!r}'.format(k, v) for k, v in dict(self).items()),
)
def clear_dirty(self):
"""Mark all fields as *clean* (i.e., not needing to be stored to
the database).
"""
self._dirty = set()
def _check_db(self, need_id=True):
"""Ensure that this object is associated with a database row: it
has a reference to a database (`_db`) and an id. A ValueError
exception is raised otherwise.
"""
if not self._db:
raise ValueError(
u'{0} has no database'.format(type(self).__name__)
)
if need_id and not self.id:
raise ValueError(u'{0} has no id'.format(type(self).__name__))
def copy(self):
"""Create a copy of the model object.
The field values and other state is duplicated, but the new copy
remains associated with the same database as the old object.
(A simple `copy.deepcopy` will not work because it would try to
duplicate the SQLite connection.)
"""
new = self.__class__()
new._db = self._db
new._values_fixed = self._values_fixed.copy()
new._values_flex = self._values_flex.copy()
new._dirty = self._dirty.copy()
return new
# Essential field accessors.
@classmethod
def _type(cls, key):
"""Get the type of a field, a `Type` instance.
If the field has no explicit type, it is given the base `Type`,
which does no conversion.
"""
return cls._fields.get(key) or cls._types.get(key) or types.DEFAULT
def __getitem__(self, key):
"""Get the value for a field. Raise a KeyError if the field is
not available.
"""
getters = self._getters()
if key in getters: # Computed.
return getters[key](self)
elif key in self._fields: # Fixed.
if key in self._values_fixed:
return self._values_fixed[key]
else:
return self._type(key).null
elif key in self._values_flex: # Flexible.
return self._values_flex[key]
else:
raise KeyError(key)
def _setitem(self, key, value):
"""Assign the value for a field, return whether new and old value
differ.
"""
# Choose where to place the value.
if key in self._fields:
source = self._values_fixed
else:
source = self._values_flex
# If the field has a type, filter the value.
value = self._type(key).normalize(value)
# Assign value and possibly mark as dirty.
old_value = source.get(key)
source[key] = value
changed = old_value != value
if self._always_dirty or changed:
self._dirty.add(key)
return changed
def __setitem__(self, key, value):
"""Assign the value for a field.
"""
self._setitem(key, value)
def __delitem__(self, key):
"""Remove a flexible attribute from the model.
"""
if key in self._values_flex: # Flexible.
del self._values_flex[key]
self._dirty.add(key) # Mark for dropping on store.
elif key in self._fields: # Fixed
setattr(self, key, self._type(key).null)
elif key in self._getters(): # Computed.
raise KeyError(u'computed field {0} cannot be deleted'.format(key))
else:
raise KeyError(u'no such field {0}'.format(key))
def keys(self, computed=False):
"""Get a list of available field names for this object. The
`computed` parameter controls whether computed (plugin-provided)
fields are included in the key list.
"""
base_keys = list(self._fields) + list(self._values_flex.keys())
if computed:
return base_keys + list(self._getters().keys())
else:
return base_keys
@classmethod
def all_keys(cls):
"""Get a list of available keys for objects of this type.
Includes fixed and computed fields.
"""
return list(cls._fields) + list(cls._getters().keys())
# Act like a dictionary.
def update(self, values):
"""Assign all values in the given dict.
"""
for key, value in values.items():
self[key] = value
def items(self):
"""Iterate over (key, value) pairs that this object contains.
Computed fields are not included.
"""
for key in self:
yield key, self[key]
def get(self, key, default=None):
"""Get the value for a given key or `default` if it does not
exist.
"""
if key in self:
return self[key]
else:
return default
def __contains__(self, key):
"""Determine whether `key` is an attribute on this object.
"""
return key in self.keys(True)
def __iter__(self):
"""Iterate over the available field names (excluding computed
fields).
"""
return iter(self.keys())
# Convenient attribute access.
def __getattr__(self, key):
if key.startswith('_'):
raise AttributeError(u'model has no attribute {0!r}'.format(key))
else:
try:
return self[key]
except KeyError:
raise AttributeError(u'no such field {0!r}'.format(key))
def __setattr__(self, key, value):
if key.startswith('_'):
super(Model, self).__setattr__(key, value)
else:
self[key] = value
def __delattr__(self, key):
if key.startswith('_'):
super(Model, self).__delattr__(key)
else:
del self[key]
# Database interaction (CRUD methods).
def store(self, fields=None):
"""Save the object's metadata into the library database.
:param fields: the fields to be stored. If not specified, all fields
will be.
"""
if fields is None:
fields = self._fields
self._check_db()
# Build assignments for query.
assignments = []
subvars = []
for key in fields:
if key != 'id' and key in self._dirty:
self._dirty.remove(key)
assignments.append(key + '=?')
value = self._type(key).to_sql(self[key])
subvars.append(value)
assignments = ','.join(assignments)
with self._db.transaction() as tx:
# Main table update.
if assignments:
query = 'UPDATE {0} SET {1} WHERE id=?'.format(
self._table, assignments
)
subvars.append(self.id)
tx.mutate(query, subvars)
# Modified/added flexible attributes.
for key, value in self._values_flex.items():
if key in self._dirty:
self._dirty.remove(key)
tx.mutate(
'INSERT INTO {0} '
'(entity_id, key, value) '
'VALUES (?, ?, ?);'.format(self._flex_table),
(self.id, key, value),
)
# Deleted flexible attributes.
for key in self._dirty:
tx.mutate(
'DELETE FROM {0} '
'WHERE entity_id=? AND key=?'.format(self._flex_table),
(self.id, key)
)
self.clear_dirty()
def load(self):
"""Refresh the object's metadata from the library database.
"""
self._check_db()
stored_obj = self._db._get(type(self), self.id)
assert stored_obj is not None, u"object {0} not in DB".format(self.id)
self._values_fixed = LazyConvertDict(self)
self._values_flex = LazyConvertDict(self)
self.update(dict(stored_obj))
self.clear_dirty()
def remove(self):
"""Remove the object's associated rows from the database.
"""
self._check_db()
with self._db.transaction() as tx:
tx.mutate(
'DELETE FROM {0} WHERE id=?'.format(self._table),
(self.id,)
)
tx.mutate(
'DELETE FROM {0} WHERE entity_id=?'.format(self._flex_table),
(self.id,)
)
def add(self, db=None):
"""Add the object to the library database. This object must be
associated with a database; you can provide one via the `db`
parameter or use the currently associated database.
The object's `id` and `added` fields are set along with any
current field values.
"""
if db:
self._db = db
self._check_db(False)
with self._db.transaction() as tx:
new_id = tx.mutate(
'INSERT INTO {0} DEFAULT VALUES'.format(self._table)
)
self.id = new_id
self.added = time.time()
# Mark every non-null field as dirty and store.
for key in self:
if self[key] is not None:
self._dirty.add(key)
self.store()
# Formatting and templating.
_formatter = FormattedMapping
def formatted(self, for_path=False):
"""Get a mapping containing all values on this object formatted
as human-readable unicode strings.
"""
return self._formatter(self, for_path)
def evaluate_template(self, template, for_path=False):
"""Evaluate a template (a string or a `Template` object) using
the object's fields. If `for_path` is true, then no new path
separators will be added to the template.
"""
# Perform substitution.
if isinstance(template, six.string_types):
template = functemplate.template(template)
return template.substitute(self.formatted(for_path),
self._template_funcs())
# Parsing.
@classmethod
def _parse(cls, key, string):
"""Parse a string as a value for the given key.
"""
if not isinstance(string, six.string_types):
raise TypeError(u"_parse() argument must be a string")
return cls._type(key).parse(string)
def set_parse(self, key, string):
"""Set the object's key to a value represented by a string.
"""
self[key] = self._parse(key, string)
# Database controller and supporting interfaces.
class Results(object):
"""An item query result set. Iterating over the collection lazily
constructs LibModel objects that reflect database rows.
"""
def __init__(self, model_class, rows, db, flex_rows,
query=None, sort=None):
"""Create a result set that will construct objects of type
`model_class`.
`model_class` is a subclass of `LibModel` that will be
constructed. `rows` is a query result: a list of mappings. The
new objects will be associated with the database `db`.
If `query` is provided, it is used as a predicate to filter the
results for a "slow query" that cannot be evaluated by the
database directly. If `sort` is provided, it is used to sort the
full list of results before returning. This means it is a "slow
sort" and all objects must be built before returning the first
one.
"""
self.model_class = model_class
self.rows = rows
self.db = db
self.query = query
self.sort = sort
self.flex_rows = flex_rows
# We keep a queue of rows we haven't yet consumed for
# materialization. We preserve the original total number of
# rows.
self._rows = rows
self._row_count = len(rows)
# The materialized objects corresponding to rows that have been
# consumed.
self._objects = []
def _get_objects(self):
"""Construct and generate Model objects for they query. The
objects are returned in the order emitted from the database; no
slow sort is applied.
For performance, this generator caches materialized objects to
avoid constructing them more than once. This way, iterating over
a `Results` object a second time should be much faster than the
first.
"""
# Index flexible attributes by the item ID, so we have easier access
flex_attrs = self._get_indexed_flex_attrs()
index = 0 # Position in the materialized objects.
while index < len(self._objects) or self._rows:
# Are there previously-materialized objects to produce?
if index < len(self._objects):
yield self._objects[index]
index += 1
# Otherwise, we consume another row, materialize its object
# and produce it.
else:
while self._rows:
row = self._rows.pop(0)
obj = self._make_model(row, flex_attrs.get(row['id'], {}))
                    # If there is a slow-query predicate, ensure that the
# object passes it.
if not self.query or self.query.match(obj):
self._objects.append(obj)
index += 1
yield obj
break
def __iter__(self):
"""Construct and generate Model objects for all matching
objects, in sorted order.
"""
if self.sort:
# Slow sort. Must build the full list first.
objects = self.sort.sort(list(self._get_objects()))
return iter(objects)
else:
# Objects are pre-sorted (i.e., by the database).
return self._get_objects()
def _get_indexed_flex_attrs(self):
""" Index flexible attributes by the entity id they belong to
"""
flex_values = dict()
for row in self.flex_rows:
if row['entity_id'] not in flex_values:
flex_values[row['entity_id']] = dict()
flex_values[row['entity_id']][row['key']] = row['value']
return flex_values
def _make_model(self, row, flex_values={}):
""" Create a Model object for the given row
"""
cols = dict(row)
values = dict((k, v) for (k, v) in cols.items()
if not k[:4] == 'flex')
# Construct the Python object
obj = self.model_class._awaken(self.db, values, flex_values)
return obj
def __len__(self):
"""Get the number of matching objects.
"""
if not self._rows:
# Fully materialized. Just count the objects.
return len(self._objects)
elif self.query:
# A slow query. Fall back to testing every object.
count = 0
for obj in self:
count += 1
return count
else:
# A fast query. Just count the rows.
return self._row_count
def __nonzero__(self):
"""Does this result contain any objects?
"""
return self.__bool__()
def __bool__(self):
"""Does this result contain any objects?
"""
return bool(len(self))
def __getitem__(self, n):
"""Get the nth item in this result set. This is inefficient: all
items up to n are materialized and thrown away.
"""
if not self._rows and not self.sort:
# Fully materialized and already in order. Just look up the
# object.
return self._objects[n]
it = iter(self)
try:
for i in range(n):
next(it)
return next(it)
except StopIteration:
raise IndexError(u'result index {0} out of range'.format(n))
def get(self):
"""Return the first matching object, or None if no objects
match.
"""
it = iter(self)
try:
return next(it)
except StopIteration:
return None
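# Illustrative sketch (not part of the original module): how a Results set is
# typically consumed, based on the behaviour documented above. `lib` stands for
# any Database subclass instance and `SomeModel` for one of its Model classes.
#
#     results = lib._fetch(SomeModel)    # returns a Results instance
#     print(len(results))                # row count (slow count if filtered)
#     for obj in results:                # lazily materializes Model objects
#         print(obj.id)
#     first = results.get()              # first match, or None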
class Transaction(object):
"""A context manager for safe, concurrent access to the database.
All SQL commands should be executed through a transaction.
"""
def __init__(self, db):
self.db = db
def __enter__(self):
"""Begin a transaction. This transaction may be created while
another is active in a different thread.
"""
with self.db._tx_stack() as stack:
first = not stack
stack.append(self)
if first:
# Beginning a "root" transaction, which corresponds to an
# SQLite transaction.
self.db._db_lock.acquire()
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Complete a transaction. This must be the most recently
entered but not yet exited transaction. If it is the last active
transaction, the database updates are committed.
"""
with self.db._tx_stack() as stack:
assert stack.pop() is self
empty = not stack
if empty:
# Ending a "root" transaction. End the SQLite transaction.
self.db._connection().commit()
self.db._db_lock.release()
def query(self, statement, subvals=()):
"""Execute an SQL statement with substitution values and return
a list of rows from the database.
"""
cursor = self.db._connection().execute(statement, subvals)
return cursor.fetchall()
def mutate(self, statement, subvals=()):
"""Execute an SQL statement with substitution values and return
the row ID of the last affected row.
"""
try:
cursor = self.db._connection().execute(statement, subvals)
return cursor.lastrowid
except sqlite3.OperationalError as e:
# In two specific cases, SQLite reports an error while accessing
# the underlying database file. We surface these exceptions as
# DBAccessError so the application can abort.
if e.args[0] in ("attempt to write a readonly database",
"unable to open database file"):
raise DBAccessError(e.args[0])
else:
raise
def script(self, statements):
"""Execute a string containing multiple SQL statements."""
self.db._connection().executescript(statements)
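# Illustrative sketch (not in the original source): using a Transaction as a
# context manager, as described above. `db` stands for any Database instance
# and the table/column names are hypothetical.
#
#     with db.transaction() as tx:
#         rows = tx.query('SELECT * FROM items WHERE id = ?', (some_id,))
#         new_id = tx.mutate('INSERT INTO items (title) VALUES (?)', ('x',))
#     # leaving the outermost `with` block commits and releases the lock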
class Database(object):
"""A container for Model objects that wraps an SQLite database as
the backend.
"""
_models = ()
"""The Model subclasses representing tables in this database.
"""
supports_extensions = hasattr(sqlite3.Connection, 'enable_load_extension')
"""Whether or not the current version of SQLite supports extensions"""
def __init__(self, path, timeout=5.0):
self.path = path
self.timeout = timeout
self._connections = {}
self._tx_stacks = defaultdict(list)
self._extensions = []
# A lock to protect the _connections and _tx_stacks maps, which
# both map thread IDs to private resources.
self._shared_map_lock = threading.Lock()
# A lock to protect access to the database itself. SQLite does
# allow multiple threads to access the database at the same
# time, but many users were experiencing crashes related to this
# capability: where SQLite was compiled without HAVE_USLEEP, its
# backoff algorithm in the case of contention was causing
# whole-second sleeps (!) that would trigger its internal
# timeout. Using this lock ensures only one SQLite transaction
# is active at a time.
self._db_lock = threading.Lock()
# Set up database schema.
for model_cls in self._models:
self._make_table(model_cls._table, model_cls._fields)
self._make_attribute_table(model_cls._flex_table)
# Primitive access control: connections and transactions.
def _connection(self):
"""Get a SQLite connection object to the underlying database.
One connection object is created per thread.
"""
thread_id = threading.current_thread().ident
with self._shared_map_lock:
if thread_id in self._connections:
return self._connections[thread_id]
else:
conn = self._create_connection()
self._connections[thread_id] = conn
return conn
def _create_connection(self):
"""Create a SQLite connection to the underlying database.
Makes a new connection every time. If you need to configure the
connection settings (e.g., add custom functions), override this
method.
"""
# Make a new connection. The `sqlite3` module can't use
# bytestring paths here on Python 3, so we need to
# provide a `str` using `py3_path`.
conn = sqlite3.connect(
py3_path(self.path), timeout=self.timeout
)
if self.supports_extensions:
conn.enable_load_extension(True)
            # Load any extensions that are already loaded for other connections.
for path in self._extensions:
conn.load_extension(path)
# Access SELECT results like dictionaries.
conn.row_factory = sqlite3.Row
return conn
def _close(self):
"""Close the all connections to the underlying SQLite database
from all threads. This does not render the database object
unusable; new connections can still be opened on demand.
"""
with self._shared_map_lock:
self._connections.clear()
@contextlib.contextmanager
def _tx_stack(self):
"""A context manager providing access to the current thread's
transaction stack. The context manager synchronizes access to
the stack map. Transactions should never migrate across threads.
"""
thread_id = threading.current_thread().ident
with self._shared_map_lock:
yield self._tx_stacks[thread_id]
def transaction(self):
"""Get a :class:`Transaction` object for interacting directly
with the underlying SQLite database.
"""
return Transaction(self)
def load_extension(self, path):
"""Load an SQLite extension into all open connections."""
if not self.supports_extensions:
raise ValueError(
'this sqlite3 installation does not support extensions')
self._extensions.append(path)
# Load the extension into every open connection.
for conn in self._connections.values():
conn.load_extension(path)
# Schema setup and migration.
def _make_table(self, table, fields):
"""Set up the schema of the database. `fields` is a mapping
from field names to `Type`s. Columns are added if necessary.
"""
# Get current schema.
with self.transaction() as tx:
rows = tx.query('PRAGMA table_info(%s)' % table)
current_fields = set([row[1] for row in rows])
field_names = set(fields.keys())
if current_fields.issuperset(field_names):
# Table exists and has all the required columns.
return
if not current_fields:
# No table exists.
columns = []
for name, typ in fields.items():
columns.append('{0} {1}'.format(name, typ.sql))
setup_sql = 'CREATE TABLE {0} ({1});\n'.format(table,
', '.join(columns))
else:
            # Table exists but does not match the field set.
setup_sql = ''
for name, typ in fields.items():
if name in current_fields:
continue
setup_sql += 'ALTER TABLE {0} ADD COLUMN {1} {2};\n'.format(
table, name, typ.sql
)
with self.transaction() as tx:
tx.script(setup_sql)
def _make_attribute_table(self, flex_table):
"""Create a table and associated index for flexible attributes
for the given entity (if they don't exist).
"""
with self.transaction() as tx:
tx.script("""
CREATE TABLE IF NOT EXISTS {0} (
id INTEGER PRIMARY KEY,
entity_id INTEGER,
key TEXT,
value TEXT,
UNIQUE(entity_id, key) ON CONFLICT REPLACE);
CREATE INDEX IF NOT EXISTS {0}_by_entity
ON {0} (entity_id);
""".format(flex_table))
# Querying.
def _fetch(self, model_cls, query=None, sort=None):
"""Fetch the objects of type `model_cls` matching the given
query. The query may be given as a string, string sequence, a
Query object, or None (to fetch everything). `sort` is an
`Sort` object.
"""
query = query or TrueQuery() # A null query.
sort = sort or NullSort() # Unsorted.
where, subvals = query.clause()
order_by = sort.order_clause()
sql = ("SELECT * FROM {0} WHERE {1} {2}").format(
model_cls._table,
where or '1',
"ORDER BY {0}".format(order_by) if order_by else '',
)
# Fetch flexible attributes for items matching the main query.
# Doing the per-item filtering in python is faster than issuing
# one query per item to sqlite.
flex_sql = ("""
SELECT * FROM {0} WHERE entity_id IN
(SELECT id FROM {1} WHERE {2});
""".format(
model_cls._flex_table,
model_cls._table,
where or '1',
)
)
with self.transaction() as tx:
rows = tx.query(sql, subvals)
flex_rows = tx.query(flex_sql, subvals)
return Results(
model_cls, rows, self, flex_rows,
None if where else query, # Slow query component.
sort if sort.is_slow() else None, # Slow sort component.
)
def _get(self, model_cls, id):
"""Get a Model object by its id or None if the id does not
exist.
"""
return self._fetch(model_cls, MatchQuery('id', id)).get()
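# Illustrative sketch (added for clarity, not in the original file): a minimal
# concrete Database. The field names and type constants are hypothetical and
# assume a companion `types` module.
#
#     class Item(Model):
#         _table = 'items'
#         _flex_table = 'item_attributes'
#         _fields = {'id': types.PRIMARY_ID, 'title': types.STRING}
#
#     class MyDatabase(Database):
#         _models = (Item,)
#
#     db = MyDatabase('library.db')
#     item = db._get(Item, 1)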
| mit | -3,355,687,870,484,750,300 | 32.580917 | 79 | 0.572595 | false | 4.425296 | false | false | false |
philpot/pymod | boutique.py | 1 | 11494 | #!/usr/bin/python
# Filename: boutique.py
### REDESIGN of market.py
### (1) use web.py
### generator-based, yields web.storage from which you can whatever you need (url, sitekey)
'''
boutique
@author: Andrew Philpot
@version 0.6
WAT boutique module
Usage: python boutique.py
Options:
\t-h, --help:\tprint help to STDOUT and quit
\t-v, --verbose:\tverbose output
\t-s, --source:\tsource default backpage
\t-a, --application:\tapplication default escort (johnboard)
\t-c, --city:\tcity, must be quoted and include state, e.g., 'San Jose, CA', no default
\t-m, --market:\tFAA airport code used to designate market, default LAX
\t-t, --tier:\tsee wataux.markettiers, integer 1-99, no default
\t-r, --region:\t4-digit region code or 5-char region desig, see wataux.marketregions, no default
'''
import sys
import getopt
# import trbotdb
import watdb
import util
import re
import web
web.config.debug = False
# import logging
from watlog import watlog
logger = watlog("wat.boutique")
logger.info('wat.boutique initialized')
VERSION = '0.6'
REVISION = "$Revision: 22999 $"
# defaults
VERBOSE = True
SOURCE = 'backpage'
APPLICATION = 'escort'
# MARKET = 'LAX'
MARKET = None
CODE = MARKET
CITY = None
SITEKEY = None
TIER = None
REGION = None
# REGIONID = None
boutiqueClassNames = {"backpage": "BackpageBoutique",
"cityvibe": "CityvibeBoutique",
"eros": "ErosBoutique",
"humaniplex": "HumaniplexBoutique",
"myredbook": "MyredbookBoutique",
"sugardaddy": "SugardaddyBoutique"}
def boutiqueClassName(source):
return boutiqueClassNames.get(source, "Boutique")
def boutiqueClass(source):
className = boutiqueClassName(source)
return globals().get(className)
# moved here from crawl.py
def interpretMarket(desig):
'''market designator could be:
AAA: three letters means faa airport code, use key "market"
RGAAA: five letters means region designator, use key "region"
1111: four digits means region code, use key "region"
11: one or two digits means tier code, use key "tier"
other string with space or comma in it: city name, use key "city"
any other string: site key, use key "sitekey"
'''
try:
i = int(desig)
if 1000<=i and i<=9999:
# region code
return ("region", i)
elif 0<=i and i<=99:
# tier code
return ("tier", i)
except ValueError:
pass
if re.search('^RG[A-Z]{3}', desig):
# region designator
return ("region", desig)
if re.search('^[A-Z]{3}', desig):
# FAA airport code
return ("market", desig)
if " " in desig or "," in desig:
return ("city", desig)
return ("sitekey", desig)
# Let's consider that the idea is to get tuples keyed to sitekeys including
# source (backpage, etc.)
# market anchor city/location (San Francisco, CA)
# application (escort, johnboard)
# code (airport code of the anchor city, SFO)
# regionid 4001/region (RGSFO), a grouping of markets
# tier (1=FBI focus cities, etc.)/tiername
class Boutique(object):
'''create Boutique'''
def __init__(self, verbose=VERBOSE, application=APPLICATION,
code=CODE, city=CITY, sitekey=SITEKEY,
tier=TIER,
region=REGION):
self.verbose = verbose
self.application = application
self.code = code if code else None
self.city = city if city else None
self.sitekey = sitekey if sitekey else None
self.tier = tier if tier else None
(self.region, self.regionid) = (None, None)
try:
self.regionid = int(region)
except:
self.region = region
def genRows(self):
db = watdb.Watdb(conf='wataux', engine=None)
db.connect()
required = []
if self.application:
required.append(wh('application', self.application))
else:
raise ValueError("Must supply application")
if self.source:
required.append(wh('source', self.source))
else:
raise ValueError("Must supply source")
options = []
if self.code:
options.append(wh('code', self.code))
if self.city:
options.append(wh('city', self.city))
if self.sitekey:
options.append(wh('sitekey', self.sitekey))
if self.tier:
options.append(wh('tier', self.tier))
if self.region:
options.append(wh('region', self.region))
if self.regionid:
options.append(wh('regionid', self.regionid))
# logger.info("options = %s", options)
if options:
pass
else:
raise ValueError("Must supply at least one option: code,city,sitekey,tier,region,regionid")
wheres=required
wheres.extend(options)
where = ' and '.join(wheres)
# logger.info(where)
empty = True
# formerly db.select('sites_master', where=where):
sql = 'select * from sites_master where %s' % where
# logger.info("sql = %s", sql)
for row in db.maybeFetch(sql):
empty = False
yield row
# am trusting that this causes the db connection to be freed
db = db.disconnect()
if empty:
if self.verbose:
print >> sys.stderr, "No rows were generated for %s" % wheres
logger.warn("No rows were generated for %s" % wheres)
def fetchBoutique(self, source, desig):
"""This should take a source such as 'backpage' and a desig such as a sitekey or city name and return a code?"""
rows = list(self.genRows())
logger.info("There should be one row, in fact there are %s: %s", len(rows), rows)
return []
fetchMarket = fetchBoutique
def wh(column_name, value, rel='='):
"""is sqlquote good enough to prevent SQL injection?"""
if value:
return """(`%s` %s %s)""" % (column_name, rel, watdb.sqlquote(str(value)))
else:
raise ValueError
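# Example (illustrative, not in the original file): wh('code', 'LAX') returns a
# SQL fragment of the form (`code` = <quoted value>), with quoting delegated to
# watdb.sqlquote; a different relation can be passed, e.g. wh('tier', 2, '>').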
class BackpageBoutique(Boutique):
def __init__(self, verbose=VERBOSE, application=APPLICATION,
code=CODE, city=CITY, sitekey=SITEKEY,
tier=TIER,
region=REGION):
'''create BPM'''
Boutique.__init__(self, verbose=verbose, application=application,
code=code, city=city, sitekey=sitekey,
tier=tier,
region=region)
self.source = 'backpage'
class CityvibeBoutique(Boutique):
def __init__(self, verbose=VERBOSE, application=APPLICATION,
code=CODE, city=CITY, sitekey=SITEKEY,
tier=TIER,
region=REGION):
'''create CVM'''
Boutique.__init__(self, verbose=verbose, application=application,
code=code, city=city, sitekey=sitekey,
tier=tier,
region=region)
self.source = 'cityvibe'
class MyredbookBoutique(Boutique):
def __init__(self, verbose=VERBOSE, application=APPLICATION,
code=CODE, city=CITY, sitekey=SITEKEY,
tier=TIER,
region=REGION):
'''create MRBM'''
Boutique.__init__(self, verbose=verbose, application=application,
code=code, city=city, sitekey=sitekey,
tier=tier,
region=region)
self.source = 'myredbook'
class HumaniplexBoutique(Boutique):
def __init__(self, verbose=VERBOSE, application=APPLICATION,
code=CODE, city=CITY, sitekey=SITEKEY,
tier=TIER,
region=REGION):
'''create HXM'''
Boutique.__init__(self, verbose=verbose, application=application,
code=code, city=city, sitekey=sitekey,
tier=tier,
region=region)
self.source = 'humaniplex'
class ErosBoutique(Boutique):
def __init__(self, verbose=VERBOSE, application=APPLICATION,
code=CODE, city=CITY, sitekey=SITEKEY,
tier=TIER,
region=REGION):
'''create ERM'''
Boutique.__init__(self, verbose=verbose, application=application,
code=code, city=city, sitekey=sitekey,
tier=tier,
region=region)
self.source = 'eros'
class SugardaddyBoutique(Boutique):
def __init__(self, verbose=VERBOSE, application=APPLICATION,
code=CODE, city=CITY, sitekey=SITEKEY,
tier=TIER,
region=REGION):
'''create SDM'''
Boutique.__init__(self, verbose=verbose, application=application,
code=code, city=city, sitekey=sitekey,
tier=tier,
region=region)
self.source = 'sugardaddy'
# 0.5 functional interface
def genSiteKeys(source=SOURCE,
verbose=VERBOSE, application=APPLICATION,
market=MARKET, city=CITY, sitekey=SITEKEY,
tier=TIER,
region=REGION):
return boutiqueClass(source)(verbose=verbose,
application=application,
code=market,
city=city,
sitekey=sitekey,
tier=tier,
region=region).genRows()
def main(argv=None):
'''this is called if run from command line'''
# process command line arguments
if argv is None:
argv = sys.argv
try:
opts, args = getopt.getopt(argv[1:], "hvs:a:c:m:t:r:",
["echo=", "help",
"source=", "application=", "city=", "market=", "tier=", "region="])
except getopt.error, msg:
print msg
print "for help use --help"
sys.exit(2)
# default options
my_verbose = VERBOSE
my_source = SOURCE
my_application = APPLICATION
my_city = CITY
my_market = MARKET
my_tier = TIER
my_region = REGION
# process options
for o,a in opts:
if o in ("-h","--help"):
print __doc__
sys.exit(0)
if o in ("--echo", ):
print a
if o in ("-v", "--verbose", ):
my_verbose = True
if o in ("-s", "--source", ):
my_source = a
if o in ("-a", "--application", ):
my_application = a
if o in ("-c", "--city", ):
my_city = a
if o in ("-m", "--market", ):
my_market = a
if o in ("-t", "--tier", ):
my_tier = a
if o in ("-r", "--region", ):
my_region = a
mktClass = boutiqueClass(my_source)
print mktClass
mkt = mktClass(verbose=my_verbose,
application=my_application, city=my_city, code=my_market,
tier=my_tier, region=my_region)
for row in mkt.genRows():
print row.source, row.application, row.tier, row.region, row.code, row.sitekey, row.url
# call main() if this is run as standalone
if __name__ == "__main__":
sys.exit(main())
# End of boutique.py
| apache-2.0 | 3,305,824,460,378,000,400 | 32.905605 | 120 | 0.555768 | false | 3.78715 | false | false | false |
fau-fablab/FabLabKasse | FabLabKasse/faucardPayment/dinterface/magpos.py | 1 | 4916 | #!/usr/bin/env python
"""
pymagpos -- MagnaCarta POS protocol (minimal robust implementation)
"""
import serial
import codes
import logging
import time
class MagposError(Exception):
""" Base Exception Class for this Module """
pass
class ResponseError(MagposError):
"""
    A ResponseError occurs when the response to a command does not match the command's OK signal.
"""
def __init__(self, function, code):
self.code = code
self.function = function
self.response = [code]
def store_rawdata(self, response):
""" Stores the raw response for evaluation purpose """
self.raw = response
def read_rawdata(self):
"""
Returns the raw response data
:return: Raw response data
:rtype: list of ints
"""
return self.raw
def __str__(self):
return ("[{0}] Unintended response received:{1}".format(self.function,
codes.desc.setdefault(self.code, self.code)))
class TransactionError(MagposError):
"""
    A TransactionError occurs when the amount that has been decreased does not match the given amount.
"""
def __init__(self, card, old, new, amount):
self.card = card
self.old = float(old)/100
self.new = float(new)/100
self.amount = float(amount)/100
def __str__(self):
return "Difference in balance does not match the amount that should have been decreased.\
\nCard:{0}\t Amount:{1:.2f}\nOld:{2:.2f}\tNew:{3:.2f}"\
.format(self.card, self.amount, self.old, self.new)
class ConnectionTimeoutError(MagposError):
"""
    A ConnectionTimeoutError occurs when the connection between the USB/RS232 reader and the MagnaBox is broken
and/or the MagnaBox does not send a response message.
"""
def __init__(self):
pass
def __str__(self):
return ("Serial connection to MagnaBox timed out (did not send command?)")
class MagPOS:
"""
MagPos Class implements functions to access payment features of the MagnaCarta-Security and Payment-System
"""
def __init__(self, device):
"""
Initializes the serial port communication on the given device port
:param device: serial port name
:type device: str
"""
pass
def start_connection(self, retries=5):
"""
Initializes the connection
:return: True if connection successful, False otherwise
:rtype: bool
:param retries: Max. Attempts to accomplish connection, Default value is 5
:type retries: int
"""
raise NotImplementedError()
def card_on_reader(self):
"""
Checks if there is a card on the reader
:return: True if card on reader, False if not
:rtype: bool
"""
raise NotImplementedError()
def set_display_mode(self, mode = 0, amount=0):
"""
Sets the display configuration
:return: True on success, False otherwise
:rtype: bool
:param mode: Config
:type mode: int
        :param amount: (Optional) Amount that is asked for on display
:type amount: int
"""
raise NotImplementedError()
def get_last_transaction_result(self):
"""
Retrieves the details of last unacknowledged transaction
:return: Returns List of relevant data: status code, card number and amount
:rtype: list[int,int,int]
"""
raise NotImplementedError()
def response_ack(self):
"""
        Sends an acknowledge-signal to the MagnaBox
"""
raise NotImplementedError()
def decrease_card_balance_and_token(self, amount, card_number=0, token_index=0):
"""
Gives command to decrease balance by amount
:return: Returns list of retrieved data: card number, old balance, new balance, token id
:rtype: list[int,int,int,int]
:param amount: Amount in Cents the card balance shall be decreased
:type amount: int
:param card_number: (Optional) sets the card number from which balance should be decreased
:type card_number: int
:param token_index: (Optional) sets token id which should be decreased by 1
:type token_index: int
"""
raise NotImplementedError()
def get_long_card_number_and_balance(self):
"""
Retrieves the card number and balance of the card on card reader
:return: Returns list containing the response data from MagnaBox: card number and balance
:rtype: list[int]
"""
raise NotImplementedError()
def close(self):
""" Closes serial connection to MagnaBox. Needed to release the serial port for further transactions."""
raise NotImplementedError()
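# Typical call sequence for an implementation of this interface (a sketch based
# on the docstrings above; not executable against this stub class):
#
#     pos = MagPOS('/dev/ttyUSB0')
#     if pos.start_connection() and pos.card_on_reader():
#         card, old, new, token = pos.decrease_card_balance_and_token(150)
#         pos.response_ack()
#     pos.close()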
if __name__ == '__main__':
pos = MagPOS(device='/dev/ttyUSB0')
pos.start_connection()
| gpl-3.0 | 7,384,428,041,022,479,000 | 30.113924 | 112 | 0.624085 | false | 4.327465 | false | false | false |
westernx/sgpublish | sgpublish/exporter/ui/publish/generic.py | 1 | 16872 | from __future__ import absolute_import
import functools
import os
import re
import sys
import tempfile
import traceback
import subprocess
import datetime
from PyQt4 import QtCore, QtGui
Qt = QtCore.Qt
from sgfs import SGFS
from sgactions.ticketui import ticket_ui_context
from sgpublish.uiutils import ComboBox, hbox, vbox, icon
class PublishSafetyError(RuntimeError):
pass
class TimeSpinner(QtGui.QSpinBox):
def __init__(self):
super(TimeSpinner, self).__init__(
singleStep=15,
maximum=60*8*5,
)
def textFromValue(self, value):
return '%d:%02d' % (value / 60, value % 60)
def valueFromText(self, text, strict=False):
m = re.match('(\d+):(\d{,2})', text)
if m:
return 60 * int(m.group(1)) + int(m.group(2) or 0)
try:
return int(text)
except ValueError:
pass
try:
return int(60 * float(text))
except ValueError:
pass
if strict:
return None
else:
return 0
def validate(self, text, pos):
if self.valueFromText(text) is not None:
return QtGui.QValidator.Acceptable, pos
else:
return QtGui.QValidator.Invalid, pos
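# Illustrative behaviour of TimeSpinner (not part of the original source):
# values are stored as minutes and rendered as hours:minutes, e.g.
#   textFromValue(90)      -> '1:30'
#   valueFromText('1:30')  -> 90
#   valueFromText('1.5')   -> 90   (fractional hours are accepted)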
class Widget(QtGui.QWidget):
# Windows should hide on these.
beforeScreenshot = QtCore.pyqtSignal()
afterScreenshot = QtCore.pyqtSignal()
# Need a signal to communicate across threads.
loaded_publishes = QtCore.pyqtSignal(object, object)
def __init__(self, exporter):
super(Widget, self).__init__()
self._exporter = exporter
self._existing_streams = set()
basename = os.path.basename(exporter.filename_hint)
basename = os.path.splitext(basename)[0]
basename = re.sub(r'[^\w-]+', '_', basename)
self._basename = re.sub(r'_*[rv]\d+', '', basename)
self._setup_ui()
# First screenshot.
self.take_full_screenshot()
def _setup_ui(self):
self.setLayout(QtGui.QVBoxLayout())
self._task_combo = ComboBox()
self._task_combo.addItem('Loading...', {'loading': True})
self._task_combo.currentIndexChanged.connect(self._task_changed)
self._name_combo = ComboBox()
self._name_combo.addItem('Loading...', {'loading': True})
self._name_combo.addItem('Create new stream...', {'new': True})
self._name_combo.currentIndexChanged.connect(self._name_changed)
self._tasksLabel = QtGui.QLabel("Task")
self.layout().addLayout(hbox(
vbox(self._tasksLabel, self._task_combo),
vbox("Publish Stream", self._name_combo),
spacing=4
))
self._name_field = QtGui.QLineEdit(self._basename)
self._name_field.setEnabled(False)
self._name_field.editingFinished.connect(self._on_name_edited)
self._version_spinbox = QtGui.QSpinBox()
self._version_spinbox.setMinimum(1)
self._version_spinbox.setMaximum(9999)
self._version_spinbox.valueChanged.connect(self._on_version_changed)
self._version_warning_issued = False
self.layout().addLayout(hbox(
vbox("Name", self._name_field),
vbox("Version", self._version_spinbox),
spacing=4
))
# Get publish data in the background.
self.loaded_publishes.connect(self._populate_existing_data)
self._thread = QtCore.QThread()
self._thread.run = self._fetch_existing_data
self._thread.start()
self._description = QtGui.QTextEdit('')
self._description.setMaximumHeight(100)
self._thumbnail_path = None
self._thumbnail_canvas = QtGui.QLabel()
self._thumbnail_canvas.setFrameShadow(QtGui.QFrame.Sunken)
self._thumbnail_canvas.setFrameShape(QtGui.QFrame.Panel)
self._thumbnail_canvas.setToolTip("Click to specify part of screen.")
self._thumbnail_canvas.mouseReleaseEvent = self.take_partial_screenshot
self.layout().addLayout(hbox(
vbox("Describe Your Changes", self._description),
vbox("Thumbnail", self._thumbnail_canvas),
))
self._movie_path = QtGui.QLineEdit()
self._movie_browse = QtGui.QPushButton(icon('silk/folder', size=12, as_icon=True), "Browse")
self._movie_browse.clicked.connect(self._on_movie_browse)
self._movie_layout = hbox(self._movie_path, self._movie_browse)
self.layout().addLayout(vbox("Path to Movie or Frames (to be copied to publish)", self._movie_layout, spacing=4))
self._movie_browse.setFixedHeight(self._movie_path.sizeHint().height())
self._movie_browse.setFixedWidth(self._movie_browse.sizeHint().width() + 2)
self._promote_checkbox = QtGui.QCheckBox("Promote to 'Version' for review")
# self.layout().addWidget(self._promote_checkbox)
self._timelog_spinbox = TimeSpinner()
add_hour = QtGui.QPushButton("+1 Hour")
add_hour.setFixedHeight(self._timelog_spinbox.sizeHint().height())
@add_hour.clicked.connect
def on_add_hour():
self._timelog_spinbox.setValue(self._timelog_spinbox.value() + 60)
add_day = QtGui.QPushButton("+1 Day")
add_day.setFixedHeight(self._timelog_spinbox.sizeHint().height())
@add_day.clicked.connect
def on_add_day():
self._timelog_spinbox.setValue(self._timelog_spinbox.value() + 60 * 8)
self.layout().addLayout(hbox(
vbox("Time to Log", hbox(self._timelog_spinbox, "hrs:mins", add_hour, add_day)),
vbox("Review", self._promote_checkbox),
))
def _fetch_existing_data(self):
try:
sgfs = SGFS()
tasks = sgfs.entities_from_path(self._exporter.workspace)
if not tasks:
raise ValueError('No entities in workspace %r' % self._exporter.workspace)
if any(x['type'] != 'Task' for x in tasks):
raise ValueError('Non-Task entity in workspace %r' % self._exporter.workspace)
publishes = sgfs.session.find(
'PublishEvent',
[
('sg_link.Task.id', 'in') + tuple(x['id'] for x in tasks),
('sg_type', 'is', self._exporter.publish_type),
('sg_version', 'greater_than', 0), # Skipped failures.
], [
'code',
'sg_version'
]
)
except Exception as e:
self._task_combo.clear()
self._task_combo.addItem('Loading Error! %s' % e, {})
raise
else:
self.loaded_publishes.emit(tasks, publishes)
def _populate_existing_data(self, tasks, publishes):
if tasks:
entity = tasks[0].fetch('entity')
name = entity.get('code') or entity.get('name')
if name:
self._tasksLabel.setText('Task on %s %s' % (entity['type'], name))
history = self._exporter.get_previous_publish_ids()
select = None
publishes.sort(key=lambda p: p['sg_version'])
for t_i, task in enumerate(tasks):
name_to_publish = {}
for publish in publishes:
if publish['sg_link'] is not task:
continue
self._existing_streams.add((task['id'], publish['code']))
name = publish['code']
name_to_publish[name] = publish
if publish['id'] in history:
select = t_i, name
self._task_combo.addItem('%s - %s' % task.fetch(('step.Step.short_name', 'content')), {
'task': task,
'publishes': name_to_publish,
})
if 'loading' in self._task_combo.itemData(0):
if self._task_combo.currentIndex() == 0:
self._task_combo.setCurrentIndex(1)
self._task_combo.removeItem(0)
if select:
self._task_combo.setCurrentIndex(select[0])
for i in xrange(self._name_combo.count()):
data = self._name_combo.itemData(i)
if data and data.get('name') == select[1]:
self._name_combo.setCurrentIndex(i)
break
def _task_changed(self, index):
data = self._name_combo.currentData()
if not data:
return
was_new = 'new' in data or 'loading' in data
self._name_combo.clear()
data = self._task_combo.currentData() or {}
for name, publish in sorted(data.get('publishes', {}).iteritems()):
self._name_combo.addItem('%s (v%04d)' % (name, publish['sg_version']), {'name': name, 'publish': publish})
self._name_combo.addItem('Create New Stream...', {'new': True})
if was_new:
self._name_combo.setCurrentIndex(self._name_combo.count() - 1)
else:
self._name_combo.setCurrentIndex(0)
def _name_changed(self, index):
data = self._name_combo.itemData(index)
if not data:
return
self._name_field.setEnabled('new' in data)
self._name_field.setText(data.get('name', self._basename))
self._version_spinbox.setValue(data.get('publish', {}).get('sg_version', 0) + 1)
def _on_name_edited(self):
name = str(self._name_field.text())
name = re.sub(r'\W+', '_', name).strip('_')
self._name_field.setText(name)
def _on_version_changed(self, new_value):
data = self._name_combo.itemData(self._name_combo.currentIndex())
if data.get('publish') and new_value != data['publish']['sg_version'] + 1 and not self._version_warning_issued:
res = QtGui.QMessageBox.warning(None,
"Manual Versions?",
"Are you sure you want to change the version?\n"
"The next one has already been selected for you...",
QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel,
QtGui.QMessageBox.Cancel
)
if res & QtGui.QMessageBox.Cancel:
self._version_spinbox.setValue(data['publish']['sg_version'] + 1)
return
self._version_warning_issued = True
def _on_movie_browse(self):
existing = str(self._movie_path.text())
dialog = QtGui.QFileDialog(None, "Select Movie or First Frame")
dialog.setFilter('Movie or Frame (*.mov *.exr *.tif *.tiff *.jpg *.jpeg)')
dialog.setFileMode(dialog.ExistingFile)
dialog.setDirectory(os.path.dirname(existing) if existing else os.getcwd())
if existing:
dialog.selectFile(existing)
if not dialog.exec_():
return
files = dialog.selectedFiles()
        path = str(files[0])
self.setFrames(path)
def setFrames(self, path):
self._movie_path.setText(path)
if path:
self._promote_checkbox.setCheckState(Qt.Checked)
def take_full_screenshot(self):
pass
def take_partial_screenshot(self, *args):
path = tempfile.NamedTemporaryFile(suffix=".png", prefix="screenshot", delete=False).name
self.beforeScreenshot.emit()
if sys.platform.startswith('darwin'):
# use built-in screenshot command on the mac
proc = subprocess.Popen(['screencapture', '-mis', path])
else:
proc = subprocess.Popen(['import', path])
proc.wait()
self.afterScreenshot.emit()
if os.stat(path).st_size:
self.setThumbnail(path)
def setThumbnail(self, path):
self._thumbnail_path = path
pixmap = QtGui.QPixmap(path).scaled(200, 100, Qt.KeepAspectRatio, Qt.SmoothTransformation)
self._thumbnail_canvas.setPixmap(pixmap)
self._thumbnail_canvas.setFixedSize(pixmap.size())
def name(self):
data = self._name_combo.currentData()
return data.get('name', str(self._name_field.text()))
def description(self):
return str(self._description.toPlainText())
def version(self):
return self._version_spinbox.value()
def thumbnail_path(self):
return self._thumbnail_path
def _path_is_image(self, path):
if os.path.splitext(path)[1][1:].lower() in (
'jpg', 'jpeg', 'tif', 'tiff', 'exr',
):
return path
def frames_path(self):
path = str(self._movie_path.text())
if path and self._path_is_image(path):
return path
return None
def movie_path(self):
path = str(self._movie_path.text())
if path and not self._path_is_image(path):
return path
return None
def safety_check(self, **kwargs):
# Check that the name is unique for publishes on this task.
task = self._task_combo.currentData().get('task')
existing_name = self._name_combo.currentData().get('name')
new_name = str(self._name_field.text())
if existing_name is None and (task['id'], new_name) in self._existing_streams:
print 'XXX', task['id'], repr(existing_name), repr(new_name)
print self._existing_streams
QtGui.QMessageBox.critical(self,
"Name Collision",
"You cannot create a new stream with the same name as an"
" existing one. Please select the existing stream or enter a"
" unique name.",
)
# Fatal.
return False
# Promoting to version without a movie.
if self._promote_checkbox.isChecked() and not (self.frames_path() or self.movie_path()):
QtGui.QMessageBox.critical(self,
"Review Version Without Movie",
"You cannot promote a publish for review without frames or a"
" movie.",
)
# Fatal.
return False
# Promoting to version without a timelog.
if self._promote_checkbox.isChecked() and not self._timelog_spinbox.value():
res = QtGui.QMessageBox.warning(self,
"Version without Time Log",
"Are you sure that this version did not take you any time?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
QtGui.QMessageBox.No,
)
if res & QtGui.QMessageBox.No:
return False
return True
def export(self, **kwargs):
with ticket_ui_context(pass_through=PublishSafetyError):
return self._export(kwargs)
def _export(self, kwargs):
if not self.safety_check(**kwargs):
raise PublishSafetyError()
task_data = self._task_combo.currentData()
task = task_data.get('task')
if not task:
sgfs = SGFS()
tasks = sgfs.entities_from_path(self._exporter.workspace, 'Task')
if not tasks:
raise ValueError('Could not find SGFS tagged entities')
task = tasks[0]
stream_data = self._name_combo.currentData()
parent = stream_data.get('publish')
# Do the promotion.
if self._promote_checkbox.isChecked():
review_version_fields = self._exporter.fields_for_review_version(**kwargs)
else:
review_version_fields = None
publisher = self._exporter.publish(task,
name=self.name(),
description=self.description(),
version=self.version(),
parent=parent,
thumbnail_path=self.thumbnail_path(),
frames_path=self.frames_path(),
movie_path=self.movie_path(),
review_version_fields=review_version_fields,
export_kwargs=kwargs,
)
# Create the timelog.
minutes = self._timelog_spinbox.value()
if minutes:
publisher.sgfs.session.create('TimeLog', {
'project': publisher.entity.project(),
'entity': publisher.link,
'user': publisher.sgfs.session.guess_user(),
'duration': minutes,
'description': '%s_v%04d' % (publisher.name, publisher.version),
'date': datetime.datetime.utcnow().date(),
})
return publisher
| bsd-3-clause | 1,374,290,374,414,146,600 | 35.283871 | 121 | 0.55761 | false | 4.203288 | false | false | false |
szilvajuhos/smallRef | scripts/makeShortContigs.py | 1 | 1273 | #! /usr/bin/env python
import click
import re
import sys
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
# Usage something like:
# makeShortContigs.py -l 200000 -c 1,2,3,X -r human_g1k_v37_decoy.fasta
@click.command(context_settings = dict( help_option_names = ['-h', '--help'] ))
@click.option('--length', '-l', type=int, help='length of the contigs to export', required=True)
@click.option('--contigs', '-c', type=str, help='ids of contigs in the fasta file', required=True)
@click.option('--reference', '-r', type=str, help='source reference file', required=True)
def exportShortContigs(length,contigs,reference):
# this is the main processing routine
contigsToPrint = contigs.split(",")
for seq_record in SeqIO.parse(reference,"fasta"):
# seq_record.id is something like
# >chr1 dna:chromosome chromosome:GRCh37:1:1:249250621:1
# but we want to have the "chr1" only, so have to split and replace
shortID = seq_record.id.split()[0].replace(">","")
        if shortID in contigsToPrint:
newSeq = seq_record.seq[0:length]
sys.stdout.write( SeqRecord(newSeq, id=seq_record.id, description=seq_record.description).format("fasta") )
if __name__ == "__main__":
exportShortContigs()
| mit | 1,313,171,125,127,874,600 | 36.441176 | 110 | 0.706206 | false | 2.967366 | false | false | false |
RobinCPC/algorithm-practice | IntegerArray/removeElement.py | 1 | 1416 | """
#: 27
Title: Remove Element
Description:
------
Given an array and a value, remove all instances of that value in place and return the new length.
Do not allocate extra space for another array, you must do this in place with constant memory.
The order of elements can be changed. It doesn't matter what you leave beyond the new length.
Example:
Given input array nums = `[3,2,2,3]`, val = `3`
Your function should return length = 2, with the first two elements of nums being 2.
------
Time: O(n)
Space: O(1)
Difficulty: Easy
"""
class Solution(object):
def removeElement(self, nums, val):
"""
:type nums: List[int]
:type val: int
:rtype: int
"""
h = 0
t = nums.__len__() - 1
while h <= t:
if nums[h] == val:
nums[h] = nums[t]
t -= 1
else:
h += 1
return t + 1
def removeElement2(self, nums, val):
"""
        Alternative method, but not faster than the first one
:type nums: List[int]
:type val: int
:rtype: int
"""
while True:
if val in nums:
nums.pop(nums.index(val))
else:
break
return len(nums)
if __name__ == '__main__':
Sol = Solution()
nums = [4,1,2,3,5]
val = 4
leng = Sol.removeElement(nums, val)
print( nums[:leng])
| mit | 538,157,690,904,572,000 | 22.6 | 98 | 0.532486 | false | 3.746032 | false | false | false |
pattisdr/osf.io | osf/migrations/0126_update_review_group_names.py | 14 | 1618 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-08-08 19:45
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0125_auto_20180808_1942'),
]
operations = [
migrations.RunSQL(
[
"""
UPDATE auth_group AG0
SET name = (
SELECT 'reviews_' ||
CASE
WHEN P.type = 'osf.preprintprovider'
THEN 'preprint'
WHEN P.type = 'osf.collectionprovider'
THEN 'collection'
WHEN P.type = 'osf.registrationprovider'
THEN 'registration'
END || '_' || id || '_' || split_part(AG0.name, '_', 3)
FROM osf_abstractprovider P
WHERE _id = split_part(AG0.name, '_', 2)
)
WHERE AG0.name LIKE 'reviews_%';
"""
], [
"""
UPDATE auth_group AG0
SET name = (
SELECT 'reviews_' || P._id || '_' || split_part(AG0.name, '_', 4)
FROM osf_abstractprovider P
WHERE id = split_part(AG0.name, '_', 3)::INT
)
WHERE AG0.name LIKE 'reviews_%';
"""
]
)
]
| apache-2.0 | 3,528,708,653,449,753,000 | 34.173913 | 93 | 0.369592 | false | 5.152866 | false | false | false |
babelsberg/babelsberg-r | topaz/modules/ffi/variadic_invoker.py | 1 | 2087 | from topaz.module import ClassDef
from topaz.objects.objectobject import W_Object
from topaz.modules.ffi.type import type_object, ffi_types, W_TypeObject, VOID
from topaz.modules.ffi.dynamic_library import coerce_dl_symbol
from topaz.modules.ffi.function_type import W_FunctionTypeObject
from topaz.modules.ffi.function import W_FFIFunctionObject
from rpython.rlib import clibffi
from rpython.rlib import jit
from rpython.rtyper.lltypesystem import lltype, rffi
class W_VariadicInvokerObject(W_Object):
classdef = ClassDef('VariadicInvoker', W_Object.classdef)
def __init__(self, space):
W_Object.__init__(self, space)
self.w_info = None
self.w_handle = None
@classdef.singleton_method('allocate')
def singleton_method_allocate(self, space, args_w):
return W_VariadicInvokerObject(space)
@classdef.method('initialize')
def method_initialize(self, space, w_handle, w_arg_types,
w_ret_type, w_options=None):
self.w_ret_type = w_ret_type
self.w_options = w_options
self.w_handle = w_handle
if w_options is None:
w_type_map = space.newhash()
else:
w_key = space.newsymbol('type_map')
w_type_map = space.send(w_options, '[]', [w_key])
space.send(self, 'init', [w_arg_types, w_type_map])
@classdef.method('invoke', arg_values_w='array')
def method_invoke(self, space, w_arg_types, arg_values_w):
w_func_cls = space.getclassfor(W_FFIFunctionObject)
w_func = space.send(w_func_cls, 'new',
[self.w_ret_type, w_arg_types,
self.w_handle, self.w_options])
return self._dli_call(space, w_func, arg_values_w)
@jit.dont_look_inside
def _dli_call(self, space, w_func, arg_values_w):
# XXX we are missing argument promotion for the variadic arguments here
# see
# http://stackoverflow.com/questions/1255775/default-argument-promotions-in-c-function-calls
return space.send(w_func, 'call', arg_values_w)
| bsd-3-clause | 3,579,657,997,356,118,000 | 40.74 | 100 | 0.652132 | false | 3.286614 | false | false | false |
tkf/neo | neo/core/block.py | 1 | 2657 | from neo.core.baseneo import BaseNeo
class Block(BaseNeo):
"""
Main container gathering all the data, whether discrete or continous, for a
given recording session.
A block is not necessarily temporally homogeneous, in contrast to Segment.
*Usage*:
TODO
*Required attributes/properties*:
None
*Recommended attributes/properties*:
:name: A label for the dataset
:description: text description
:file_origin: filesystem path or URL of the original data file.
:file_datetime: the creation date and time of the original data file.
:rec_datetime: the date and time of the original recording
:index: integer. You can use this to define an ordering of your Block.
It is not used by Neo in any way.
*Container of*:
:py:class:`Segment`
:py:class:`RecordingChannelGroup`
*Properties*
list_units : descends through hierarchy and returns a list of
:py:class:`Unit` existing in the block. This shortcut exists
because a common analysis case is analyzing all neurons that
you recorded in a session.
list_recordingchannels: descends through hierarchy and returns
a list of :py:class:`RecordingChannel` existing in the block.
"""
def __init__(self, name=None, description=None, file_origin=None,
file_datetime=None, rec_datetime=None, index=None,
**annotations):
"""Initalize a new Block."""
BaseNeo.__init__(self, name=name, file_origin=file_origin,
description=description, **annotations)
self.file_datetime = file_datetime
self.rec_datetime = rec_datetime
self.index = index
self.segments = [ ]
self.recordingchannelgroups = [ ]
@property
def list_units(self):
"""
Return a list of all :py:class:`Unit` in a block.
"""
units = [ ]
for rcg in self.recordingchannelgroups:
for rc in rcg.recordingchannel:
for unit in rc.units:
if unit not in units:
units.append(unit)
return units
@property
def list_recordingchannels(self):
"""
Return a list of all :py:class:`RecordingChannel` in a block.
"""
all_rc = [ ]
for rcg in self.recordingchannelgroups:
for rc in rcg.recordingchannel:
if rc not in all_rc:
all_rc.append(rc)
return all_rc
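# Minimal usage sketch (illustrative, not part of the original module):
#
#     block = Block(name='session 1', index=0)
#     block.segments.append(some_segment)
#     block.recordingchannelgroups.append(some_rcg)
#     units = block.list_units                 # all Units in the hierarchy
#     channels = block.list_recordingchannels
#
# where `some_segment` and `some_rcg` are Segment / RecordingChannelGroup
# objects created elsewhere.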
| bsd-3-clause | 5,692,692,600,697,648,000 | 32.632911 | 79 | 0.581859 | false | 4.604853 | false | false | false |
packetloop/dd-agent | tests/checks/integration/test_php_fpm.py | 45 | 2904 | # 3p
from nose.plugins.attrib import attr
# project
from checks import AgentCheck
from tests.checks.common import AgentCheckTest
# sample from /status?json
# {
# "accepted conn": 350,
# "active processes": 1,
# "idle processes": 2,
# "listen queue": 0,
# "listen queue len": 0,
# "max active processes": 2,
# "max children reached": 0,
# "max listen queue": 0,
# "pool": "www",
# "process manager": "dynamic",
# "slow requests": 0,
# "start since": 4758,
# "start time": 1426601833,
# "total processes": 3
# }
@attr(requires='phpfpm')
class PHPFPMCheckTest(AgentCheckTest):
CHECK_NAME = 'php_fpm'
def test_bad_status(self):
instance = {
'status_url': 'http://localhost:9001/status',
'tags': ['expectedbroken']
}
self.assertRaises(Exception, self.run_check, {'instances': [instance]})
def test_bad_ping(self):
instance = {
'ping_url': 'http://localhost:9001/status',
'tags': ['expectedbroken']
}
self.run_check({'instances': [instance]})
self.assertServiceCheck(
'php_fpm.can_ping',
status=AgentCheck.CRITICAL,
tags=['ping_url:http://localhost:9001/status'],
count=1
)
self.coverage_report()
def test_bad_ping_reply(self):
instance = {
'ping_url': 'http://localhost:42424/ping',
'ping_reply': 'blah',
'tags': ['expectedbroken']
}
self.run_check({'instances': [instance]})
self.assertServiceCheck(
'php_fpm.can_ping',
status=AgentCheck.CRITICAL,
tags=['ping_url:http://localhost:42424/ping'],
count=1
)
self.coverage_report()
def test_status(self):
instance = {
'status_url': 'http://localhost:42424/status',
'ping_url': 'http://localhost:42424/ping',
'tags': ['cluster:forums']
}
self.run_check_twice({'instances': [instance]})
metrics = [
'php_fpm.listen_queue.size',
'php_fpm.processes.idle',
'php_fpm.processes.active',
'php_fpm.processes.total',
'php_fpm.requests.slow',
'php_fpm.requests.accepted',
]
expected_tags = ['cluster:forums', 'pool:www']
for mname in metrics:
self.assertMetric(mname, count=1, tags=expected_tags)
self.assertMetric('php_fpm.processes.idle', count=1, value=1)
self.assertMetric('php_fpm.processes.total', count=1, value=2)
self.assertServiceCheck('php_fpm.can_ping', status=AgentCheck.OK,
count=1,
tags=['ping_url:http://localhost:42424/ping'])
self.assertMetric('php_fpm.processes.max_reached', count=1)
| bsd-3-clause | -1,853,145,356,691,680,300 | 27.470588 | 79 | 0.546832 | false | 3.625468 | true | false | false |
pieleric/odemis | scripts/spectrum_volt.py | 2 | 5683 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 27 Mar 2017
@author: Éric Piel
Acquires a CL spectrum at different e-beam voltages.
If the e-beam is not in spot mode, it will be set to spot mode at the center
of the SEM field-of-view.
The spectrometer settings are used untouched.
Warning: the optical path should be properly configured already (ie, the spectrum
stream should be the last one playing in the GUI).
run as:
./spectrum_volt.py --volt 5 7.5 10 15 --spectrometer spectrometer-integrated --output spectra.h5
"""
from __future__ import division
import argparse
import logging
from odemis import model, dataio, util
import os
import sys
def save_hw_settings(ebeam):
res = ebeam.resolution.value
scale = ebeam.scale.value
trans = ebeam.translation.value
dt = ebeam.dwellTime.value
volt = ebeam.accelVoltage.value
hw_settings = (res, scale, trans, dt, volt)
return hw_settings
def resume_hw_settings(ebeam, hw_settings):
res, scale, trans, dt, volt = hw_settings
# order matters!
ebeam.scale.value = scale
ebeam.resolution.value = res
ebeam.translation.value = trans
ebeam.dwellTime.value = dt
ebeam.accelVoltage.value = volt
def discard_data(df, da):
"""
Receives the SE detector data, which is unused
"""
logging.debug("Received one ebeam data")
def acquire_volts(volts, detector):
"""
    volts (list of floats > 0): voltages in kV
detector (str): role of the spectrometer to use
returns (list of DataArray): all the spectra, in order
"""
ebeam = model.getComponent(role="e-beam")
sed = model.getComponent(role="se-detector")
spmt = model.getComponent(role=detector)
hw_settings = save_hw_settings(ebeam)
# Go to spot mode (ie, res = 1x1)
if ebeam.resolution.value != (1, 1):
ebeam.resolution.value = (1, 1)
ebeam.translation.value = (0, 0) # at the center of the FoV
else:
logging.info("Leaving the e-beam in spot mode at %s", ebeam.translation.value)
ebeam.dwellTime.value = 0.1
try:
# Activate the e-beam
sed.data.subscribe(discard_data)
das = []
for vstr in volts:
v = float(vstr) * 1000
ebeam.accelVoltage.value = v
if not util.almost_equal(ebeam.accelVoltage.value, v):
logging.warning("Voltage requested at %g kV, but e-beam set at %g kV",
v / 1000, ebeam.accelVoltage.value / 1000)
else:
logging.info("Acquiring at %g kV", v / 1000)
# Acquire one spectrum
spec = spmt.data.get()
# Add dimensions to make it a spectrum (X, first dim -> C, 5th dim)
spec.shape = (spec.shape[-1], 1, 1, 1, 1)
# Add some useful metadata
spec.metadata[model.MD_DESCRIPTION] = "Spectrum at %g kV" % (v / 1000)
spec.metadata[model.MD_EBEAM_VOLTAGE] = v
# TODO: store the spot position in MD_POS
das.append(spec)
finally:
sed.data.unsubscribe(discard_data) # Just to be sure
resume_hw_settings(ebeam, hw_settings)
return das
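# Note (added for clarity, not in the original script): each returned DataArray
# is reshaped to (C, 1, 1, 1, 1) so the wavelength axis comes first, and carries
# the e-beam voltage in its metadata, e.g.
#     das = acquire_volts(["5", "7.5"], "spectrometer")
#     das[0].metadata[model.MD_EBEAM_VOLTAGE]   # -> 5000.0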
def save_data(das, filename):
"""
Saves a series of spectra
das (list of DataArray): data to save
filename (str)
"""
exporter = dataio.find_fittest_converter(filename)
if os.path.exists(filename):
        # warn that an existing output file is about to be overwritten
logging.warning("Overwriting file '%s'.", filename)
else:
logging.info("Saving file '%s", filename)
exporter.export(filename, das)
def main(args):
"""
Handles the command line arguments
args is the list of arguments passed
return (int): value to return to the OS as program exit code
"""
# arguments handling
parser = argparse.ArgumentParser(description="Acquires a CL spectrum at different e-beam voltages")
parser.add_argument("--volt", "-v", dest="volts", nargs="+",
help="Voltages (in kV) for which a spectrum should be acquired"
)
parser.add_argument("--spectrometer", "-s", dest="spectrometer", default="spectrometer",
help="Role of the detector to use to acquire a spectrum (default: spectrometer)"
)
parser.add_argument("--output", "-o", dest="output", required=True,
help="Name where to save the spectra. "
"The file format is derived from the extension "
"(TIFF and HDF5 are supported).")
parser.add_argument("--log-level", dest="loglev", metavar="<level>", type=int,
default=1, help="set verbosity level (0-2, default = 1)")
options = parser.parse_args(args[1:])
# Set up logging before everything else
if options.loglev < 0:
logging.error("Log-level must be positive.")
return 127
loglev_names = [logging.WARNING, logging.INFO, logging.DEBUG]
loglev = loglev_names[min(len(loglev_names) - 1, options.loglev)]
logging.getLogger().setLevel(loglev)
try:
das = acquire_volts(options.volts, options.spectrometer)
save_data(das, options.output)
except KeyboardInterrupt:
logging.info("Interrupted before the end of the execution")
return 1
except ValueError as exp:
logging.error("%s", exp)
return 127
except Exception:
logging.exception("Unexpected error while performing action.")
return 127
return 0
if __name__ == '__main__':
ret = main(sys.argv)
logging.shutdown()
exit(ret)
| gpl-2.0 | -1,619,190,431,934,194,700 | 29.880435 | 104 | 0.622316 | false | 3.692008 | false | false | false |
bbengfort/baleen | baleen/opml.py | 2 | 3236 | # baleen.opml
# Reads opml files and gives back outline data
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Sat Sep 20 23:12:07 2014 -0400
#
# Copyright (C) 2014 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: opml.py [b2f890b] benjamin@bengfort.com $
"""
Reads opml files and gives back outline data
"""
##########################################################################
## Imports
##########################################################################
import baleen.models as db
from bs4 import BeautifulSoup
from collections import Counter
from mongoengine.errors import *
##########################################################################
## Load Database function
##########################################################################
def load_opml(path):
"""
Loads an OPML file into the Mongo database; returns the count of the
number of documents added to the database.
"""
opml = OPML(path)
rows = 0
for feed in opml:
feed.pop('type') # Unneeded for database
feed.pop('text') # Unneeded for database
feed['link'] = feed.pop('xmlUrl') # Rename the XML URL
feed['urls'] = {
'xmlUrl': feed['link'], # Add xmlUrl to urls
'htmlUrl': feed.pop('htmlUrl'), # Add htmlUrl to urls
}
feed = db.Feed(**feed) # Construct without an ObjectId
try:
feed.save()
rows += 1
except NotUniqueError:
continue
return rows
##########################################################################
## OPMLReader
##########################################################################
class OPML(object):
def __init__(self, path):
"""
Reader for OPML XML files.
"""
self.path = path
def categories(self):
"""
Reads the file to capture all the categories
"""
with open(self.path, 'r') as data:
soup = BeautifulSoup(data, 'xml')
for topic in soup.select('body > outline'):
yield topic['title']
def counts(self):
"""
Returns the counts of feeds in each category
"""
counts = Counter()
for item in self:
counts[item['category']] += 1
return counts
def __iter__(self):
"""
Yields a dictionary representing the attributes of the RSS feed
from the OPML file; also captures category data.
"""
with open(self.path, 'r') as data:
soup = BeautifulSoup(data, 'xml')
for topic in soup.select('body > outline'):
for feed in topic.find_all('outline'):
data = feed.attrs.copy()
data['category'] = topic['title']
yield data
def __len__(self):
return sum(1 for item in self)
def __str__(self):
counts = self.counts()
return "OPML with {} categories and {} feeds".format(
len(counts), sum(counts.values())
)
def __repr__(self):
return "<{} at {}>".format(self.__class__.__name__, self.path)
| mit | 3,526,765,180,244,049,000 | 29.242991 | 75 | 0.472188 | false | 4.622857 | false | false | false |
s-pearce/glider-utilities | glider_utils/argos.py | 1 | 1224 | def carry_around_add(a, b):
"""
calculate the carry around sum for the 16 bit binary values
"""
c = a + b
return (c & 0xff) + (c >> 16)
def checksum(msg):
"""
calculate the checksum. documents say sum all the previous bytes and take
the ones complement. that doesn't work, need to use the carry around.
"""
s = 0
for i in msg:
s = carry_around_add(s,int(i,16))
return ~s + 1 & 0xff
def twos_comp(hexstr, nbits):
b = int(hexstr, 16)
if b >= 1<<nbits-1: b -= 1<<nbits
return b
def decode(msg):
pass
def print_decode(msg):
if isinstance(msg, str):
msg_dict = decode(msg)
if isinstance(msg, dict):
msg_dict = msg
print("""Decoded Argos message:
Transmitted checksum: {trans_chk:d}
Calculated checksum: {calcd_chk:d}
{chk_msg}
{timestamp:s}
Valid Fix {lat_iso:.2f} {lon_iso::.2f} ({lat_deg:.3f} {lon_deg:.3f})
Age: {age:d} minutes
Invalid Fix {in_lat_iso:.2f} {in_lon_iso:.2f} {in_age:d} minutes
Too-Far Fix {tf_lat_iso:.2f} {tf_lon_iso:.2f} {tf_age:d} minutes
Water currents: {vx:.2f} {vy:.2f}
{sp:.2f} {dir:.2f}
""".format(**msg_dict))
| gpl-2.0 | -4,782,108,888,793,058,000 | 23.979592 | 77 | 0.563725 | false | 2.942308 | false | false | false |
zoidbergwill/lint-review | tests/test_repo.py | 2 | 5756 | import json
from . import load_fixture
from contextlib import contextmanager
from github3.repos.repo import Repository
from github3.pulls import PullRequest
from lintreview.config import load_config
from lintreview.repo import GithubRepository
from lintreview.repo import GithubPullRequest
from mock import Mock, patch, sentinel
from nose.tools import eq_, ok_
from unittest import TestCase
config = load_config()
class TestGithubRepository(TestCase):
def setUp(self):
fixture = load_fixture('pull_request.json')
self.repo_model = Repository(json.loads(fixture))
@patch('lintreview.repo.github')
def test_repository(self, github_mock):
github_mock.get_repository.return_value = self.repo_model
repo = GithubRepository(config, 'markstory', 'lint-test')
eq_(self.repo_model, repo.repository())
github_mock.get_repository.assert_called_with(
config,
'markstory',
'lint-test')
def test_pull_request(self):
model = self.repo_model
model.pull_request = Mock(return_value=sentinel.pull_request)
repo = GithubRepository(config, 'markstory', 'lint-test')
repo.repository = lambda: self.repo_model
pull = repo.pull_request(1)
ok_(isinstance(pull, GithubPullRequest),
'Should be wrapped object')
def test_ensure_label__missing(self):
model = self.repo_model
model.label = Mock(return_value=None)
model.create_label = Mock()
repo = GithubRepository(config, 'markstory', 'lint-test')
repo.repository = lambda: self.repo_model
repo.ensure_label('A label')
model.create_label.assert_called_with(
name='A label',
color='bfe5bf')
def test_ensure_label__exists(self):
model = self.repo_model
model.create_label = Mock()
model.label = Mock(return_value=True)
repo = GithubRepository(config, 'markstory', 'lint-test')
repo.repository = lambda: self.repo_model
repo.ensure_label('A label')
eq_(False, model.create_label.called)
def test_create_status(self):
model = self.repo_model
model.create_status = Mock()
repo = GithubRepository(config, 'markstory', 'lint-test')
repo.repository = lambda: self.repo_model
repo.create_status('abc123', 'succeeded', 'all good')
model.create_status.assert_called_with(
'abc123',
'succeeded',
None,
'all good',
'lintreview')
class TestGithubPullRequest(TestCase):
def setUp(self):
fixture = load_fixture('pull_request.json')
self.model = PullRequest(json.loads(fixture)['pull_request'])
def test_is_private(self):
pull = GithubPullRequest(self.model)
assert False is pull.is_private
def test_display_name(self):
pull = GithubPullRequest(self.model)
assert 'markstory/lint-test#1' == pull.display_name
def test_number(self):
pull = GithubPullRequest(self.model)
assert 1 == pull.number
def test_head(self):
pull = GithubPullRequest(self.model)
expected = '53cb70abadcb3237dcb2aa2b1f24dcf7bcc7d68e'
assert expected == pull.head
def test_clone_url(self):
pull = GithubPullRequest(self.model)
expected = 'https://github.com/contributor/lint-test.git'
assert expected == pull.clone_url
def test_base_repo_url(self):
pull = GithubPullRequest(self.model)
expected = 'https://github.com/markstory/lint-test.git'
assert expected == pull.base_repo_url
def test_target_branch(self):
pull = GithubPullRequest(self.model)
assert 'master' == pull.target_branch
def test_remove_label__label_exists(self):
pull = GithubPullRequest(self.model)
label_name = 'No lint errors'
with add_ok_label(pull, label_name):
pull.remove_label(label_name)
pull.pull.issue().remove_label.assert_called_with(label_name)
def test_remove_label__label_missing(self):
pull = GithubPullRequest(self.model)
label_name = 'No lint errors'
with add_ok_label(pull, 'Other label'):
pull.remove_label(label_name)
assert 0 == pull.pull.issue().remove_label.call_count
def test_add_label(self):
mock_issue = Mock()
self.model.issue = lambda: mock_issue
pull = GithubPullRequest(self.model)
pull.add_label('No lint errors')
mock_issue.add_labels.assert_called_with('No lint errors')
def test_create_comment(self):
self.model.create_comment = Mock()
pull = GithubPullRequest(self.model)
text = 'No lint errors found'
pull.create_comment(text)
self.model.create_comment.assert_called_with(text)
def test_create_review_comment(self):
self.model.create_review_comment = Mock()
pull = GithubPullRequest(self.model)
comment = {
'body': 'bad whitespace',
'commit_id': 'abc123',
'path': 'some/file.php',
'position': 12
}
pull.create_review_comment(**comment)
self.model.create_review_comment.assert_called_with(
comment['body'],
comment['commit_id'],
comment['path'],
comment['position'])
@contextmanager
def add_ok_label(pull_request, *labels, **kw):
if labels:
class Label(object):
def __init__(self, name):
self.name = name
mock_issue = Mock()
mock_issue.labels.return_value = [Label(n) for n in labels]
pull_request.pull.issue = lambda: mock_issue
yield
| mit | -8,117,336,746,393,311,000 | 32.271676 | 73 | 0.626998 | false | 3.816976 | true | false | false |
imay/palo | be/src/codegen/gen_ir_descriptions.py | 1 | 7881 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This script will generate two headers that describe all of the clang cross compiled
functions.
The script outputs (run: 'doris/common/function-registry/gen_functions.py')
- be/src/generated-sources/doris-ir/doris-ir-functions.h
This file contains enums for all of the cross compiled functions
- be/src/generated-sources/doris-ir/doris-ir-function-names.h
This file contains a mapping of <string, enum>
Mapping of enum to compiled function name. The compiled function name only has to
be a substring of the actual, mangled compiler generated name.
TODO: should we work out the mangling rules?
"""
import string
import os
ir_functions = [
["AGG_NODE_PROCESS_ROW_BATCH_WITH_GROUPING", "process_row_batch_with_grouping"],
["AGG_NODE_PROCESS_ROW_BATCH_NO_GROUPING", "process_row_batch_no_grouping"],
# ["EXPR_GET_VALUE", "IrExprGetValue"],
# ["HASH_CRC", "IrCrcHash"],
# ["HASH_FVN", "IrFvnHash"],
["HASH_JOIN_PROCESS_BUILD_BATCH", "12HashJoinNode19process_build_batch"],
["HASH_JOIN_PROCESS_PROBE_BATCH", "12HashJoinNode19process_probe_batch"],
["EXPR_GET_BOOLEAN_VAL", "4Expr15get_boolean_val"],
["EXPR_GET_TINYINT_VAL", "4Expr16get_tiny_int_val"],
["EXPR_GET_SMALLINT_VAL", "4Expr17get_small_int_val"],
["EXPR_GET_INT_VAL", "4Expr11get_int_val"],
["EXPR_GET_BIGINT_VAL", "4Expr15get_big_int_val"],
["EXPR_GET_LARGEINT_VAL", "4Expr17get_large_int_val"],
["EXPR_GET_FLOAT_VAL", "4Expr13get_float_val"],
["EXPR_GET_DOUBLE_VAL", "4Expr14get_double_val"],
["EXPR_GET_STRING_VAL", "4Expr14get_string_val"],
["EXPR_GET_DATETIME_VAL", "4Expr16get_datetime_val"],
["EXPR_GET_DECIMAL_VAL", "4Expr15get_decimal_val"],
["HASH_CRC", "ir_crc_hash"],
["HASH_FNV", "ir_fnv_hash"],
["FROM_DECIMAL_VAL", "16from_decimal_val"],
["TO_DECIMAL_VAL", "14to_decimal_val"],
["FROM_DATETIME_VAL", "17from_datetime_val"],
["TO_DATETIME_VAL", "15to_datetime_val"],
["IR_STRING_COMPARE", "ir_string_compare"],
# ["STRING_VALUE_EQ", "StringValueEQ"],
# ["STRING_VALUE_NE", "StringValueNE"],
# ["STRING_VALUE_GE", "StringValueGE"],
# ["STRING_VALUE_GT", "StringValueGT"],
# ["STRING_VALUE_LT", "StringValueLT"],
# ["STRING_VALUE_LE", "StringValueLE"],
# ["STRING_TO_BOOL", "IrStringToBool"],
# ["STRING_TO_INT8", "IrStringToInt8"],
# ["STRING_TO_INT16", "IrStringToInt16"],
# ["STRING_TO_INT32", "IrStringToInt32"],
# ["STRING_TO_INT64", "IrStringToInt64"],
# ["STRING_TO_FLOAT", "IrStringToFloat"],
# ["STRING_TO_DOUBLE", "IrStringToDouble"],
# ["STRING_IS_NULL", "IrIsNullString"],
["HLL_UPDATE_BOOLEAN", "hll_updateIN8doris_udf10BooleanVal"],
["HLL_UPDATE_TINYINT", "hll_updateIN8doris_udf10TinyIntVal"],
["HLL_UPDATE_SMALLINT", "hll_updateIN8doris_udf11SmallIntVal"],
["HLL_UPDATE_INT", "hll_updateIN8doris_udf6IntVal"],
["HLL_UPDATE_BIGINT", "hll_updateIN8doris_udf9BigIntVal"],
["HLL_UPDATE_FLOAT", "hll_updateIN8doris_udf8FloatVal"],
["HLL_UPDATE_DOUBLE", "hll_updateIN8doris_udf9DoubleVal"],
["HLL_UPDATE_STRING", "hll_updateIN8doris_udf9StringVal"],
["HLL_UPDATE_TIMESTAMP", "hll_updateIN8doris_udf11DateTimeVal"],
["HLL_UPDATE_DECIMAL", "hll_updateIN8doris_udf10DecimalVal"],
["HLL_MERGE", "hll_merge"],
["CODEGEN_ANYVAL_DATETIME_VAL_EQ", "datetime_val_eq"],
["CODEGEN_ANYVAL_STRING_VAL_EQ", "string_val_eq"],
["CODEGEN_ANYVAL_DECIMAL_VAL_EQ", "decimal_val_eq"],
["CODEGEN_ANYVAL_DATETIME_VALUE_EQ", "datetime_value_eq"],
["CODEGEN_ANYVAL_STRING_VALUE_EQ", "string_value_eq"],
["CODEGEN_ANYVAL_DECIMAL_VALUE_EQ", "decimal_value_eq"],
["RAW_VALUE_COMPARE", "8RawValue7compare"],
]
enums_preamble = '\
// Licensed to the Apache Software Foundation (ASF) under one \n\
// or more contributor license agreements. See the NOTICE file \n\
// distributed with this work for additional information \n\
// regarding copyright ownership. The ASF licenses this file \n\
// to you under the Apache License, Version 2.0 (the \n\
// "License"); you may not use this file except in compliance \n\
// with the License. You may obtain a copy of the License at \n\
// \n\
// http://www.apache.org/licenses/LICENSE-2.0 \n\
// \n\
// Unless required by applicable law or agreed to in writing, \n\
// software distributed under the License is distributed on an \n\
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY \n\
// KIND, either express or implied. See the License for the \n\
// specific language governing permissions and limitations \n\
// under the License. \n\
\n\
// This is a generated file, DO NOT EDIT IT.\n\
// To add new functions, see be/src/codegen/gen_ir_descriptions.py.\n\
\n\
#ifndef DORIS_IR_FUNCTIONS_H\n\
#define DORIS_IR_FUNCTIONS_H\n\
\n\
namespace doris {\n\
\n\
class IRFunction {\n\
public:\n\
enum Type {\n'
enums_epilogue = '\
};\n\
};\n\
\n\
}\n\
\n\
#endif\n'
names_preamble = '\
// Licensed to the Apache Software Foundation (ASF) under one \n\
// or more contributor license agreements. See the NOTICE file \n\
// distributed with this work for additional information \n\
// regarding copyright ownership. The ASF licenses this file \n\
// to you under the Apache License, Version 2.0 (the \n\
// "License"); you may not use this file except in compliance \n\
// with the License. You may obtain a copy of the License at \n\
// \n\
// http://www.apache.org/licenses/LICENSE-2.0 \n\
// \n\
// Unless required by applicable law or agreed to in writing, \n\
// software distributed under the License is distributed on an \n\
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY \n\
// KIND, either express or implied. See the License for the \n\
// specific language governing permissions and limitations \n\
// under the License. \n\
\n\
// This is a generated file, DO NOT EDIT IT.\n\
// To add new functions, see be/src/codegen/gen_ir_descriptions.py.\n\
\n\
#ifndef DORIS_IR_FUNCTION_NAMES_H\n\
#define DORIS_IR_FUNCTION_NAMES_H\n\
\n\
#include "doris_ir/doris_ir_functions.h"\n\
\n\
namespace doris {\n\
\n\
static struct {\n\
std::string fn_name; \n\
IRFunction::Type fn; \n\
} FN_MAPPINGS[] = {\n'
names_epilogue = '\
};\n\
\n\
}\n\
\n\
#endif\n'
BE_PATH = os.environ['DORIS_HOME'] + "/gensrc/build/doris_ir/"
if not os.path.exists(BE_PATH):
os.makedirs(BE_PATH)
if __name__ == "__main__":
print "Generating IR description files"
enums_file = open(BE_PATH + 'doris_ir_functions.h', 'w')
enums_file.write(enums_preamble)
names_file = open(BE_PATH + 'doris_ir_names.h', 'w')
names_file.write(names_preamble)
idx = 0
enums_file.write(" FN_START = " + str(idx) + ",\n")
for fn in ir_functions:
enum = fn[0]
fn_name = fn[1]
enums_file.write(" " + enum + " = " + str(idx) + ",\n")
names_file.write(" { \"" + fn_name + "\", IRFunction::" + enum + " },\n")
idx = idx + 1
enums_file.write(" FN_END = " + str(idx) + "\n")
enums_file.write(enums_epilogue)
enums_file.close()
names_file.write(names_epilogue)
names_file.close()
| apache-2.0 | 875,511,572,620,132,400 | 38.405 | 84 | 0.677452 | false | 2.992027 | false | false | false |
nett55/caniusepypy | caniusepypy/test/test_check.py | 2 | 1958 | # Copyright 2014 Google Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import caniusepypy as ciu
from caniusepypy.test import unittest
import tempfile
EXAMPLE_METADATA = """Metadata-Version: 1.2
Name: TestingMetadata
Version: 0.5
Summary: testing
Home-page: http://github.com/brettcannon/caniusepypy
Author: Brett Cannon
Author-email: brett@python.org
License: Apache
Requires-Dist: Twisted
"""
class CheckTest(unittest.TestCase):
# When testing input, make sure to use project names that **will** lead to
# a False answer since unknown projects are skipped.
def test_success(self):
self.assertTrue(ciu.check(projects=['cryptography']))
def test_failure(self):
self.assertFalse(ciu.check(projects=['Twisted']))
def test_requirements(self):
with tempfile.NamedTemporaryFile('w') as file:
file.write('Twisted\n')
file.flush()
self.assertFalse(ciu.check(requirements_paths=[file.name]))
def test_metadata(self):
self.assertFalse(ciu.check(metadata=[EXAMPLE_METADATA]))
def test_projects(self):
# Implicitly done by test_success and test_failure.
pass
def test_case_insensitivity(self):
self.assertFalse(ciu.check(projects=['TwIsTeD']))
def test_ignore_missing_projects(self):
self.assertTrue(ciu.check(projects=['sdfsjdfsdlfk;jasdflkjasdfdsfsdf']))
| apache-2.0 | 8,970,292,225,690,979,000 | 30.580645 | 80 | 0.720123 | false | 3.824219 | true | false | false |
jeremysanders/veusz | veusz/plugins/__init__.py | 7 | 1320 | # Copyright (C) 2010 Jeremy S. Sanders
# Email: Jeremy Sanders <jeremy@jeremysanders.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##############################################################################
from .field import *
from .datasetplugin import *
from .importplugin import *
from .toolsplugin import *
from .votable import *
# backward compatibility
ImportDataset1D = Dataset1D
ImportDataset2D = Dataset2D
ImportDatasetText = DatasetText
ImportField = Field
ImportFieldCheck = FieldBool
ImportFieldText = FieldText
ImportFieldFloat = FieldFloat
ImportFieldInt = FieldInt
ImportFieldCombo = FieldCombo
| gpl-2.0 | 6,037,227,856,241,668,000 | 37.823529 | 78 | 0.715909 | false | 4.086687 | false | false | false |
rustico/dp | web-api/dp/settings_prod.py | 1 | 4140 | # -*- encoding: utf-8 -*-
"""
Django settings for dp project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@+d%g-+1%))$q!un*qx6pv&vivpcz7yzmd7#3v)56#q&-5n*&@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'grappelli',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'social.apps.django_app.default',
'django_extensions',
'rest_framework',
'rest_framework.authtoken',
'djrill',
'tournaments',
'games',
'notifications',
'contact',
'homepage',
)
MIDDLEWARE_CLASSES = (
'corsheaders.middleware.CorsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'dp.urls'
WSGI_APPLICATION = 'dp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
import dj_database_url
DATABASES = {
'default': dj_database_url.config(default='postgres://dp:DPfutbol1983%@localhost:5432/dp')
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'es-ar'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.FileSystemFinder',
)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.request',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
)
AUTHENTICATION_BACKENDS = (
'social.backends.facebook.FacebookAppOAuth2',
'social.backends.facebook.FacebookOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
#'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
)
}
CORS_ORIGIN_WHITELIST = (
'localhost:9090',
'localhost:3000',
'127.0.0.1:9000',
'dpfutbol.com',
'www.dpfutbol.com',
)
CORS_ALLOW_HEADERS = (
'x-requested-with',
'content-type',
'accept',
'origin',
'authorization',
'x-csrftoken',
'WWW-Authenticate',
)
SOCIAL_AUTH_FACEBOOK_KEY = '1480234775555747'
SOCIAL_AUTH_FACEBOOK_SECRET = 'ab0980264107f9856823e3650a1871da'
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
AUTH_USER_MODEL = 'games.Player'
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
# MANDRILL
MANDRILL_API_KEY = '4rbqFI0BJL8ryoHT7CRGLw'
EMAIL_BACKEND = "djrill.mail.backends.djrill.DjrillBackend"
# CELERY SETTINGS
BROKER_URL = 'redis://localhost:6379/0'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
GRAPPELLI_ADMIN_TITLE = u"DP Fútbol"
| gpl-2.0 | 2,140,954,619,698,260,000 | 23.784431 | 94 | 0.711525 | false | 3.105026 | false | false | false |
brownharryb/erpnext | erpnext/accounts/doctype/invoice_discounting/invoice_discounting.py | 1 | 6331 | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json
from frappe import _
from frappe.utils import flt, getdate, nowdate, add_days
from erpnext.controllers.accounts_controller import AccountsController
from erpnext.accounts.general_ledger import make_gl_entries
class InvoiceDiscounting(AccountsController):
def validate(self):
self.validate_mandatory()
self.calculate_total_amount()
self.set_status()
self.set_end_date()
def set_end_date(self):
if self.loan_start_date and self.loan_period:
self.loan_end_date = add_days(self.loan_start_date, self.loan_period)
def validate_mandatory(self):
if self.docstatus == 1 and not (self.loan_start_date and self.loan_period):
frappe.throw(_("Loan Start Date and Loan Period are mandatory to save the Invoice Discounting"))
def calculate_total_amount(self):
self.total_amount = sum([flt(d.outstanding_amount) for d in self.invoices])
def on_submit(self):
self.make_gl_entries()
def on_cancel(self):
self.set_status()
self.make_gl_entries()
def set_status(self):
self.status = "Draft"
if self.docstatus == 1:
self.status = "Sanctioned"
elif self.docstatus == 2:
self.status = "Cancelled"
def make_gl_entries(self):
company_currency = frappe.get_cached_value('Company', self.company, "default_currency")
gl_entries = []
for d in self.invoices:
inv = frappe.db.get_value("Sales Invoice", d.sales_invoice,
["debit_to", "party_account_currency", "conversion_rate", "cost_center"], as_dict=1)
if d.outstanding_amount:
outstanding_in_company_currency = flt(d.outstanding_amount * inv.conversion_rate,
d.precision("outstanding_amount"))
ar_credit_account_currency = frappe.get_cached_value("Account", self.accounts_receivable_credit, "currency")
gl_entries.append(self.get_gl_dict({
"account": inv.debit_to,
"party_type": "Customer",
"party": d.customer,
"against": self.accounts_receivable_credit,
"credit": outstanding_in_company_currency,
"credit_in_account_currency": outstanding_in_company_currency \
if inv.party_account_currency==company_currency else d.outstanding_amount,
"cost_center": inv.cost_center,
"against_voucher": d.sales_invoice,
"against_voucher_type": "Sales Invoice"
}, inv.party_account_currency))
gl_entries.append(self.get_gl_dict({
"account": self.accounts_receivable_credit,
"party_type": "Customer",
"party": d.customer,
"against": inv.debit_to,
"debit": outstanding_in_company_currency,
"debit_in_account_currency": outstanding_in_company_currency \
if ar_credit_account_currency==company_currency else d.outstanding_amount,
"cost_center": inv.cost_center,
"against_voucher": d.sales_invoice,
"against_voucher_type": "Sales Invoice"
}, ar_credit_account_currency))
make_gl_entries(gl_entries, cancel=(self.docstatus == 2), update_outstanding='No')
def create_disbursement_entry(self):
je = frappe.new_doc("Journal Entry")
je.voucher_type = 'Journal Entry'
je.company = self.company
je.remark = 'Loan Disbursement entry against Invoice Discounting: ' + self.name
je.append("accounts", {
"account": self.bank_account,
"debit_in_account_currency": flt(self.total_amount) - flt(self.bank_charges),
})
je.append("accounts", {
"account": self.bank_charges_account,
"debit_in_account_currency": flt(self.bank_charges)
})
je.append("accounts", {
"account": self.short_term_loan,
"credit_in_account_currency": flt(self.total_amount),
"reference_type": "Invoice Discounting",
"reference_name": self.name
})
for d in self.invoices:
je.append("accounts", {
"account": self.accounts_receivable_discounted,
"debit_in_account_currency": flt(d.outstanding_amount),
"reference_type": "Invoice Discounting",
"reference_name": self.name,
"party_type": "Customer",
"party": d.customer
})
je.append("accounts", {
"account": self.accounts_receivable_credit,
"credit_in_account_currency": flt(d.outstanding_amount),
"reference_type": "Invoice Discounting",
"reference_name": self.name,
"party_type": "Customer",
"party": d.customer
})
return je
def close_loan(self):
je = frappe.new_doc("Journal Entry")
je.voucher_type = 'Journal Entry'
je.company = self.company
je.remark = 'Loan Settlement entry against Invoice Discounting: ' + self.name
je.append("accounts", {
"account": self.short_term_loan,
"debit_in_account_currency": flt(self.total_amount),
"reference_type": "Invoice Discounting",
"reference_name": self.name,
})
je.append("accounts", {
"account": self.bank_account,
"credit_in_account_currency": flt(self.total_amount)
})
if getdate(self.loan_end_date) > getdate(nowdate()):
for d in self.invoices:
je.append("accounts", {
"account": self.accounts_receivable_discounted,
"credit_in_account_currency": flt(d.outstanding_amount),
"reference_type": "Invoice Discounting",
"reference_name": self.name,
"party_type": "Customer",
"party": d.customer
})
je.append("accounts", {
"account": self.accounts_receivable_unpaid,
"debit_in_account_currency": flt(d.outstanding_amount),
"reference_type": "Invoice Discounting",
"reference_name": self.name,
"party_type": "Customer",
"party": d.customer
})
return je
@frappe.whitelist()
def get_invoices(filters):
filters = frappe._dict(json.loads(filters))
cond = []
if filters.customer:
cond.append("customer=%(customer)s")
if filters.from_date:
cond.append("posting_date >= %(from_date)s")
if filters.to_date:
cond.append("posting_date <= %(to_date)s")
if filters.min_amount:
cond.append("base_grand_total >= %(min_amount)s")
if filters.max_amount:
cond.append("base_grand_total <= %(max_amount)s")
where_condition = ""
if cond:
where_condition += " and " + " and ".join(cond)
return frappe.db.sql("""
select
name as sales_invoice,
customer,
posting_date,
outstanding_amount
from `tabSales Invoice`
where
docstatus = 1
and outstanding_amount > 0
%s
""" % where_condition, filters, as_dict=1) | gpl-3.0 | 2,035,536,234,197,198,000 | 30.979798 | 112 | 0.685674 | false | 3.082278 | false | false | false |
parenthetical-e/modelmodel | tests/test_behave.py | 1 | 3809 | from modelmodel import behave
import numpy as np
SEED=45
def test_trials_trials():
prng = np.random.RandomState(SEED)
# Simplet trials is a trial: [1, ]
trials, prng = behave.trials.random(N=1, k=1, prng=prng)
assert np.allclose(trials, np.array([1,])), (
"simplest trials breaks")
# Does k work right?
trials, prng = behave.trials.random(N=1, k=10, prng=prng)
assert np.allclose(np.sum(trials), 10), "k is off"
# N?
trials, prng = behave.trials.random(N=2, k=1, prng=prng)
assert np.allclose(np.sum(trials), 3), "N is off"
# is L ok?
assert trials.shape[0] == 2, "l (N*k) is off"
def test_trials_jitter():
prng = np.random.RandomState(SEED)
# Jitter should not change N, k
trials, prng = behave.trials.random(2, 2, prng=prng)
trials, prng = behave.trials.jitter(trials, prng=prng)
assert np.allclose(np.sum(trials), 6), "N of k is off"
def test_probability_random():
prng = np.random.RandomState(SEED)
# Random ps should avg to 0.5
trials, prng = behave.trials.random(1, 1000, prng=prng)
l = trials.shape[0]
ps, prng = behave.probability.random(l, prng=prng)
assert np.allclose(np.mean(ps), .5, atol=.05), "Bad avg"
# dim check
assert ps.shape[0] == trials.shape[0], "l off"
# Same avg for N > 1 conds
trials, prng = behave.trials.random(3, 1000, prng=prng)
l = trials.shape[0]
ps, prng = behave.probability.random(l, prng=prng)
assert np.allclose(np.mean(ps), .5, atol=.05), "Bad avg with 3 cond"
# dim check
assert ps.shape[0] == trials.shape[0], "l off"
def test_probability_learn():
prng = np.random.RandomState(SEED)
# Vis
trials, prng = behave.trials.random(1, 20, prng=prng)
l = trials.shape[0]
ps, prng = behave.probability.learn(l, loc=3, prng=prng)
# dim check
assert ps.shape[0] == trials.shape[0], "l off"
print(ps)
# ps should avg to more than 0.5
trials, prng = behave.trials.random(1, 1000, prng=prng)
l = trials.shape[0]
ps, prng = behave.probability.learn(l, loc=3, prng=prng)
assert np.mean(ps) > .5, "Bad avg"
# dim check
assert ps.shape[0] == trials.shape[0], "l off"
def test_acc_accuracy():
prng = np.random.RandomState(SEED)
# For lots of random ps acc should avg to 0.5
k = 5000
ps = np.asarray([0.5] * k)
acc, prng = behave.acc.accuracy(ps, prng=prng)
assert np.allclose(np.sum(acc)/float(k), .5, atol=.05)
# dim check
assert ps.shape == acc.shape, "l off"
def test_behave_random():
prng = np.random.RandomState(SEED)
trials, acc, ps, prng = behave.behave.random(N=1, k=5, prng=prng)
# dim check
assert trials.shape == acc.shape, "l off: trials and acc"
assert trials.shape == ps.shape, "l off: trials and ps"
# Check trials comp, then ps and acc avg
k = 3000
trials, acc, ps, prng = behave.behave.random(N=1, k=k, prng=prng)
assert np.allclose(np.sum(trials), k), "k is off"
assert np.allclose(np.unique(trials), np.array([0, 1])), "N if off"
assert np.allclose(np.mean(ps[ps > 0.0]), .5, atol=.05), "Bad avg"
def test_behave_learn():
prng = np.random.RandomState(SEED)
trials, acc, ps, prng = behave.behave.learn(N=1, k=5, prng=prng)
# dim check
assert trials.shape == acc.shape, "l off: trials and acc"
assert trials.shape == ps.shape, "l off: trials and ps"
# Check trials comp, then ps and acc avg
k = 3000
trials, acc, ps, prng = behave.behave.learn(N=1, k=k, prng=prng)
assert np.allclose(np.sum(trials), k), "k is off"
assert np.allclose(np.unique(trials), np.array([0, 1])), "N if off"
assert np.mean(ps[ps > 0.0]) > .5, "Bad avg"
| bsd-2-clause | 2,348,247,808,626,808,300 | 30.487603 | 72 | 0.611184 | false | 2.840418 | true | false | false |
mazvv/travelcrm | travelcrm/models/outgoing.py | 1 | 3356 | # -*-coding: utf-8-*-
from sqlalchemy import (
Column,
Integer,
Date,
Numeric,
String,
Table,
ForeignKey,
)
from sqlalchemy.orm import relationship, backref
from ..models import (
DBSession,
Base
)
outgoing_cashflow = Table(
'outgoing_cashflow',
Base.metadata,
Column(
'outgoing_id',
Integer,
ForeignKey(
'outgoing.id',
ondelete='restrict',
onupdate='cascade',
name='fk_outgoing_id_outgoing_cashflow',
),
primary_key=True,
),
Column(
'cashflow_id',
Integer,
ForeignKey(
'cashflow.id',
ondelete='restrict',
onupdate='cascade',
name='fk_cashflow_id_outgoing_cashflow',
),
primary_key=True,
)
)
class Outgoing(Base):
__tablename__ = 'outgoing'
id = Column(
Integer,
autoincrement=True,
primary_key=True
)
date = Column(
Date,
nullable=False,
)
resource_id = Column(
Integer,
ForeignKey(
'resource.id',
name="fk_resource_id_outgoing",
ondelete='restrict',
onupdate='cascade',
),
nullable=False,
)
account_item_id = Column(
Integer,
ForeignKey(
'account_item.id',
name="fk_account_item_id_outgoing",
ondelete='restrict',
onupdate='cascade',
),
nullable=False,
)
subaccount_id = Column(
Integer,
ForeignKey(
'subaccount.id',
name="fk_subaccount_id_outgoing",
ondelete='restrict',
onupdate='cascade',
),
nullable=False,
)
sum = Column(
Numeric(16, 2),
nullable=False,
)
descr = Column(
String(length=255),
)
resource = relationship(
'Resource',
backref=backref(
'outgoing',
uselist=False,
cascade="all,delete"
),
foreign_keys=[resource_id],
cascade="all,delete",
uselist=False,
)
account_item = relationship(
'AccountItem',
backref=backref(
'outgoings',
uselist=True,
lazy="dynamic"
),
uselist=False,
)
subaccount = relationship(
'Subaccount',
backref=backref(
'outgoings',
uselist=True,
lazy="dynamic"
),
uselist=False,
)
cashflows = relationship(
'Cashflow',
secondary=outgoing_cashflow,
backref=backref(
'outgoing',
uselist=False,
),
cascade="all,delete",
uselist=True,
)
@classmethod
def get(cls, id):
if id is None:
return None
return DBSession.query(cls).get(id)
@classmethod
def by_resource_id(cls, resource_id):
if resource_id is None:
return None
return (
DBSession.query(cls).filter(cls.resource_id == resource_id).first()
)
def rollback(self):
cashflows = list(self.cashflows)
self.cashflows = []
DBSession.flush()
for cashflow in cashflows:
DBSession.delete(cashflow)
| gpl-3.0 | -7,055,886,075,532,842,000 | 20.375796 | 79 | 0.499702 | false | 4.26972 | false | false | false |
google/CommonLoopUtils | clu/metric_writers/summary_writer.py | 1 | 2517 | # Copyright 2021 The CLU Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MetricWriter for writing to TF summary files.
Only works in eager mode. Does not work for Pytorch code, please use
TorchTensorboardWriter instead.
"""
from typing import Any, Mapping, Optional
from clu.internal import utils
from clu.metric_writers import interface
import tensorflow as tf
from tensorboard.plugins.hparams import api as hparams_api
Array = interface.Array
Scalar = interface.Scalar
class SummaryWriter(interface.MetricWriter):
"""MetricWriter that writes TF summary files."""
def __init__(self, logdir: str):
super().__init__()
self._summary_writer = tf.summary.create_file_writer(logdir)
def write_scalars(self, step: int, scalars: Mapping[str, Scalar]):
with self._summary_writer.as_default():
for key, value in scalars.items():
tf.summary.scalar(key, value, step=step)
def write_images(self, step: int, images: Mapping[str, Array]):
with self._summary_writer.as_default():
for key, value in images.items():
tf.summary.image(key, value, step=step, max_outputs=value.shape[0])
def write_texts(self, step: int, texts: Mapping[str, str]):
with self._summary_writer.as_default():
for key, value in texts.items():
tf.summary.text(key, value, step=step)
def write_histograms(self,
step: int,
arrays: Mapping[str, Array],
num_buckets: Optional[Mapping[str, int]] = None):
with self._summary_writer.as_default():
for key, value in arrays.items():
buckets = None if num_buckets is None else num_buckets.get(key)
tf.summary.histogram(key, value, step=step, buckets=buckets)
def write_hparams(self, hparams: Mapping[str, Any]):
with self._summary_writer.as_default():
hparams_api.hparams(dict(utils.flatten_dict(hparams)))
def flush(self):
self._summary_writer.flush()
def close(self):
self._summary_writer.close()
| apache-2.0 | 4,893,251,175,021,279,000 | 32.56 | 75 | 0.69408 | false | 3.802115 | false | false | false |
rtidatascience/django-postgres-stats | postgres_stats/functions.py | 1 | 2836 | from django.db.models import Aggregate, Func
import six
class DateTrunc(Func):
"""
Accepts a single timestamp field or expression and returns that timestamp
truncated to the specified *precision*. This is useful for investigating
time series.
The *precision* named parameter can take:
* microseconds
* milliseconds
* second
* minute
* hour
* day
* week
* month
* quarter
* year
* decade
* century
* millennium
Usage example::
checkin = Checkin.objects.
annotate(day=DateTrunc('logged_at', 'day'),
hour=DateTrunc('logged_at', 'hour')).
get(pk=1)
assert checkin.logged_at == datetime(2015, 11, 1, 10, 45, 0)
assert checkin.day == datetime(2015, 11, 1, 0, 0, 0)
assert checkin.hour == datetime(2015, 11, 1, 10, 0, 0)
"""
function = "DATE_TRUNC"
template = "%(function)s('%(precision)s', %(expressions)s)"
def __init__(self, expression, precision, **extra):
if six.PY2:
super(DateTrunc, self).__init__(expression, precision=precision, **extra)
else:
super().__init__(expression, precision=precision, **extra)
class Extract(Func):
"""
Accepts a single timestamp or interval field or expression and returns
the specified *subfield* of that expression. This is useful for grouping
data.
The *subfield* named parameter can take:
* century
* day
* decade
* dow (day of week)
* doy (day of year)
* epoch (seconds since 1970-01-01 00:00:00 UTC)
* hour
* isodow
* isodoy
* isoyear
* microseconds
* millennium
* milliseconds
* minute
* month
* quarter
* second
* timezone
* timezone_hour
* timezone_minute
* week
* year
See `the Postgres documentation`_ for details about the subfields.
Usage example::
checkin = Checkin.objects.
annotate(day=Extract('logged_at', 'day'),
minute=Extract('logged_at', 'minute'),
quarter=Extract('logged_at', 'quarter')).
get(pk=1)
assert checkin.logged_at == datetime(2015, 11, 1, 10, 45, 0)
assert checkin.day == 1
assert checkin.minute == 45
assert checkin.quarter == 4
.. _the Postgres documentation: http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
"""
function = 'EXTRACT'
name = 'extract'
template = "%(function)s(%(subfield)s FROM %(expressions)s)"
def __init__(self, expression, subfield, **extra):
if six.PY2:
super(Extract, self).__init__(expression, subfield=subfield, **extra)
else:
super().__init__(expression, subfield=subfield, **extra)
| bsd-3-clause | -7,891,007,696,067,426,000 | 25.259259 | 132 | 0.592031 | false | 3.971989 | false | false | false |
0lidaxiang/WeArt | reader/view/loginView.py | 1 | 4398 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
from django.http import JsonResponse
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators import csrf
# from django.contrib.auth.decorators import login_required
from tool.tools import createId
from reader.models import reader
from author.models import author
# connect to mysql and check
def loginReader(request):
lastUrl = ""
if "lastUrl" in request.POST:
lastUrl = request.POST['lastUrl']
context = {}
if "readerId" in request.session:
context['status'] = "success"
if lastUrl == "null":
# context['message'] = "/reader/readerIndex/"
return HttpResponseRedirect("/reader/index/")
elif lastUrl == "" or lastUrl is None:
context['status'] = "fail"
context['message'] = "錯誤的訪問"
return JsonResponse(context)
else:
# context['message'] = lastUrl
return HttpResponseRedirect(lastUrl)
# return JsonResponse(context)
if 'userName' not in request.POST and 'passwd' not in request.POST :
context['status'] = "fail"
context['message'] = "請重載後輸入 Email 和密碼"
return JsonResponse(context)
# return render(request, 'reader/login.html')
userName = unicode(request.POST['userName'])
passwd = createId(96,request.POST['passwd'])
try:
readerObj = reader.objects.get(email=userName)
if passwd != readerObj.passwd:
context['status'] = "fail"
context['message'] = "密碼錯誤!請重新登錄!"
return JsonResponse(context)
# return render(request, 'reader/loginFail.html', {'message': u'密碼錯誤!請重新登錄!'})
if readerObj.status == "allowed":
request.session["readerId"] = readerObj.id
request.session["userName"] = readerObj.name
# check user is or not author and author's status
isAuthor = author.isExist(readerObj.id)
request.session["isAuthor"] = isAuthor
authorStatus = author.getStatus(readerObj.id)
if not isAuthor:
request.session["authorStatus"] = ""
context['status'] = "success"
if lastUrl == "null":
context['message'] = "/reader/index/"
else:
context['message'] = lastUrl
return JsonResponse(context)
authorId = author.getId(readerObj.id)
if authorId != "":
request.session["authorId"] = authorId
if authorStatus == "active":
request.session["authorStatus"] = "active"
else:
request.session["authorStatus"] = authorStatus
context['status'] = "success"
if lastUrl == "null":
context['message'] = "/reader/index/"
else:
context['message'] = lastUrl
return JsonResponse(context)
elif readerObj.status == "abuse":
context['status'] = "fail"
context['message'] = "您尚未驗證郵箱!請前往注冊郵箱驗證身份!"
return JsonResponse(context)
else :
context['status'] = "fail"
context['message'] = '您的帳號狀態異常,無法登錄,目前狀態爲:' + str(readerObj.status) + '請聯繫管理員或重新註冊。'
return JsonResponse(context)
except reader.DoesNotExist:
context['status'] = "fail"
context['message'] = '用戶不存在!請重新登錄!'
return JsonResponse(context)
def logout(request):
# delete session
if "readerId" in request.session:
del request.session["readerId"] # if not exists, report error
del request.session["userName"] # if not exists, report error
del request.session["isAuthor"] # if not exists, report error
if 'authorId' in request.session:
del request.session["authorId"] # if not exists, report error
del request.session["authorStatus"] # if not exists, report error
request.session.flush()
return HttpResponseRedirect('/reader/login/')
else:
return HttpResponseRedirect('/reader/login/')
| bsd-3-clause | 3,033,648,149,762,961,400 | 35.189655 | 96 | 0.593616 | false | 3.765022 | false | false | false |
mikel-egana-aranguren/MSc_Bioinformatics_UM_13-14_LSSW | doc/Programming-the-Semantic-Web/chapter2/simplegraph.py | 2 | 5563 | import csv
class SimpleGraph:
def __init__(self):
self._spo = {}
self._pos = {}
self._osp = {}
def add(self, (sub, pred, obj)):
"""
Adds a triple to the graph.
"""
self._addToIndex(self._spo, sub, pred, obj)
self._addToIndex(self._pos, pred, obj, sub)
self._addToIndex(self._osp, obj, sub, pred)
def _addToIndex(self, index, a, b, c):
"""
Adds a triple to a specified index.
"""
if a not in index: index[a] = {b:set([c])}
else:
if b not in index[a]: index[a][b] = set([c])
else: index[a][b].add(c)
def remove(self, (sub, pred, obj)):
"""
Remove a triple pattern from the graph.
"""
triples = list(self.triples((sub, pred, obj)))
for (delSub, delPred, delObj) in triples:
self._removeFromIndex(self._spo, delSub, delPred, delObj)
self._removeFromIndex(self._pos, delPred, delObj, delSub)
self._removeFromIndex(self._osp, delObj, delSub, delPred)
def _removeFromIndex(self, index, a, b, c):
"""
Removes a triple from an index and clears up empty indermediate structures.
"""
try:
bs = index[a]
cset = bs[b]
cset.remove(c)
if len(cset) == 0: del bs[b]
if len(bs) == 0: del index[a]
# KeyErrors occur if a term was missing, which means that it wasn't a valid delete:
except KeyError:
pass
def triples(self, (sub, pred, obj)):
"""
Generator over the triple store.
Returns triples that match the given triple pattern.
"""
# check which terms are present in order to use the correct index:
try:
if sub != None:
if pred != None:
# sub pred obj
if obj != None:
if obj in self._spo[sub][pred]: yield (sub, pred, obj)
# sub pred None
else:
for retObj in self._spo[sub][pred]: yield (sub, pred, retObj)
else:
# sub None obj
if obj != None:
for retPred in self._osp[obj][sub]: yield (sub, retPred, obj)
# sub None None
else:
for retPred, objSet in self._spo[sub].items():
for retObj in objSet:
yield (sub, retPred, retObj)
else:
if pred != None:
# None pred obj
if obj != None:
for retSub in self._pos[pred][obj]:
yield (retSub, pred, obj)
# None pred None
else:
for retObj, subSet in self._pos[pred].items():
for retSub in subSet:
yield (retSub, pred, retObj)
else:
# None None obj
if obj != None:
for retSub, predSet in self._osp[obj].items():
for retPred in predSet:
yield (retSub, retPred, obj)
# None None None
else:
for retSub, predSet in self._spo.items():
for retPred, objSet in predSet.items():
for retObj in objSet:
yield (retSub, retPred, retObj)
# KeyErrors occur if a query term wasn't in the index, so we yield nothing:
except KeyError:
pass
def value(self, sub=None, pred=None, obj=None):
for retSub, retPred, retObj in self.triples((sub, pred, obj)):
if sub is None: return retSub
if pred is None: return retPred
if obj is None: return retObj
break
return None
def load(self, filename):
f = open(filename, "rb")
reader = csv.reader(f)
for sub, pred, obj in reader:
sub = unicode(sub, "UTF-8")
pred = unicode(pred, "UTF-8")
obj = unicode(obj, "UTF-8")
self.add((sub, pred, obj))
f.close()
def save(self, filename):
f = open(filename, "wb")
writer = csv.writer(f)
for sub, pred, obj in self.triples((None, None, None)):
writer.writerow([sub.encode("UTF-8"), pred.encode("UTF-8"), obj.encode("UTF-8")])
f.close()
if __name__ == "__main__":
g = SimpleGraph()
g.add(("blade_runner", "name", "Blade Runner"))
g.add(("blade_runner", "name", "Blade Runner"))
g.add(("blade_runner", "release_date", "June 25, 1982"))
g.add(("blade_runner", "directed_by", "Ridley Scott"))
print list(g.triples((None, None, None)))
print list(g.triples(("blade_runner", None, None)))
print list(g.triples(("blade_runner", "name", None)))
print list(g.triples(("blade_runner", "name", "Blade Runner")))
print list(g.triples(("blade_runner", None, "Blade Runner")))
print list(g.triples((None, "name", "Blade Runner")))
print list(g.triples((None, None, "Blade Runner")))
print list(g.triples(("foo", "name", "Blade Runner")))
print list(g.triples(("blade_runner", "foo", "Blade Runner")))
print list(g.triples(("blade_runner", "name", "foo")))
| gpl-3.0 | 1,389,983,831,151,572,200 | 37.365517 | 93 | 0.480676 | false | 3.939802 | false | false | false |
Linaro/lava-server | dashboard_app/migrations/0015_remove_stale_content_types.py | 1 | 1122 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
def remove_stale_content_types(apps, schema_editor):
# Remove known stale objects from django_content_type table.
ContentType = apps.get_model("contenttypes", "ContentType")
models = {
("auth", "message"),
("dashboard_app", "launchpadbug"),
("dashboard_app", "imagecharttestrun"),
("dashboard_app", "testingeffort"),
("dashboard_app", "imageattribute")
}
for model in models:
try:
ContentType.objects.get(app_label=model[0],
model=model[1]).delete()
except ContentType.DoesNotExist:
pass
def reverse_func(apps, schema_editor):
# Content types are automatically added by django.
pass
class Migration(migrations.Migration):
dependencies = [
('dashboard_app', '0014_auto_20150212_0604'),
]
operations = [
migrations.RunPython(
remove_stale_content_types,
reverse_func
),
]
| agpl-3.0 | -6,703,562,230,126,698,000 | 25.714286 | 64 | 0.604278 | false | 4.25 | false | false | false |
clerian/copyleftscienceTutorials | 001-scrapeDataFromWebpage/getData.py | 1 | 2988 | import bs4 # BeautifulSoup HTML parser
import urllib2 # open URLs
import json # store data in a portable way
def parseData(htmlData, timeTable):
"""
The purpose of this function is to extract the interessting data from the
preloaded HTML page. This is done by using BeautifulSoup
"""
soup = bs4.BeautifulSoup(htmlData)
title = soup.find('title') # extract title and date information
title = title.text
start= title.find('am') # find begin of date string
if start != -1:
date = title[start+2:].strip().split()
else:
start= title.find('-')
date = title[start+2:].strip().split()
# extract all tables with a summary indictaing the people of the debate
table = soup.findAll('table', {'class': 'tabelle tabelleHistorie', \
'summary' : 'Rednerinnen und Redner der Debatte'})
for tab in table: # iterate over all tables extract the required information
topic = tab.find_previous_sibling("h3",{'style': 'padding-top: 0.5em;'}).text
fields = tab.findAll('td')
for i in range(len(fields)/9):
try:
#print fields[i*9+4].text.strip()
tmp = fields[i*9+6].text.strip().replace(':',' ').split()
if fields[i*9+2].text.strip() not in timeTable.keys():
timeTable[fields[i*9+2].text.strip()] = []
timeTable[fields[i*9+2].text.strip()].append([int(tmp[0])*60 + int(tmp[1]),topic,fields[i*9+4].text.strip()])
except:
continue
#print fields[i*9+2].text.strip(), fields[i*9+6].text.strip()
return date
def parseFullSet(s, timeTable):
"""
iterate over a full legislature period, this hast to be specified using
roman numbers.
"""
# allow up to 300 debate days, this is VERY unlikely to be exceeded
for i in range(300):
try:
page = 'http://www.parlament.gv.at/PAKT/VHG/{0}/NRSITZ/NRSITZ_{1:0>5}/index.shtml'.format(s,i)
print page
webpage = urllib2.urlopen(page)
table = {}
date = parseData(webpage,table)
print len(date), date
if len(date) != 3:
date = timeTable[-1][2:5]
print "neues d=", date
timeTable.append([s,i,date[0],date[1],date[2],table])
# in case the URL doesn't exist we ignore it
except urllib2.URLError:
continue
if __name__ == "__main__":
timeTable = []
parseFullSet('XX',timeTable)
parseFullSet('XXI',timeTable)
parseFullSet('XXII',timeTable)
parseFullSet('XXIII',timeTable)
parseFullSet('XXIV',timeTable)
parseFullSet('XXV',timeTable)
f = open("speakerTimeTable.json", "w")
json.dump(timeTable, f)
f.close()
#for i in timeTable:
# for j in i[5]:
# print i[0], i[1], i[2], i[3], i[4], j, i[5][j]
#print timeTable
| gpl-3.0 | 7,837,949,942,959,875,000 | 33.744186 | 125 | 0.570616 | false | 3.519435 | false | false | false |
welwel2/wsrepo | _tools/cr.py | 1 | 2374 | import os
import shutil
toolsdir = os.path.dirname(os.path.abspath(__file__))
base_path = os.path.split(toolsdir)[0]
datadir = '_repo'
toolsdir = "_tools"
output = os.path.join(base_path, datadir)
#print 'Output directory is %s'% output
wslivetv = 'https://github.com/welwel2/wslivetv.git#dev'
#wslivetv = 'https://github.com/welwel2/wslivetv.git:plugin.video.wslivetv'
wslivestream = 'https://github.com/welwel2/wslivestream.git:plugin.video.wslivestream'
wsteledunet = 'https://github.com/welwel2/wsteledunet.git:plugin.video.wsteledunet'
wsrepo = 'https://github.com/welwel2/wsrepo.git:_repo/repository.wsrepo'
addons = [wslivetv, wslivestream, wsteledunet, wsrepo]
addons_str = ''.join('%s '%addon for addon in addons)
def update_remote(repo_path, rm='origin'):
# the objective here is to develp code to push the wsrepo changes to the server
# the steps requied are:
# 1. commit the changes locally
# 2. push changes to server
from git import Repo
repo = Repo(repo_path) # instantiate the repo
assert not repo.bare # ensure that the repo is not empty
if repo.is_dirty(): # check if there is changes that needs to be commited
# commit changes
# master = repo.heads.master
# repo.head.reference = master # switch to master branch
#git.checkout('--', '*.py[oc]')
index = repo.index
#for (path, stage), entry in index.entries.items():
# print path, stage
#repo.index.rm('*')
repo.index.add('*')
repo.index.add(repo.untracked_files)
repo.index.commit("commit changes")
print 'commited changes'
else:
print "repo is clean no changes to commit"
remote = eval('repo.remotes.%s'%rm)
assert remote.exists()
try:
remote.push()
except:
import push
pp = push.PushProject()
pp.get_git()
print 'pushed changes'
#print 'addons paths are %s'%addons_str
def delete_output():
if os.path.exists(output):
shutil.rmtree(output)
os.chdir(os.path.join(base_path, toolsdir))
if __name__ == "__main__":
delete_output()
os.system(r'py -3 create_repository.py --datadir=%s %s'%(output, addons_str))
update_remote(base_path)
| gpl-3.0 | 771,650,095,560,302,800 | 32.911765 | 86 | 0.619208 | false | 3.301808 | false | false | false |
chenke91/ihaveablog | app/admin/views.py | 1 | 2139 | from flask import render_template, request, redirect, url_for, g, jsonify, flash
from flask.ext.login import login_required
from app.models import Blog, Category
from app import avatars
from app.decorators import admin_required
from . import admin
@admin.route('/blogs/add/', methods=['GET', 'POST'])
@login_required
@admin_required
def add_blog():
from .forms import BlogForm
form = BlogForm()
g.open_article = True
if form.validate_on_submit():
try:
filename = avatars.save(request.files['avatars'])
except Exception as e:
flash('上传失败,请检查文件格式')
return render_template('admin/add_blog.html', form=form)
file_url = avatars.url(filename)
form.avatars.data = file_url
Blog.from_form(form)
return redirect(url_for('main.index'))
return render_template('admin/add_blog.html', form=form)
@admin.route('/blogs/edit/<int:id>/', methods=['GET', 'POST'])
@login_required
@admin_required
def edit_blog(id):
from .forms import EditBlogForm
blog = Blog.query.get_or_404(id)
form = EditBlogForm(title=blog.title,
category=blog.category_id,
summary=blog.summary,
blog_body=blog.body)
if form.validate_on_submit():
blog.title = form.title.data
blog.category_id = form.category.data
blog.summary = form.summary.data
blog.body = form.blog_body.data
blog.save()
return redirect(url_for('main.get_blog', id=id))
return render_template('admin/edit_blog.html', form=form)
@admin.route('/blogs/')
@login_required
@admin_required
def get_blogs():
g.open_article = True
return render_template('admin/blogs.html')
@admin.route('/api/categories/')
@login_required
@admin_required
def categories():
categories = Category.query.all()
data = [cate.to_dict() for cate in categories]
res = {'data': data}
return jsonify(res)
@admin.route('/api/blogs/')
@login_required
@admin_required
def blogs():
blogs = Blog.query.all()
data = [blog.to_dict() for blog in blogs]
res = {'data': data}
return jsonify(res)
| mit | 8,605,077,185,840,440,000 | 29.242857 | 80 | 0.659424 | false | 3.339117 | false | false | false |
spencerahill/aospy-obj-lib | aospy_user/models/cmip5_models.py | 1 | 18439 | """aospy.Model objects corresponding to CMIP5 data."""
import datetime
import os
from aospy.model import Model
from .. import runs
root_dir = '/archive/pcmdi/repo/CMIP5/output/'
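
# Illustrative usage sketch only: these Model objects are normally gathered
# into an aospy Proj object elsewhere in aospy_user.  The Proj import and
# call signature below are assumptions for illustration, not defined here:
#
#     from aospy.proj import Proj
#     cmip5 = Proj('cmip5', models=[bcc_csm1, cnrm_cm5, mohc_hadgem2_a])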
# BCC
bcc_csm1 = Model(
    name='bcc-csm1-1',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'BCC/BCC-CSM1-1')),
grid_file_paths=[
'/archive/pcmdi/repo/CMIP5/output/BCC/BCC-CSM1-1/historical/fx/atmos/'
'fx/r0i0p0/v1/orog/orog_fx_bcc-csm1-1_historical_r0i0p0.nc',
'/archive/pcmdi/repo/CMIP5/output/BCC/BCC-CSM1-1/historical/fx/atmos/'
'fx/r0i0p0/v1/sftlf/sftlf_fx_bcc-csm1-1_historical_r0i0p0.nc',
'/archive/pcmdi/repo/CMIP5/output/BCC/BCC-CSM1-1/historical/fx/atmos/'
'fx/r0i0p0/v1/areacella/areacella_fx_bcc-csm1-1_historical_r0i0p0.nc',
],
# data_dur=30,
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# BNU
bnu_esm = Model(
name='bnu_esm',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'BNU/BNU-ESM')),
runs=[runs.amip],
default_runs=False
)
# CCCma
cccma_canam4 = Model(
name='cccma_canam4',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'CCCma/CanAM4')),
repo_version=0,
# data_dur=30,
# data_start_date=datetime.datetime(1950, 1, 1),
# data_end_date=datetime.datetime(2009, 12, 31),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
cccma_cancm4 = Model(
name='cccma_cancm4',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'CCCma/CanCM4')),
runs=[runs.amip],
default_runs=False
)
cccma_canesm2 = Model(
name='cccma_canesm2',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'CCCma/CanESM2')),
runs=[runs.amip],
default_runs=False
)
# CMCC
cmcc_cesm = Model(
name='cmcc-cesm',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'CMCC/CMCC-CESM')),
runs=[runs.amip],
default_runs=False
)
cmcc_cm = Model(
name='cmcc-cm',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'CMCC/CMCC-CM')),
runs=[runs.amip],
default_runs=False
)
cmcc_cms = Model(
name='cmcc-cms',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'CMCC/CMCC-CMS')),
runs=[runs.amip],
default_runs=False
)
# CNRM-CERFACS
cnrm_cm5 = Model(
    name='cnrm-cm5',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'CNRM-CERFACS/CNRM-CM5')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
cnrm_cm5_2 = Model(
    name='cnrm-cm5-2',
description='',
# data_dir_struc='gfdl_repo',
    # data_direc=os.path.realpath(os.path.join(root_dir, 'CNRM-CERFACS/CNRM-CM5-2')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# COLA-CFS
cola_cfsv2 = Model(
name='cola-cfsv2-2011',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'COLA/CFSv2-2011')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# CSIRO-BOM
csiro_bom_access1_0 = Model(
name='csiro-bom-access1-0',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'CSIRO-BOM/CSIRO-ACCESS1-0')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
csiro_bom_access1_3 = Model(
name='csiro-bom-access1-3',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'CSIRO-BOM/CSIRO-ACCESS1-3')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# CSIRO-QCCCE
csiro_qccce_mk3_6_0 = Model(
name='csiro-qccce-mk3-6-0',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'CSIRO-QCCCE/CSIRO-Mk3-6-0')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# FIO
fio_esm = Model(
name='fio-esm',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'FIO/FIO-ESM')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# ICHEC
ichec_ec_earth = Model(
name='ichec_ec_earth',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'ICHEC/EC-EARTH')),
repo_ens_mem='r3i1p1',
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# INM
inm_cm4 = Model(
name='inm-cm4',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'INM/INM-CM4')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# INPE
inpe_hadgem2_es = Model(
name='inpe-hadgem2-es',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'INPE/HadGEM2-ES')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# IPSL
ipsl_cm5a_lr = Model(
name='ipsl-cm5a-lr',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'IPSL/IPSL-CM5A-LR')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
ipsl_cm5a_mr = Model(
name='ipsl-cm5a-mr',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'IPSL/IPSL-CM5A-MR')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
ipsl_cm5b_lr = Model(
name='ipsl-cm5b-lr',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'IPSL/IPSL-CM5B-LR')),
grid_file_paths=[
'/archive/pcmdi/repo/CMIP5/output/IPSL/IPSL-CM5B-LR/piControl/fx/'
'atmos/fx/r0i0p0/v20120430/orog/'
'orog_fx_IPSL-CM5B-LR_piControl_r0i0p0.nc',
'/archive/pcmdi/repo/CMIP5/output/IPSL/IPSL-CM5B-LR/piControl/fx/'
'atmos/fx/r0i0p0/v20120430/areacella/'
'areacella_fx_IPSL-CM5B-LR_piControl_r0i0p0.nc',
'/archive/pcmdi/repo/CMIP5/output/IPSL/IPSL-CM5B-LR/piControl/fx/'
'atmos/fx/r0i0p0/v20120430/sftlf/'
'sftlf_fx_IPSL-CM5B-LR_piControl_r0i0p0.nc',
],
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# LASG-CESS
lasg_cess_fgoals_g2 = Model(
name='lasg-cess-fgoals-g2',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'LASG-CESS/FGOALS-g2')),
repo_version=0,
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# LASG-IAP
lasg_iap_fgoals_g1 = Model(
name='lasg-iap-fgoals-g1',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'LASG-IAP/FGOALS-g1')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
lasg_iap_fgoals_s2 = Model(
name='lasg-iap-fgoals-s2',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'LASG-IAP/FGOALS-s2')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# MIROC
miroc4h = Model(
name='miroc4h',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MIROC/MIROC4h')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
miroc5 = Model(
name='miroc5',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MIROC/MIROC5')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
miroc_esm = Model(
name='miroc-esm',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MIROC/MIROC-ESM')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
miroc_esm_chem = Model(
name='miroc-esm-chem',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MIROC/MIROC-ESM-CHEM')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# MOHC (Met Office Hadley Centre)
mohc_hadcm3 = Model(
name='mohc_hadcm3',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MOHC/HadCM3')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
mohc_hadgem2_a = Model(
name='mohc_hadgem2a',
description='',
# data_dir_struc='gfdl_repo',
repo_version=1,
# data_direc=os.path.realpath(os.path.join(root_dir, 'MOHC/HadGEM2-A')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
mohc_hadgem2_cc = Model(
name='mohc_hadgem2cc',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MOHC/HadGEM2-CC')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
mohc_hadgem2_es = Model(
name='hadgem2-es',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MOHC/HadGEM2-ES')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# MPI-M
mpi_m_esm_lr = Model(
name='mpi-esm-lr',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MPI-M/MPI-ESM-LR')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
mpi_m_esm_mr = Model(
name='mpi-esm-mr',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MPI-M/MPI-ESM-MR')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
mpi_m_esm_p = Model(
name='mpi-esm-p',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MPI-M/MPI-ESM-P')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# MRI
mri_agcm3_2h = Model(
name='mri-agcm3-2h',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MRI/MRI-AGCM3-2H')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
mri_agcm3_2s = Model(
name='mri-agcm3-2s',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MRI/MRI-AGCM3-2S')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
mri_cgcm3 = Model(
name='mri-cgcm3',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MRI/MRI-CGCM3')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
mri_esm1 = Model(
name='mri-esm1',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MRI/MRI-ESM1')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# NASA-GISS
nasa_giss_e2_h = Model(
name='giss-e2-h',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NASA-GISS/GISS-E2-H')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
nasa_giss_e2_h_cc = Model(
name='giss-e2-h-cc',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NASA-GISS/GISS-E2-H-CC')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
nasa_giss_e2_r = Model(
name='giss-e2-r',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NASA-GISS/GISS-E2-R')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
nasa_giss_e2_r_cc = Model(
name='giss-e2-r-cc',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NASA-GISS/GISS-E2-R-CC')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# NASA-GMAO
nasa_gmao_geos_5 = Model(
name='gmao-geos-5',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NASA-GMAO/GEOS-5')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# NCAR
ncar_ccsm4 = Model(
name='ncar-ccsm4',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NCAR/CCSM4')),
grid_file_paths=[
'/archive/pcmdi/repo/CMIP5/output/NCAR/CCSM4/piControl/fx/atmos/fx/'
'r0i0p0/v20120413/orog/orog_fx_CCSM4_piControl_r0i0p0.nc',
'/archive/pcmdi/repo/CMIP5/output/NCAR/CCSM4/piControl/fx/atmos/fx/'
'r0i0p0/v20120413/sftlf/sftlf_fx_CCSM4_piControl_r0i0p0.nc',
'/archive/pcmdi/repo/CMIP5/output/NCAR/CCSM4/piControl/fx/atmos/fx/'
'r0i0p0/v20120213/areacella/areacella_fx_CCSM4_piControl_r0i0p0.nc',
],
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# NCC
ncc_noresm1_m = Model(
name='ncc-noresm1-m',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NCC/NorESM1-M')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
ncc_noresm1_me = Model(
name='ncc-noresm1-me',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NCC/NorESM1-me')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# NCEP
ncep_cfsv2_2011 = Model(
name='ncep_cfsv2-2011',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NCEP/CFSv2-2011')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# NIMR-KMA
nimr_kma_hadgem2_ao = Model(
name='nimr-kma-hadgem2-ao',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NIMR-KMA/HadGEM2-AO')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# NOAA-GFDL
gfdl_cm2_1 = Model(
name='gfdl_cm2.1',
description='NOAA GFDL CM2.1 AOGCM',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NOAA-GFDL/GFDL-CM2')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
gfdl_cm3 = Model(
name='gfdl_cm3',
description='NOAA GFDL CM3 AOGCM',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NOAA-GFDL/GFDL-CM3')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
gfdl_esm2m = Model(
name='gfdl_esm2m',
description='NOAA GFDL ESM2M earth-system model',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NOAA-GFDL/GFDL-ESM2M')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
gfdl_esm2g = Model(
name='gfdl_esm2g',
description='NOAA GFDL ESM2G earth-system model',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NOAA-GFDL/GFDL-ESM2G')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
gfdl_hiram_c180 = Model(
name='gfdl_hiram-c180',
description='NOAA GFDL HIRAM-C180 AGCM',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NOAA-GFDL/GFDL-HIRAM-C180')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
gfdl_hiram_c360 = Model(
name='gfdl_hiram-c360',
description='NOAA GFDL HIRAM-C360 AGCM',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NOAA-GFDL/GFDL-HIRAM-C360')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# NSF-DOE-NCAR
cesm1_bgc = Model(
name='cesm1-bgc',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NSF-DOE-NCAR/CESM1-BGC')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
cesm1_cam5 = Model(
name='ncar_cesm1_cam5',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NSF-DOE-NCAR/CESM1-CAM5')),
grid_file_paths=['/archive/s1h/cmip5/cam5_land_mask/cam5_land_mask.nc'],
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
cesm1_cam5_1_fv2 = Model(
name='cesm1-cam5-1-fv2',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NSF-DOE-NCAR/CESM1-CAM5-1-FV2')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
cesm1_fastchem = Model(
name='cesm1-fastchem',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NSF-DOE-NCAR/CESM1-FASTCHEM')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
cesm1_waccm = Model(
name='cesm1-waccm',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NSF-DOE-NCAR/CESM1-WACCM')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# SMHI
smhi_ec_earth = Model(
name='smhi_ec_earth',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'SMHI/EC-EARTH')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# UNSW
unsw_csiro_mk3l_1_2 = Model(
name='unsw-csiro-mk3l-1-2',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'UNSW/CSIRO-Mk3L-1-2')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
| apache-2.0 | 5,292,037,132,025,496,000 | 29.782972 | 91 | 0.640978 | false | 2.314713 | false | true | false |
blaisb/cfdemUtilities | mixing/foamTreatment/evolutionBedLevel.py | 1 | 4813 | #------------------------------------------------------------------------------------------------------------
#
# This program averages OpenFOAM probe data azimuthally. If the variable is a scalar it also outputs the std-dev
#
# Output format : velocity : r z ux uy uz ur ut uz
# scalar : r z scalar
#
# Usage : python evolutionBedLevel.py <caseFolder1> [<caseFolder2> ...]
#
# Author : Bruno Blais
#
#-------------------------------------------------------------------------------------------------------------
# Python imports
#----------------
import os
import sys
import numpy
import math
import matplotlib.pyplot as plt
from matplotlib import ticker #Manually change number of tick
import matplotlib.patches as patches
#----------------
#================================
# USER DEFINED VARIABLES
#================================
pdf=False
tol=1e-4
paperMode=True
impeller=True
impellerType="pbtTs4"
contour=False
nContour=100
colorList=["c","m","g","r","b", "k","c","m","g","r","b","k"]
aval=0.5
#Functions for the averaging
from functionAverage import *
#===============================
# FIGURE OPTIONS
#===============================
#Figure size
plt.rcParams['figure.figsize'] = 17, 8
params = {'backend': 'ps',
'axes.labelsize': 26,
'axes.titlesize': 26,
'text.fontsize': 20,
'legend.fontsize': 20,
'xtick.labelsize': 22,
'ytick.labelsize': 22,
'text.usetex': True,
}
plt.rcParams.update(params)
#================================
# MAIN
#================================
if len(sys.argv) < 2:
    print "Insufficient number of arguments, need a folder argument"
    sys.exit(1)
folder = sys.argv[1:]
# Acquire list of time step
speedFolder=folder
# Sort so that speed will already be sorted
speedFolder.sort()
fig = plt.figure(figsize=(6,8))
ax = fig.add_subplot(111)
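# Loop over the case folders (one per impeller speed): take the latest probe time,
# average the void fraction azimuthally and overlay the region where it stays below 0.5.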
for j,i in enumerate(speedFolder):
subFolder=i +"/CFD/resultsCFD/postProcessing/probes"
time=os.listdir(subFolder)
fname = subFolder+"/"+max(time)+"/"+"voidfraction"
print "Postprocessing file : ", fname
rl,zl,acc,dev=scalarAverage(fname,impeller,impellerType)
extent=(numpy.min(rl),numpy.max(rl),numpy.min(zl),numpy.max(zl))
if (j==0):
accAll=acc
levels = (0.,1.) # numpy.arange(mindiff, maxdiff+tol, (maxdiff-mindiff)/nContour)
CS=plt.contourf(accAll, levels, hold='on',alpha=1, colors="w",origin='lower', extent=extent)
else: accAll=numpy.maximum(acc,accAll)
#plt.subplots_adjust(left=0.02, bottom=0.09, right=0.95, top=0.94, wspace=0.15)
#plt.subplot(1,1,1)
#plt.xlabel("r [m]")
#plt.ylabel("z [m]")
#plt.imshow(acc[:,:],extent=extent,origin='lower',interpolation="bicubic",vmin=0.4,vmax=1.)
#if (len(sys.argv)>3):
# plt.title("%s" %(sys.argv[3]))
#else:
# plt.title("%s" %(sys.argv[1]))
#cbar = plt.colorbar( drawedges=False)
#tick_locator = ticker.MaxNLocator(nbins=7)
#cbar.locator = tick_locator
#cbar.update_ticks()
#cbar.ax.tick_params(labelsize=20)
#cbar.solids.set_edgecolor("face")
maxdiff=numpy.nanmax(acc)
mindiff=numpy.nanmin(acc)
levels = (0,0.5) # numpy.arange(mindiff, maxdiff+tol, (maxdiff-mindiff)/nContour)
CS=plt.contourf(acc, levels, hold='on',alpha=0.5, colors=colorList[j],origin='lower', extent=extent)
CS=plt.contour(acc, levels, hold='on',alpha=1, colors="k", origin='lower', extent=extent)
#plt.clabel(CS, inline=1, fontsize=14,colors="white")
levels = (0.,1.) # numpy.arange(mindiff, maxdiff+tol, (maxdiff-mindiff)/nContour)
CS=plt.contourf(accAll, levels, hold='on',alpha=0.10, colors="y",origin='lower', extent=extent)
# get data you will need to create a "background patch" to your plot
xmin = numpy.min(rl)
xmax = numpy.max(rl)
ymin = numpy.min(zl)
ymax = numpy.max(zl)
xy = (xmin,ymin)
width = xmax - xmin
height = ymax - ymin
# create the patch and place it in the back of countourf (zorder!)
p = patches.Rectangle(xy, width, height, fill=True,color="k",alpha=0.4, zorder=-10)
ax.add_patch(p)
#Get artists and labels for legend and chose which ones to display
handles, labels = ax.get_legend_handles_labels()
display = (0,1,2)
#Create custom artists
a1 = patches.Rectangle((0,0),1,1,color=colorList[0],alpha=aval)
a2 = patches.Rectangle((0,0),1,1,color=colorList[1],alpha=aval)
a3 = patches.Rectangle((0,0),1,1,color=colorList[2],alpha=aval)
a4 = patches.Rectangle((0,0),1,1,color=colorList[3],alpha=aval)
#anyArtist = plt.Line2D((0,1),(0,0), color='k')
ax.legend([handle for i,handle in enumerate(handles) if i in display]+[a1,a2,a3,a4],
[label for i,label in enumerate(labels) if i in display]+["100RPM","200RPM","300RPM","400RPM"])
plt.show()
if (pdf): plt.savefig("./levelAnalysis.pdf")
plt.show()
| lgpl-3.0 | -6,221,462,796,995,080,000 | 30.253247 | 110 | 0.594847 | false | 3.121271 | false | false | false |
inej/toad | tasks/11-tensordipy.py | 1 | 5151 | import dipy.core.gradients
import dipy.reconst.dti
import dipy.segment.mask
import dipy.reconst.dti
import numpy
import nibabel
from core.generictask import GenericTask
from lib.images import Images
__author__ = 'desmat'
class TensorDipy(GenericTask):
def __init__(self, subject):
GenericTask.__init__(self, subject, 'upsampling', 'registration', 'qa')
def implement(self):
dwi = self.getUpsamplingImage('dwi', 'upsample')
bValsFile = self.getUpsamplingImage('grad', None, 'bvals')
bVecsFile = self.getUpsamplingImage('grad', None, 'bvecs')
mask = self.getRegistrationImage('mask', 'resample')
fit = self.__produceTensors(dwi, bValsFile, bVecsFile, mask)
def __produceTensors(self, source, bValsFile, bVecsFile, mask):
self.info("Starting tensors creation from dipy on {}".format(source))
dwiImage = nibabel.load(source)
maskImage = nibabel.load(mask)
maskData = maskImage.get_data()
dwiData = dwiImage.get_data()
dwiData = dipy.segment.mask.applymask(dwiData, maskData)
gradientTable = dipy.core.gradients.gradient_table(numpy.loadtxt(bValsFile), numpy.loadtxt(bVecsFile))
model = dipy.reconst.dti.TensorModel(gradientTable)
fit = model.fit(dwiData)
tensorsValues = dipy.reconst.dti.lower_triangular(fit.quadratic_form)
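        # lower_triangular() yields (Dxx, Dxy, Dyy, Dxz, Dyz, Dzz); reorder to (Dxx, Dxy, Dxz, Dyy, Dyz, Dzz) before saving.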
correctOrder = [0,1,3,2,4,5]
tensorsValuesReordered = tensorsValues[:,:,:,correctOrder]
tensorsImage = nibabel.Nifti1Image(tensorsValuesReordered.astype(numpy.float32), dwiImage.get_affine())
nibabel.save(tensorsImage, self.buildName(source, "tensor"))
nibabel.save(nibabel.Nifti1Image(fit.fa.astype(numpy.float32), dwiImage.get_affine()), self.buildName(source, "fa"))
nibabel.save(nibabel.Nifti1Image(fit.ad.astype(numpy.float32), dwiImage.get_affine()), self.buildName(source, "ad"))
nibabel.save(nibabel.Nifti1Image(fit.rd.astype(numpy.float32), dwiImage.get_affine()), self.buildName(source, "rd"))
nibabel.save(nibabel.Nifti1Image(fit.md.astype(numpy.float32), dwiImage.get_affine()), self.buildName(source, "md"))
nibabel.save(nibabel.Nifti1Image(fit.evecs[0].astype(numpy.float32), dwiImage.get_affine()), self.buildName(source, "v1"))
nibabel.save(nibabel.Nifti1Image(fit.evecs[1].astype(numpy.float32), dwiImage.get_affine()), self.buildName(source, "v2"))
nibabel.save(nibabel.Nifti1Image(fit.evecs[2].astype(numpy.float32), dwiImage.get_affine()), self.buildName(source, "v3"))
#nibabel.save(nibabel.Nifti1Image(fit.adc(dipy.data.get_sphere('symmetric724')).astype(numpy.float32),
# dwiImage.get_affine()), self.buildName(target, "adc"))
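        # FA-weighted RGB (direction-encoded color) map of the eigenvectors, saved as *_tensor_rgb.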
faColor = numpy.clip(fit.fa, 0, 1)
rgb = dipy.reconst.dti.color_fa(faColor, fit.evecs)
nibabel.save(nibabel.Nifti1Image(numpy.array(255 * rgb, 'uint8'), dwiImage.get_affine()), self.buildName(source, "tensor_rgb"))
self.info("End tensor and metrics creation from dipy, resulting file is {} ".format(fit))
return fit
def isIgnore(self):
return self.get("ignore")
def meetRequirement(self):
return Images((self.getUpsamplingImage('dwi', 'upsample'), "upsampled diffusion"),
(self.getUpsamplingImage('grad', None, 'bvals'), "gradient value bvals encoding file"),
(self.getUpsamplingImage('grad', None, 'bvecs'), "gradient vector bvecs encoding file"),
(self.getRegistrationImage('mask', 'resample'), 'brain mask'))
def isDirty(self):
return Images((self.getImage("dwi", "tensor"), "dipy tensor"),
(self.getImage('dwi', 'v1'), "selected eigenvector 1"),
(self.getImage('dwi', 'v2'), "selected eigenvector 2"),
(self.getImage('dwi', 'v3'), "selected eigenvector 3"),
(self.getImage('dwi', 'fa'), "fractional anisotropy"),
(self.getImage('dwi', 'md'), "mean diffusivity MD"),
(self.getImage('dwi', 'ad'), "selected eigenvalue(s) AD"),
(self.getImage('dwi', 'rd'), "selected eigenvalue(s) RD"))
#"apparent diffusion coefficient" : self.getImage(self.workingDir, 'dwi', 'adc')}
def qaSupplier(self):
"""Create and supply images for the report generated by qa task
"""
qaImages = Images()
softwareName = 'dipy'
#Get images
mask = self.getRegistrationImage('mask', 'resample')
#Build qa images
tags = (
('fa', 'Fractional anisotropy'),
('ad', 'Axial Diffusivity'),
('md', 'Mean Diffusivity'),
('rd', 'Radial Diffusivity'),
)
for postfix, description in tags:
image = self.getImage('dwi', postfix)
if image:
qaImage = self.buildName(image, softwareName, 'png')
self.slicerPng(image, qaImage, boundaries=mask)
qaImages.extend(Images((qaImage, description)))
return qaImages
| gpl-2.0 | -1,260,300,960,972,631,300 | 43.791304 | 135 | 0.629975 | false | 3.377705 | false | false | false |
EricSB/nupic | tests/unit/nupic/frameworks/opf/common_models/cluster_params_test.py | 3 | 2894 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for model selection via cluster params."""
import unittest
from nupic.support.unittesthelpers.testcasebase import TestCaseBase
from nupic.frameworks.opf.modelfactory import ModelFactory
from nupic.frameworks.opf.clamodel import CLAModel
from nupic.frameworks.opf.common_models.cluster_params import (
getScalarMetricWithTimeOfDayAnomalyParams)
class ClusterParamsTest(TestCaseBase):
def testModelParams(self):
"""
    Test that getScalarMetricWithTimeOfDayAnomalyParams returns a valid dict
    that can be instantiated as a CLAModel.
"""
params = getScalarMetricWithTimeOfDayAnomalyParams([0],
minVal=23.42,
maxVal=23.420001)
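    # minVal and maxVal are nearly identical, so the derived encoder resolution would be
    # tiny; the assertion below checks the 0.001 resolution floor.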
encodersDict= (
params['modelConfig']['modelParams']['sensorParams']['encoders'])
model = ModelFactory.create(modelConfig=params['modelConfig'])
self.assertIsInstance(model,
CLAModel,
"JSON returned cannot be used to create a model")
# Ensure we have a time of day field
self.assertIsNotNone(encodersDict['c0_timeOfDay'])
# Ensure resolution doesn't get too low
if encodersDict['c1']['type'] == 'RandomDistributedScalarEncoder':
self.assertGreaterEqual(encodersDict['c1']['resolution'], 0.001,
"Resolution is too low")
# Ensure tm_cpp returns correct json file
params = getScalarMetricWithTimeOfDayAnomalyParams([0], tmImplementation="tm_cpp")
self.assertEqual(params['modelConfig']['modelParams']['tpParams']['temporalImp'], "tm_cpp",
"Incorrect json for tm_cpp tmImplementation")
# Ensure incorrect tmImplementation throws exception
with self.assertRaises(ValueError):
getScalarMetricWithTimeOfDayAnomalyParams([0], tmImplementation="")
if __name__ == '__main__':
unittest.main()
| agpl-3.0 | -1,303,442,580,877,357,600 | 39.194444 | 95 | 0.661023 | false | 4.593651 | true | false | false |
espenak/enkel | enkel/wansgli/http.py | 1 | 5660 | # This file is part of the Enkel web programming library.
#
# Copyright (C) 2007 Espen Angell Kristiansen (espen@wsgi.net)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
""" Defines a basic standalone WSGI server/handler.
WSGI is specified in PEP 333 which can be found
U{here <http://www.python.org/dev/peps/pep-0333>}.
"""
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn, ForkingMixIn
from os import environ
from datetime import datetime
from sys import stderr
import logging
from server_base import WsgiServerMixIn, LoggerAsErrorFile
from apprunner import run_app, Response
from utils import rfc1123_date
from env import urlpath_to_environ
class HttpServerResponse(Response):
""" Adds automatic adding of required http headers
to the L{apprunner.Response} class. Headers are only added
when not supplied by the app. These headers are handled:
- server (defaults to L{__init__} parameter I{server_info})
- date (defaults to the UTC/GMT time when the response is sent)
"""
def __init__(self, server_info, *args, **kw):
super(HttpServerResponse, self).__init__(*args, **kw)
self.server_info = server_info
def validate_header(self, name, value):
if name in ("server", "date"):
try:
del self.extra_headers[name]
except KeyError:
pass
def generate_headers(self):
self.extra_headers["server"] = self.server_info
self.extra_headers["date"] = rfc1123_date(datetime.utcnow())
return super(HttpServerResponse, self).generate_headers()
class WsgiRequestHandler(BaseHTTPRequestHandler):
""" A WSGI request handler. You do not call this directly,
but send it as a parameter to L{Server.__init__}.
@cvar ENV: Default values for the WSGI environ dict. See
L{create_env} for more information.
"""
ENV = {}
def do_GET(self):
self.handle_wsgi_request("GET")
def do_POST(self):
self.handle_wsgi_request("POST")
def do_OPTIONS(self):
self.handle_wsgi_request("OPTIONS")
def do_HEAD(self):
self.handle_wsgi_request("HEAD")
def do_PUT(self):
self.handle_wsgi_request("PUT")
def do_DELETE(self):
self.handle_wsgi_request("DELETE")
def do_TRACE(self):
self.handle_wsgi_request("TRACE")
def do_CONNECT(self):
self.handle_wsgi_request("CONNECT")
def create_env(self, method):
""" Create the WSGI environ dict.
These variables are defined:
- byte strings:
- REQUEST_METHOD
- SERVER_PROTOCOL
- SERVER_NAME
- SERVER_PORT
- CONTENT_TYPE
- CONTENT_LENGTH
- REMOTE_ADDR
- wsgi.url_scheme
- wsgi.version
- wsgi.input (file-like object)
- wsgi.errors (file-like object)
- wsgi.multithread (bool)
- wsgi.run_once (bool)
And all HTTP-headers provided by the client prefixed with
'HTTP_'.
@note: This is the most minimal environment allowed by
PEP 333. You might wish to subclass this to provide
more environment variables.
@return: The WSGI environ dict to be sent to the application.
"""
env = self.ENV.copy()
if not (len(self.server.server_address) == 2 and \
isinstance(self.server.server_address[1], int)):
raise ValueError("can only listen to internet protocol "\
"server_address'es, like ('localhost', 8000).")
env.update({
"REQUEST_METHOD": method,
"SERVER_PROTOCOL": self.protocol_version,
"SERVER_NAME": self.server.server_address[0],
"SERVER_PORT": str(self.server.server_address[1]),
"CONTENT_TYPE": self.headers.get("content-type", ""),
"CONTENT_LENGTH": self.headers.get("content-length", ""),
"REMOTE_ADDR": self.client_address[0],
"wsgi.input": self.rfile
})
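        # add_common_wsgienv() supplies the server-wide variables listed above
        # (wsgi.errors, wsgi.version, wsgi.url_scheme and the multithread/run_once flags).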
self.server.add_common_wsgienv(env)
# Add all http headers client provided
for name in self.headers:
value = self.headers.get(name)
env["HTTP_" + name.upper()] = value
return env
def handle_wsgi_request(self, method):
""" Create a WSGI environ dict (using L{create_env} and run
the app. """
# Create the WSGI environ dict
env = self.create_env(method)
self.server.log.info("connected by %s" % str(self.client_address))
# parse path
urlpath_to_environ(env, self.path)
req = HttpServerResponse(self.server.server_info, self.wfile, env,
self.server.debug)
run_app(self.server.app, req)
class Server(HTTPServer, WsgiServerMixIn):
""" A synchronous HTTP WSGI server.
Works more or less like L{scgi.Server} which is
much better documented.
"""
REQUEST_HANDLER = WsgiRequestHandler
url_scheme = "http"
log = logging.getLogger("enkel.wansgli.http.server")
applog = LoggerAsErrorFile(logging.getLogger(
"enkel.wansgli.http.app"))
def __init__(self, app, server_address=("",9000)):
"""
@param app: A WSGI app as defined in PEP 333.
"""
self.app = app
HTTPServer.__init__(self, server_address, self.REQUEST_HANDLER)
class ThreadingServer(ThreadingMixIn, Server):
""" A threading HTTP WSGI server. """
MULTITHREAD = True
class ForkingServer(ForkingMixIn, Server):
""" A forking HTTP WSGI server. """
MULTIPROCESS = True
| gpl-2.0 | -4,034,770,585,486,457,000 | 28.789474 | 81 | 0.712721 | false | 3.349112 | false | false | false |
danielru/pySDC | pySDC/implementations/problem_classes/AdvectionEquation_1D_FD.py | 1 | 6257 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import splu
from pySDC.core.Problem import ptype
from pySDC.core.Errors import ParameterError, ProblemError
# noinspection PyUnusedLocal
class advection1d(ptype):
"""
Example implementing the unforced 1D advection equation with periodic BC in [0,1],
discretized using upwinding finite differences
Attributes:
A: FD discretization of the gradient operator using upwinding
dx: distance between two spatial nodes
"""
def __init__(self, problem_params, dtype_u, dtype_f):
"""
Initialization routine
Args:
problem_params (dict): custom parameters for the example
dtype_u: mesh data type (will be passed parent class)
dtype_f: mesh data type (will be passed parent class)
"""
# these parameters will be used later, so assert their existence
essential_keys = ['nvars', 'c', 'freq']
for key in essential_keys:
if key not in problem_params:
msg = 'need %s to instantiate problem, only got %s' % (key, str(problem_params.keys()))
raise ParameterError(msg)
# we assert that nvars looks very particular here.. this will be necessary for coarsening in space later on
if (problem_params['nvars']) % 2 != 0:
raise ProblemError('setup requires nvars = 2^p')
if problem_params['freq'] >= 0 and problem_params['freq'] % 2 != 0:
raise ProblemError('need even number of frequencies due to periodic BCs')
if 'order' not in problem_params:
problem_params['order'] = 1
if 'type' not in problem_params:
problem_params['type'] = 'upwind'
# invoke super init, passing number of dofs, dtype_u and dtype_f
super(advection1d, self).__init__(init=problem_params['nvars'], dtype_u=dtype_u, dtype_f=dtype_f,
params=problem_params)
# compute dx and get discretization matrix A
self.dx = 1.0 / self.params.nvars
self.A = self.__get_A(self.params.nvars, self.params.c, self.dx, self.params.order, self.params.type)
@staticmethod
def __get_A(N, c, dx, order, type):
"""
Helper function to assemble FD matrix A in sparse format
Args:
N (int): number of dofs
c (float): diffusion coefficient
dx (float): distance between two spatial nodes
order (int): specifies order of discretization
type (string): upwind or centered differences
Returns:
scipy.sparse.csc_matrix: matrix A in CSC format
"""
coeff = None
stencil = None
zero_pos = None
if type == 'center':
if order == 2:
stencil = [-1.0, 0.0, 1.0]
zero_pos = 2
coeff = 1.0 / 2.0
elif order == 4:
stencil = [1.0, -8.0, 0.0, 8.0, -1.0]
zero_pos = 3
coeff = 1.0 / 12.0
elif order == 6:
stencil = [-1.0, 9.0, -45.0, 0.0, 45.0, -9.0, 1.0]
zero_pos = 4
coeff = 1.0 / 60.0
else:
raise ProblemError("Order " + str(order) + " not implemented.")
else:
if order == 1:
stencil = [-1.0, 1.0]
coeff = 1.0
zero_pos = 2
elif order == 2:
stencil = [1.0, -4.0, 3.0]
coeff = 1.0 / 2.0
zero_pos = 3
elif order == 3:
stencil = [1.0, -6.0, 3.0, 2.0]
coeff = 1.0 / 6.0
zero_pos = 3
elif order == 4:
stencil = [-5.0, 30.0, -90.0, 50.0, 15.0]
coeff = 1.0 / 60.0
zero_pos = 4
elif order == 5:
stencil = [3.0, -20.0, 60.0, -120.0, 65.0, 12.0]
coeff = 1.0 / 60.0
zero_pos = 5
else:
raise ProblemError("Order " + str(order) + " not implemented.")
dstencil = np.concatenate((stencil, np.delete(stencil, zero_pos - 1)))
offsets = np.concatenate(([N - i - 1 for i in reversed(range(zero_pos - 1))],
[i - zero_pos + 1 for i in range(zero_pos - 1, len(stencil))]))
doffsets = np.concatenate((offsets, np.delete(offsets, zero_pos - 1) - N))
A = sp.diags(dstencil, doffsets, shape=(N, N), format='csc')
A *= c * coeff * (1.0 / dx)
return A
def eval_f(self, u, t):
"""
Routine to evaluate the RHS
Args:
u (dtype_u): current values
t (float): current time
Returns:
dtype_f: the RHS
"""
f = self.dtype_f(self.init)
f.values = -1.0 * self.A.dot(u.values)
return f
def solve_system(self, rhs, factor, u0, t):
"""
Simple linear solver for (I+factor*A)u = rhs
Args:
rhs (dtype_f): right-hand side for the linear system
factor (float) : abbrev. for the node-to-node stepsize (or any other factor required)
u0 (dtype_u): initial guess for the iterative solver (not used here so far)
t (float): current time (e.g. for time-dependent BCs)
Returns:
dtype_u: solution as mesh
"""
me = self.dtype_u(self.init)
L = splu(sp.eye(self.params.nvars, format='csc') + factor * self.A)
me.values = L.solve(rhs.values)
return me
def u_exact(self, t):
"""
Routine to compute the exact solution at time t
Args:
t (float): current time
Returns:
dtype_u: exact solution
"""
me = self.dtype_u(self.init)
if self.params.freq >= 0:
xvalues = np.array([i * self.dx for i in range(self.params.nvars)])
me.values = np.sin(np.pi * self.params.freq * (xvalues - self.params.c * t))
else:
np.random.seed(1)
me.values = np.random.rand(self.params.nvars)
return me
| bsd-2-clause | 5,972,382,351,035,305,000 | 32.639785 | 115 | 0.523733 | false | 3.731067 | false | false | false |
yonglehou/docker-fabric | dockerfabric/utils/files.py | 1 | 3529 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import shutil
import tarfile
import tempfile
from fabric.api import run, sudo
from fabric.context_managers import documented_contextmanager
from dockermap.shortcuts import rm, chmod, chown
from .output import single_line_stdout
_safe_name = lambda tarinfo: tarinfo.name[0] != '/' and not '..' in tarinfo.name
def get_remote_temp():
"""
Creates a temporary directory on the remote end. Uses the command ``mktemp`` to do so.
:return: Path to the temporary directory.
:rtype: unicode
"""
return single_line_stdout('mktemp -d')
def remove_ignore(path, use_sudo=False):
"""
Recursively removes a file or directory, ignoring any errors that may occur. Should only be used for temporary
files that can be assumed to be cleaned up at a later point.
:param path: Path to file or directory to remove.
:type path: unicode
:param use_sudo: Use the `sudo` command.
:type use_sudo: bool
"""
which = sudo if use_sudo else run
which(rm(path, recursive=True), warn_only=True)
def is_directory(path, use_sudo=False):
"""
Check if the remote path exists and is a directory.
:param path: Remote path to check.
:type path: unicode
:param use_sudo: Use the `sudo` command.
:type use_sudo: bool
:return: `True` if the path exists and is a directory; `False` if it exists, but is a file; `None` if it does not
exist.
:rtype: bool or ``None``
"""
result = single_line_stdout('if [[ -f {0} ]]; then echo 0; elif [[ -d {0} ]]; then echo 1; else echo -1; fi'.format(path), sudo=use_sudo, quiet=True)
if result == '0':
return False
elif result == '1':
return True
else:
return None
@documented_contextmanager
def temp_dir(apply_chown=None, apply_chmod=None):
"""
Creates a temporary directory on the remote machine. The directory is removed when no longer needed. Failure to do
so will be ignored.
:param apply_chown: Optional; change the owner of the directory.
:type apply_chown: bool
:param apply_chmod: Optional; change the permissions of the directory.
:type apply_chmod: bool
:return: Path to the temporary directory.
:rtype: unicode
"""
path = get_remote_temp()
if apply_chmod:
run(chmod(apply_chmod, path))
if apply_chown:
sudo(chown(apply_chown, path))
yield path
remove_ignore(path, True)
@documented_contextmanager
def local_temp_dir():
"""
Creates a local temporary directory. The directory is removed when no longer needed. Failure to do
so will be ignored.
:return: Path to the temporary directory.
:rtype: unicode
"""
path = tempfile.mkdtemp()
yield path
shutil.rmtree(path, ignore_errors=True)
def extract_tar(filename, dest_path, **kwargs):
"""
Extracts a TAR archive. All element names starting with ``/`` (indicating an absolute path) or that contain ``..``
as references to a parent directory are not extracted.
:param filename: Path to the tar file.
:type filename: unicode
:param dest_path: Destination path to extract the contents to.
:type dest_path: unicode
:param kwargs: Additional kwargs for opening the TAR file (:func:`tarfile.open`).
"""
with tarfile.open(filename, 'r', **kwargs) as tf:
safe_members = [name for name in tf.getmembers() if _safe_name(name)]
if safe_members:
tf.extractall(dest_path, safe_members)
| mit | -5,438,494,212,372,370,000 | 30.230088 | 153 | 0.667328 | false | 3.85262 | false | false | false |
llimllib/limbo | limbo/plugins/weather.py | 1 | 3803 | # -*- coding: utf-8 -*-
"""!weather <zip or place name> return the 5-day forecast
Three environment variables control the behavior of this plugin:
MAPBOX_API_TOKEN: must be set to a valid Mapbox API token
https://docs.mapbox.com/api/search/#geocoding
OPENWEATHER_API_KEY: must be set to a valid OpenWeather API key
https://openweathermap.org/current
https://openweathermap.org/forecast5
WEATHER_CELSIUS: if this environment variable is present with any value,
the plugin will report temperatures in celsius instead of
farenheit
"""
try:
from urllib import quote
except ImportError:
from urllib.request import quote
import json
import os
import re
from datetime import datetime
import requests
# https://openweathermap.org/weather-conditions
ICONMAP = {
"01d": ":sunny:",
"01n": ":moon:",
"02d": ":sun_behind_cloud:",
"02n": ":sun_behind_cloud:",
"03d": ":cloud:",
"03n": ":cloud:",
"04d": ":cloud:",
"04n": ":cloud:",
"09d": ":rain_cloud:",
"09n": ":rain_cloud:",
"10d": ":sun_behind_rain_cloud:",
"10n": ":sun_behind_rain_cloud:",
"11d": ":thunder_cloud_and_rain:",
"11n": ":thunder_cloud_and_rain:",
"13d": ":snowflake:",
"13n": ":snowflake:",
"50d": ":fog:",
"50n": ":fog:",
}
CELSIUS = "metric"
IMPERIAL = "imperial"
MAPBOX_API_TOKEN = os.environ.get("MAPBOX_API_TOKEN")
OPENWEATHER_API_KEY = os.environ.get("OPENWEATHER_API_KEY")
def weather(searchterm):
"""Get the weather for a place given by searchterm
Returns a title and a list of forecasts.
The title describes the location for the forecast (i.e. "Portland, ME USA")
The list of forecasts is a list of dictionaries in slack attachment fields
format (see https://api.slack.com/docs/message-attachments)
"""
unit = CELSIUS if os.environ.get("WEATHER_CELSIUS") else IMPERIAL
unit_abbrev = "f" if unit == IMPERIAL else "c"
geo = requests.get(
"https://api.mapbox.com/geocoding/v5/mapbox.places/{}.json?limit=1&access_token={}".format(
quote(searchterm.encode("utf8")), MAPBOX_API_TOKEN
)
).json()
citystate = geo["features"][0]["place_name"]
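    # Mapbox returns the feature center as [longitude, latitude].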
lon, lat = geo["features"][0]["center"]
title = "Weather for {}: ".format(citystate)
forecast = requests.get(
"https://api.openweathermap.org/data/2.5/forecast/daily?lat={:.2f}&lon={:.2f}&cnt=4&units={}&appid={}".format(
lat, lon, unit, OPENWEATHER_API_KEY
)
).json()
if forecast["cod"] != "200":
raise KeyError("Invalid OpenWeatherMap key")
messages = []
for cast in forecast["list"]:
# do I need to mess with tz at all, or is this accurate enough?
dt = datetime.fromtimestamp(cast["dt"]).strftime("%A")
high = int(round(cast["temp"]["max"]))
icon = ICONMAP.get(cast["weather"][0]["icon"], ":question:")
messages.append(
{
"title": dt,
"value": u"{} {}°{}".format(icon, high, unit_abbrev),
"short": True,
}
)
return title, messages
def on_message(msg, server):
text = msg.get("text", "")
match = re.findall(r"!weather (.*)", text)
if not match:
return
try:
title, forecasts = weather(match[0])
except KeyError as err:
return "KeyError: {}".format(err.args[0])
attachment = {"fallback": title, "pretext": title, "fields": forecasts[0:4]}
server.slack.post_message(
msg["channel"],
"",
as_user=server.slack.username,
attachments=json.dumps([attachment]),
thread_ts=msg.get("thread_ts", None),
)
on_bot_message = on_message
| mit | -6,648,842,372,692,528,000 | 29.910569 | 118 | 0.593372 | false | 3.400716 | false | false | false |
MySmile/mysmile | apps/pages/admin.py | 1 | 3328 | import os
from django.contrib import admin
from django.conf import settings
from apps.pages.models import Page, Page_translation
from apps.pages.forms import Page_translationInlineForm, PageForm
class Page_translationInline(admin.StackedInline):
model = Page_translation
form = Page_translationInlineForm
verbose_name = 'Lang'
extra = 0
fieldsets = [
('Content', {'fields': ['lang', 'menu', 'name', 'col_central',
'youtube', 'photo_description', 'col_right',
'col_bottom_1', 'col_bottom_2', 'col_bottom_3'],
'classes': ['collapse']}),
('SEO', {'fields': ['meta_title', 'meta_description', 'meta_keywords',
'photo_alt'], 'classes': ['collapse']}),
]
search_fields = ['col_central', 'col_right', 'col_bottom_1', 'col_bottom_2',
'col_bottom_3']
max_num = len(settings.LANGUAGES)
def get_queryset(self, request):
return Page_translation.objects.filter(lang__in=[x[0] for x in settings.LANGUAGES])
class PageAdmin(admin.ModelAdmin):
form = PageForm
inlines = [Page_translationInline]
save_on_top = True
readonly_fields = ('photo_thumb',)
view_on_site = True
def date_update(self, model):
return model.updated_at.strftime('%d %B %Y, %H:%M')
def waiting_for_translation(self, model):
""" Flag doesn't display if translation prepared
"""
flags = ''
for item in settings.LANGUAGES:
if not Page_translation.objects.filter(page_id=model.id, lang=item[0]):
flags += """<img src="/static/themes/""" + settings.MYSMILE_THEME + \
"""/images/flags/""" + item[0] + """.png" alt= " """ + \
item[1] + """ "/>"""
return flags
waiting_for_translation.short_description = 'waiting for translation'
waiting_for_translation.allow_tags = True
def get_list_display(self, request):
"""
        Hide empty columns "photo_thumb" and "waiting_for_translation"
"""
pages = Page.objects.all().count()
pages_translation = Page_translation.objects.all().count()
pages_blankphoto = Page.objects.filter(photo='').count()
self.list_display = ('slug', 'status', 'ptype', 'sortorder',)
if pages_blankphoto < pages: # at least one photo exist
self.list_display += ('photo_thumb', )
if pages*len(settings.LANGUAGES) != pages_translation:
self.list_display += ('waiting_for_translation',)
return self.list_display + ('date_update',)
def get_fieldsets(self, request, obj=None):
fieldsets = super(PageAdmin, self).get_fieldsets(request, obj)
if obj:
photo = Page.objects.filter(id=obj.id).values_list('photo', flat=True)[0]
if photo:
fieldsets = [('Settings', {'fields': ['slug', 'status', 'ptype', 'sortorder',
'color', ('photo', 'photo_thumb')]}), ]
else:
fieldsets = [('Settings', {'fields': ['slug', 'status', 'ptype', 'sortorder',
'color', ('photo',)]}), ]
return fieldsets
admin.site.register(Page, PageAdmin)
| bsd-3-clause | -3,575,291,305,965,562,000 | 38.152941 | 93 | 0.5628 | false | 4 | false | false | false |
iLoop2/ResInsight | ThirdParty/Ert/devel/python/python/ert/sched/sched_file.py | 1 | 1911 | # Copyright (C) 2012 Statoil ASA, Norway.
#
# The file 'sched_file.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from ert.cwrap import BaseCClass, CWrapper
from ert.sched import SCHED_LIB
from ert.util import CTime
class SchedFile(BaseCClass):
def __init__(self, filename, start_time):
c_ptr = SchedFile.cNamespace().parse(filename, CTime(start_time))
super(SchedFile, self).__init__(c_ptr)
@property
def length(self):
""" @rtype: int """
return SchedFile.cNamespace().length(self)
def write(self, filename, num_dates, add_end=True):
SchedFile.cNamespace().write(self, num_dates, filename, add_end)
def free(self):
SchedFile.cNamespace().free(self)
cwrapper = CWrapper(SCHED_LIB)
cwrapper.registerType("sched_file", SchedFile)
cwrapper.registerType("sched_file_obj", SchedFile.createPythonObject)
cwrapper.registerType("sched_file_ref", SchedFile.createCReference)
SchedFile.cNamespace().parse = cwrapper.prototype("c_void_p sched_file_parse_alloc( char*, time_t )")
SchedFile.cNamespace().write = cwrapper.prototype("void sched_file_fprintf_i( sched_file , int , char* , bool)")
SchedFile.cNamespace().length = cwrapper.prototype("int sched_file_get_num_restart_files( sched_file )")
SchedFile.cNamespace().free = cwrapper.prototype("void sched_file_free( sched_file )")
| gpl-3.0 | -5,625,121,413,102,356,000 | 40.543478 | 112 | 0.714809 | false | 3.294828 | false | false | false |
zerosum0x0/koadic | modules/implant/gather/user_hunter.py | 1 | 2845 | import core.implant
import core.job
import string
import uuid
class UserHunterJob(core.job.Job):
def create(self):
self.fork32Bit = True
self.options.set("DLLUUID", uuid.uuid4().hex)
self.options.set("MANIFESTUUID", uuid.uuid4().hex)
self.options.set("DIRECTORY", self.options.get('DIRECTORY').replace("\\", "\\\\").replace('"', '\\"'))
def report(self, handler, data, sanitize = False):
data = data.decode('latin-1')
task = handler.get_header(self.options.get("UUIDHEADER"), False)
if task == self.options.get("DLLUUID"):
handler.send_file(self.options.get("DYNWRAPXDLL"))
return
if task == self.options.get("MANIFESTUUID"):
handler.send_file(self.options.get("DYNWRAPXMANIFEST"))
return
if len(data) == 0:
handler.reply(200)
return
if data == "Complete":
super(UserHunterJob, self).report(handler, data)
elif "***" in data:
self.parse_sessions_data(data)
handler.reply(200)
def parse_sessions_data(self, data):
self.print_good("Session data retrieved")
sessions = data.split("***")
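        # Each entry looks like "<user>:<host1>,<host2>,..."; machine accounts (user names containing $) are skipped.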
for session in sessions:
if session:
user = session.split(":")[0]
if "$" in user:
continue # not concerned with machine accounts
comps = ", ".join(list(set(session.split(":")[1].split(","))))
self.shell.print_plain(user + " => " + comps)
self.results += user + " => " + comps + "\n"
def done(self):
self.display()
def display(self):
pass
# try:
# self.print_good(self.data)
# except:
# pass
class UserHunterImplant(core.implant.Implant):
NAME = "User Hunter"
DESCRIPTION = "Identifies and locates all logged in users"
AUTHORS = ["TheNaterz"]
STATE = "implant/gather/user_hunter"
def load(self):
self.options.register("DIRECTORY", "%TEMP%", "writeable directory on zombie", required=False)
self.options.register("DYNWRAPXDLL", "data/bin/dynwrapx.dll", "relative path to dynwrapx.dll", required=True, advanced=True)
self.options.register("DYNWRAPXMANIFEST", "data/bin/dynwrapx.manifest", "relative path to dynwrapx.manifest", required=True, advanced=True)
self.options.register("UUIDHEADER", "ETag", "HTTP header for UUID", advanced=True)
self.options.register("DLLUUID", "", "HTTP header for UUID", hidden=True)
self.options.register("MANIFESTUUID", "", "UUID", hidden=True)
def job(self):
return UserHunterJob
def run(self):
workloads = {}
workloads["js"] = "data/implant/gather/user_hunter.js"
self.dispatch(workloads, self.job)
| apache-2.0 | 80,671,390,199,258,530 | 33.277108 | 147 | 0.589455 | false | 3.77822 | false | false | false |
jinankjain/zamboni | lib/urls_base.py | 1 | 2123 | from django.conf import settings
from django.conf.urls import include, patterns, url
from django.contrib import admin
from django.shortcuts import redirect
from django.views.decorators.cache import cache_page
from django.views.i18n import javascript_catalog
admin.autodiscover()
handler403 = 'amo.views.handler403'
handler404 = 'amo.views.handler404'
handler500 = 'amo.views.handler500'
urlpatterns = patterns('',
# AMO homepage or Marketplace Developer Hub? Choose your destiny.
url('^$', settings.HOME, name='home'),
# Add-ons.
('', include('addons.urls')),
# Tags.
('', include('tags.urls')),
# Files
('^files/', include('files.urls')),
# AMO admin (not django admin).
('^admin/', include('zadmin.urls')),
# App versions.
('pages/appversions/', include('applications.urls')),
# Services
('', include('amo.urls')),
# Paypal
('^services/', include('paypal.urls')),
# Javascript translations.
url('^jsi18n.js$', cache_page(60 * 60 * 24 * 365)(javascript_catalog),
{'domain': 'javascript', 'packages': ['zamboni']}, name='jsi18n'),
# Redirect persona/xxx
('^getpersonas$',
lambda r: redirect('http://www.getpersonas.com/gallery/All/Popular',
permanent=True)),
url('^persona/(?P<persona_id>\d+)', 'addons.views.persona_redirect',
name='persona'),
# Redirect top-tags to tags/top
('^top-tags/?',
lambda r: redirect('tags.top_cloud', permanent=True)),
('^addons/contribute/(\d+)/?$',
lambda r, id: redirect('addons.contribute', id, permanent=True)),
)
if settings.TEMPLATE_DEBUG:
# Remove leading and trailing slashes so the regex matches.
media_url = settings.MEDIA_URL.lstrip('/').rstrip('/')
urlpatterns += patterns('',
(r'^%s/(?P<path>.*)$' % media_url, 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
)
if settings.SERVE_TMP_PATH and settings.DEBUG:
urlpatterns += patterns('',
(r'^tmp/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.TMP_PATH}),
)
| bsd-3-clause | 5,694,466,639,472,303,000 | 28.082192 | 74 | 0.628356 | false | 3.666667 | false | true | false |
thisismyrobot/motorsport-dev | py-gps/src/output/visual/opengl.py | 1 | 2609 | import pyglet.window
import pyglet.image
class IsoRenderer(object):
""" Renders a 3D view of a map
"""
cam_rx, cam_ry, cam_rz = 45, 0, 0
cam_x, cam_y, cam_z = 0, 0, -1000
w, h = 640, 480
far = 10000
fov = 60
def __init__(self):
self.load_map()
self.create_window()
self.setup_gl_params()
def create_window(self):
self.window = pyglet.window.Window(fullscreen=False, resizable=True)
self.window.width=1280
self.window.height=800
self.window.on_resize=self.resize_view
def setup_gl_params(self):
pyglet.gl.glEnable(pyglet.gl.GL_BLEND)
pyglet.gl.glBlendFunc(pyglet.gl.GL_SRC_ALPHA, pyglet.gl.GL_ONE_MINUS_SRC_ALPHA)
pyglet.gl.glDepthFunc(pyglet.gl.GL_LEQUAL)
pyglet.gl.glEnable(pyglet.gl.GL_LINE_SMOOTH)
pyglet.gl.glHint(pyglet.gl.GL_LINE_SMOOTH_HINT, pyglet.gl.GL_DONT_CARE)
def load_map(self):
""" takes a PIL image.
"""
map_img = pyglet.image.load('img.gif')
self.map_tex = map_img.get_texture()
def draw_map(self):
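        # Lay the 1024x1024 map texture flat on the ground plane and trace its border with a red line loop.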
pyglet.gl.glPushMatrix()
pyglet.gl.glTranslatef(-512, 0, 512)
pyglet.gl.glRotatef(-90, 1.0, 0.0, 0.0)
self.map_tex.blit(0, 0, 0, 1024, 1024)
pyglet.gl.glPopMatrix()
pyglet.gl.glColor4f(1.0, 0.0, 0.0, 1)
pyglet.gl.glLineWidth(1.5);
pyglet.gl.glBegin(pyglet.gl.GL_LINE_LOOP)
pyglet.gl.glVertex3f(512, 0, 512)
pyglet.gl.glVertex3f(-512, 0, 512)
pyglet.gl.glVertex3f(-512, 0, -512)
pyglet.gl.glVertex3f(512, 0, -512)
pyglet.gl.glEnd()
pyglet.gl.glColor4f(1.0, 1.0, 1.0, 1)
def render(self):
self.window.dispatch_events()
pyglet.gl.glClear(pyglet.gl.GL_COLOR_BUFFER_BIT | pyglet.gl.GL_DEPTH_BUFFER_BIT)
self.apply_camera()
self.draw_map()
self.window.flip()
def apply_camera(self):
pyglet.gl.glLoadIdentity()
pyglet.gl.glTranslatef(self.cam_x, self.cam_y, self.cam_z)
pyglet.gl.glRotatef(self.cam_rx,1,0,0)
pyglet.gl.glRotatef(self.cam_ry,0,1,0)
pyglet.gl.glRotatef(self.cam_rz,0,0,1)
def resize_view(self, width, height):
self.w,self.h=width,height
pyglet.gl.glViewport(0, 0, width, height)
pyglet.gl.glMatrixMode(pyglet.gl.GL_PROJECTION)
pyglet.gl.glLoadIdentity()
pyglet.gl.gluPerspective(self.fov, float(self.w)/self.h, 0.1, self.far)
pyglet.gl.glMatrixMode(pyglet.gl.GL_MODELVIEW) | unlicense | 8,750,948,577,670,690,000 | 32.813333 | 88 | 0.592947 | false | 2.928171 | false | false | false |
andrewsy97/Treehacks | groupy_files/groupy/object/attachments.py | 1 | 6253 | """
.. module:: attachments
:platform: Unix, Windows
:synopsis: A module containing all attachment classes
.. moduleauthor:: Robert Grant <rhgrant10@gmail.com>
This module contains classes for the different types of attachments.
"""
from ..api import endpoint
class Attachment:
"""Base class for attachments.
:param str type_: the type of the attachment
"""
def __init__(self, type_):
self.type = type_
def as_dict(self):
"""Return the attachment as a dictionary.
:returns: the attachment as a dictionary
:rtype: :class:`dict`
"""
return self.__dict__
class GenericAttachment(Attachment):
"""A generic attachment.
This attachment accepts any keyword arguments, but must be given a
particular type.
:param str type: the type of attachment
"""
def __init__(self, type, **kwargs):
super().__init__(type)
for k, v in kwargs.items():
setattr(self, k, v)
class Image(Attachment):
"""An image attachemnt.
Image attachments do not contain an image. Instead, they specify a URL from
which the image can be downloaded and must have a domain of
"i.groupme.com". Such URLs are known as "i" URLs, and are from the GroupMe
image service.
.. note::
Use the direct initializer *if and only if* the image already has a
known GroupMe image service URL. Otherwise, use the
:func:`~groupy.object.attachments.Image.file` method.
:param str url: the URL at which the image can be fetched from the GroupMe
image service
:param str source_url: the original URL of the image (optional)
"""
def __init__(self, url, source_url=None):
super().__init__('image')
self.url = url
self.source_url = source_url
def __repr__(self):
return "Image(url={!r})".format(self.url)
@classmethod
def file(cls, image):
"""Upload an image file and return it as an attachment.
:param image: the file containing the image data
:type image: :class:`file`
:returns: an image attachment
:rtype: :class:`~groupy.object.attachments.Image`
"""
return cls(endpoint.Images.create(image)['url'])
def download(self):
"""Download the image data of the image attachment.
:returns: the actual image the image attachment references
:rtype: :class:`PIL.Image.Image`
"""
return endpoint.Images.download(self.url)
class Location(Attachment):
"""An attachment that specifies a geo-location.
In addition to latitude and longitude, every location attachment also
specifies a name. Some (especially older) location attachments also contain
a ``foursquare_venue_id`` attribute.
:param str name: the location name
:param float lat: the latitude
:param float lng: the longitude
:param str foursquare_venue_id: the FourSquare venue ID (optional)
"""
def __init__(self, name, lat, lng, foursquare_venue_id=None):
super().__init__('location')
self.name = name
self.lat = lat
self.lng = lng
self.foursquare_venue_id = foursquare_venue_id
def __repr__(self):
return "Location(name={!r}, lat={!r}, lng={!r})".format(
self.name, self.lat, self.lng)
class Emoji(Attachment):
"""An attachment containing emoticons.
Emoji attachments do not contain any emoticon images. Instead, a
placeholder specifies the location of the emoticon in the text, and a
``charmap`` facilitates translation into the emoticons.
:param str placeholder: a high-point/invisible character indicating the
position of the emoticon
:param list charmap: a list of lists containing pack IDs and offsets
"""
def __init__(self, placeholder, charmap):
super().__init__('emoji')
self.placeholder = placeholder
self.charmap = charmap
def __repr__(self):
return "Emoji(placeholder={!r}, charmap={!r})".format(
self.placeholder, self.charmap)
class Split(Attachment):
"""An attachment containing information for splitting a bill.
    This type of attachment is deprecated. However, such attachments are still
present in older messages.
:param str token: the token that splits the bill
"""
def __init__(self, token):
super().__init__('split')
self.token = token
def __repr__(self):
return "Split(token={!r})".format(self.token)
class Mentions(Attachment):
"""An attachment that specifies "@" mentions.
Mentions are a new addition to the types of attachments. Each contains two
parallel lists: ``user_ids`` and ``loci``. The elements in ``loci`` specify
the start index and length of the mention, while the elements in
``user_ids`` specify by user_id which user was mentioned in the
corresponding element of ``loci``.
.. note::
The length of ``user_ids`` must be equal to the length of ``loci``!
:param list user_ids: a list of user IDs
:param list loci: a list of ``(start, length)`` elements
"""
def __init__(self, user_ids, loci=None):
super().__init__('mentions')
self.user_ids = user_ids
self.loci = loci
def __repr__(self):
return "Mentions({!r})".format(self.user_ids)
class AttachmentFactory:
"""A factory for creating attachments from dictionaries.
"""
_factories = {
'image': Image,
'location': Location,
'emoji': Emoji,
'mentions': Mentions,
'split': Split
}
@classmethod
def create(cls, **kwargs):
"""Create and return an attachment.
:param str type: the type of attachment to create; if unrecognized, a
generic attachment is returned
:returns: a subclass of :class:`~groupy.object.attachments.Attachment`
"""
t = kwargs.pop('type', None)
try:
return cls._factories[t](**kwargs)
except (TypeError, KeyError):
            # Either kwargs contained an unexpected keyword for attachment type
# t, or t is not a known attachment type
return GenericAttachment(t, **kwargs)
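# Illustrative sketch (added; not part of the original module): the factory
# dispatches on the 'type' key of an attachment dict returned by the API.
# The values below are hypothetical.
#
#     AttachmentFactory.create(type='location', name='HQ', lat=45.0, lng=-122.6)
#         -> a Location instance
#     AttachmentFactory.create(type='unknown-kind', foo='bar')
#         -> a GenericAttachment, since 'unknown-kind' has no registered factory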
| mit | 1,290,227,850,733,663,000 | 29.955446 | 79 | 0.632017 | false | 4.177021 | false | false | false |
JulienPobot/LeCubeMedia | src/lecube/mod/cmnfc.py | 1 | 1054 | # -*- coding: utf-8 -*-
import Tkinter as tk
import thread
import logging
class Application(tk.Frame):
def __init__(self, cube, master=None):
tk.Frame.__init__(self, master)
self.grid()
self.createWidgets()
self.cube = cube
def createWidgets(self):
self.quitButton = tk.Button(self, text='Tag Event',
command=self.onTag)
self.quitButton.grid()
self.tagStr = tk.StringVar()
self.userEntry = tk.Entry(self,textvariable=self.tagStr)
self.tagStr.set('USER:giloux@localhost:giloux')
self.userEntry.grid()
def onTag(self):
self.cube.tag_detection('NFC',self.userEntry.get())
def start_simulator(title,cube) :
app = Application(cube)
app.master.title(title)
app.mainloop()
def init(cube, params):
logging.info("Launching NFC simulator thread")
thread.start_new_thread(start_simulator,('NFC Simulator', cube))
#start_simulator() | apache-2.0 | 8,025,032,704,159,344,000 | 30.029412 | 68 | 0.58444 | false | 3.75089 | false | false | false |
droundy/deft | papers/thesis-scheirer/RG_generateID3.py | 2 | 1573 | import sys
import os
import pylab as plt
import time
temp=plt.linspace(.6,1.28,20)
temp = [temp[-1]]  # run only the last (highest) temperature point
ID = int(sys.argv[0].split('ID')[1].split('.py')[0])
print ID
print temp
for i in range(0, len(temp)):
    #if i %4 != ID:
    #    continue
    t = time.time()
    print '%d of %d' % (i, len(temp))
    # run RG_fn.py 25 times for this temperature point
    for _ in range(25):
        os.system('python RG_fn.py %f' % temp[i])
    elapsed = time.time() - t
    print(elapsed)
| gpl-2.0 | 4,527,302,552,630,158,000 | 32.468085 | 52 | 0.544183 | false | 2.608624 | false | false | false |
defionscode/ansible | lib/ansible/modules/net_tools/nios/nios_host_record.py | 17 | 8805 | #!/usr/bin/python
# Copyright (c) 2018 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: nios_host_record
version_added: "2.5"
author: "Peter Sprygada (@privateip)"
short_description: Configure Infoblox NIOS host records
description:
- Adds and/or removes instances of host record objects from
Infoblox NIOS servers. This module manages NIOS C(record:host) objects
using the Infoblox WAPI interface over REST.
- Updates instances of host record object from Infoblox NIOS servers.
requirements:
- infoblox-client
extends_documentation_fragment: nios
options:
name:
description:
- Specifies the fully qualified hostname to add or remove from
the system. User can also update the hostname as it is possible
to pass a dict containing I(new_name), I(old_name). See examples.
required: true
view:
description:
- Sets the DNS view to associate this host record with. The DNS
view must already be configured on the system
required: true
default: default
aliases:
- dns_view
configure_for_dns:
version_added: "2.7"
description:
      - Sets whether this host record is configured for DNS. Set this to
        false if the user needs to bypass DNS.
type: bool
required: false
default: true
aliases:
- dns
ipv4addrs:
description:
- Configures the IPv4 addresses for this host record. This argument
accepts a list of values (see suboptions)
aliases:
- ipv4
suboptions:
ipv4addr:
description:
- Configures the IPv4 address for the host record
required: true
aliases:
- address
configure_for_dhcp:
description:
          - Configure the host_record over DHCP instead of DNS. If the user
            sets this to true, a MAC address must also be provided.
required: false
aliases:
- dhcp
mac:
description:
          - Configures the hardware MAC address for the host record. If the
            user enables DHCP, a MAC address must also be provided.
required: false
aliases:
- mac
ipv6addrs:
description:
- Configures the IPv6 addresses for the host record. This argument
accepts a list of values (see options)
aliases:
- ipv6
suboptions:
ipv6addr:
description:
- Configures the IPv6 address for the host record
required: true
aliases:
- address
configure_for_dhcp:
description:
          - Configure the host_record over DHCP instead of DNS. If the user
            sets this to true, a MAC address must also be provided.
required: false
aliases:
- dhcp
aliases:
version_added: "2.6"
description:
- Configures an optional list of additional aliases to add to the host
record. These are equivalent to CNAMEs but held within a host
record. Must be in list format.
ttl:
description:
- Configures the TTL to be associated with this host record
extattrs:
description:
- Allows for the configuration of Extensible Attributes on the
instance of the object. This argument accepts a set of key / value
pairs for configuration.
comment:
description:
- Configures a text string comment to be associated with the instance
of this object. The provided text string will be configured on the
object instance.
state:
description:
- Configures the intended state of the instance of the object on
the NIOS server. When this value is set to C(present), the object
is configured on the device and when this value is set to C(absent)
the value is removed (if necessary) from the device.
default: present
choices:
- present
- absent
'''
EXAMPLES = '''
- name: configure an ipv4 host record
nios_host_record:
name: host.ansible.com
ipv4:
- address: 192.168.10.1
aliases:
- cname.ansible.com
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: add a comment to an existing host record
nios_host_record:
name: host.ansible.com
ipv4:
- address: 192.168.10.1
comment: this is a test comment
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: remove a host record from the system
nios_host_record:
name: host.ansible.com
state: absent
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: update an ipv4 host record
nios_host_record:
name: {new_name: host-new.ansible.com, old_name: host.ansible.com}
ipv4:
- address: 192.168.10.1
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: create an ipv4 host record bypassing DNS
nios_host_record:
name: new_host
ipv4:
- address: 192.168.10.1
dns: false
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: create an ipv4 host record over DHCP
nios_host_record:
name: host.ansible.com
ipv4:
- address: 192.168.10.1
dhcp: true
mac: 00-80-C8-E3-4C-BD
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.net_tools.nios.api import WapiModule
from ansible.module_utils.net_tools.nios.api import NIOS_HOST_RECORD
def ipaddr(module, key, filtered_keys=None):
''' Transforms the input value into a struct supported by WAPI
This function will transform the input from the playbook into a struct
that is valid for WAPI in the form of:
{
ipv4addr: <value>,
mac: <value>
}
This function does not validate the values are properly formatted or in
the acceptable range, that is left to WAPI.
'''
filtered_keys = filtered_keys or list()
objects = list()
for item in module.params[key]:
objects.append(dict([(k, v) for k, v in iteritems(item) if v is not None and k not in filtered_keys]))
return objects
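# Illustrative sketch (added; not from the upstream module): the transform is
# a plain dict filter. For a hypothetical parsed suboption item such as
#     {'ipv4addr': '192.168.10.1', 'configure_for_dhcp': False, 'mac': None}
# with filtered_keys=['address', 'dhcp'], the None-valued 'mac' entry is
# dropped and the remaining keys pass through unchanged, yielding
#     {'ipv4addr': '192.168.10.1', 'configure_for_dhcp': False}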
def ipv4addrs(module):
return ipaddr(module, 'ipv4addrs', filtered_keys=['address', 'dhcp'])
def ipv6addrs(module):
return ipaddr(module, 'ipv6addrs', filtered_keys=['address', 'dhcp'])
def main():
''' Main entry point for module execution
'''
ipv4addr_spec = dict(
ipv4addr=dict(required=True, aliases=['address'], ib_req=True),
configure_for_dhcp=dict(type='bool', required=False, aliases=['dhcp'], ib_req=True),
mac=dict(required=False, aliases=['mac'], ib_req=True)
)
ipv6addr_spec = dict(
ipv6addr=dict(required=True, aliases=['address'], ib_req=True),
configure_for_dhcp=dict(type='bool', required=False, aliases=['configure_for_dhcp'], ib_req=True),
mac=dict(required=False, aliases=['mac'], ib_req=True)
)
ib_spec = dict(
name=dict(required=True, ib_req=True),
view=dict(default='default', aliases=['dns_view'], ib_req=True),
ipv4addrs=dict(type='list', aliases=['ipv4'], elements='dict', options=ipv4addr_spec, transform=ipv4addrs),
ipv6addrs=dict(type='list', aliases=['ipv6'], elements='dict', options=ipv6addr_spec, transform=ipv6addrs),
configure_for_dns=dict(type='bool', default=True, required=False, aliases=['dns'], ib_req=True),
aliases=dict(type='list'),
ttl=dict(type='int'),
extattrs=dict(type='dict'),
comment=dict(),
)
argument_spec = dict(
provider=dict(required=True),
state=dict(default='present', choices=['present', 'absent'])
)
argument_spec.update(ib_spec)
argument_spec.update(WapiModule.provider_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
wapi = WapiModule(module)
result = wapi.run(NIOS_HOST_RECORD, ib_spec)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 8,154,821,391,732,588,000 | 29.894737 | 115 | 0.644634 | false | 4.009563 | true | false | false |
dr-guangtou/KungPao | kungpao/io.py | 1 | 4252 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""File Input/Output."""
import os
import pickle
import warnings
import numpy as np
from astropy.io import fits
__all__ = ['save_to_pickle', 'save_to_hickle', 'save_to_csv',
'save_to_fits', 'parse_reg_ellipse', 'psfex_extract',
'read_from_pickle', 'save_to_dill', 'read_from_dill']
def read_from_pickle(name, py2=False):
"""Read the data from Pickle file."""
if py2:
return pickle.load(open(name, "rb"), encoding='latin1')
return pickle.load(open(name, "rb"))
def save_to_pickle(obj, name):
"""Save an object to a cPickle/Pickle format binary file."""
output = open(name, 'wb')
pickle.dump(obj, output, protocol=2)
output.close()
return
def save_to_hickle(obj, name):
"""Save an object to a hickle/HDF5 format binary file."""
try:
import hickle
except ImportError:
raise Exception("### The Hickle package is required!")
output = open(name, 'wb')
hickle.dump(obj, output, protocol=2)
output.close()
return
def save_to_csv(array, name):
"""Save a numpy array to a CSV file.
    Use the dtype.names as column names if possible
"""
output = open(name, 'w')
colNames = array.dtype.names
output.write("#" + ', '.join(colNames) + '\n')
for item in array:
line = ''
for i in range(0, len(colNames)-1):
col = colNames[i]
line += str(item[col]) + ' , '
line += str(item[colNames[-1]]) + '\n'
output.write(line)
output.close()
return
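# Illustrative usage sketch (added; the array and file name are hypothetical):
# `array` is expected to be a structured/record array so that dtype.names is
# populated, e.g.
#
#     cat = np.array([(1, 20.5), (2, 21.3)],
#                    dtype=[('id', 'i4'), ('mag', 'f4')])
#     save_to_csv(cat, 'catalog.csv')
#
# which writes a "#id, mag" header followed by one row per element.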
def save_to_fits(img, fits_file, wcs=None, header=None, overwrite=True):
"""Save an image to FITS file."""
if wcs is not None:
wcs_header = wcs.to_header()
img_hdu = fits.PrimaryHDU(img, header=wcs_header)
else:
img_hdu = fits.PrimaryHDU(img)
if header is not None:
if 'SIMPLE' in header and 'BITPIX' in header:
img_hdu.header = header
else:
img_hdu.header.extend(header)
if os.path.islink(fits_file):
os.unlink(fits_file)
img_hdu.writeto(fits_file, overwrite=overwrite)
return
def parse_reg_ellipse(reg_file):
"""Parse a DS9 .reg files.
convert the Ellipse or Circle regions
into arrays of parameters for ellipse:
x, y, a, b, theta
"""
    if not os.path.isfile(reg_file):
        raise Exception("### Can not find the .reg file!")
# Parse the .reg file into lines
lines = [line.strip() for line in open(reg_file, 'r')]
# Coordinate type of this .reg file: e.g. 'image'
coord_type = lines[2].strip()
# Parse each region
regs = [reg.split(" ") for reg in lines[3:]]
xc = []
yc = []
ra = []
rb = []
theta = []
for reg in regs:
if reg[0].strip() == 'ellipse' and len(reg) == 6:
xc.append(float(reg[1]))
yc.append(float(reg[2]))
ra.append(float(reg[3]))
rb.append(float(reg[4]))
theta.append(float(reg[5]) * np.pi / 180.0)
elif reg[0].strip() == 'circle' and len(reg) == 4:
xc.append(float(reg[1]))
yc.append(float(reg[2]))
ra.append(float(reg[3]))
rb.append(float(reg[3]))
theta.append(0.0)
else:
            warnings.warn("Wrong shape, only Ellipse or Circle are available")
xc = np.array(xc, dtype=np.float32)
yc = np.array(yc, dtype=np.float32)
ra = np.array(ra, dtype=np.float32)
rb = np.array(rb, dtype=np.float32)
theta = np.array(theta, dtype=np.float32)
return xc, yc, ra, rb, theta, coord_type
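# Illustrative sketch (added; not from the original source): given a DS9
# region file whose region lines look like
#
#     ellipse 100.0 200.0 30.0 20.0 45.0
#     circle 50.0 60.0 10.0
#
# the ellipse yields (xc, yc, ra, rb) = (100, 200, 30, 20) with theta = 45
# degrees converted to radians, and the circle yields ra = rb = 10 with
# theta = 0.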
def psfex_extract(psfex_file, row, col):
"""Extract PSF image from PSFex result."""
try:
import psfex
except ImportError:
raise Exception("Need to install PSFex library first!")
return psfex.PSFEx(psfex_file).get_rec(row, col)
def save_to_dill(obj, name):
"""Save the Python object in a dill file."""
import dill
with open(name, "wb") as dill_file:
dill.dump(obj, dill_file)
def read_from_dill(name):
"""Read saved Python object from a dill file."""
import dill
with open(name, "rb") as dill_file:
content = dill.load(dill_file)
return content
| gpl-3.0 | 82,197,762,229,528,700 | 25.409938 | 77 | 0.584196 | false | 3.213908 | false | false | false |
liulion/mayavi | mayavi/action/modules.py | 2 | 2530 | """Actions to start various modules.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005-2008, Enthought, Inc.
# License: BSD Style.
import new
# Local imports.
from mayavi.core.registry import registry
from mayavi.core.metadata import ModuleMetadata
from mayavi.core.pipeline_info import PipelineInfo
from mayavi.action.filters import FilterAction
######################################################################
# `ModuleAction` class.
######################################################################
class ModuleAction(FilterAction):
###########################################################################
# 'Action' interface.
###########################################################################
def perform(self, event):
""" Performs the action. """
callable = self.metadata.get_callable()
obj = callable()
mv = self.mayavi
mv.add_module(obj)
mv.engine.current_selection = obj
######################################################################
# `AddModuleManager` class.
######################################################################
class AddModuleManager(ModuleAction):
""" An action that adds a ModuleManager to the tree. """
tooltip = "Add a ModuleManager to the current source/filter"
description = "Add a ModuleManager to the current source/filter"
metadata = ModuleMetadata(id="AddModuleManager",
class_name="mayavi.core.module_manager.ModuleManager",
menu_name="&Add ModuleManager",
tooltip="Add a ModuleManager to the current source/filter",
description="Add a ModuleManager to the current source/filter",
input_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
)
def perform(self, event):
""" Performs the action. """
from mayavi.core.module_manager import ModuleManager
mm = ModuleManager()
mv = self.mayavi
mv.add_module(mm)
mv.engine.current_selection = mm
######################################################################
# Creating the module actions automatically.
for module in registry.modules:
d = {'tooltip': module.tooltip,
'description': module.desc,
'metadata': module}
action = new.classobj(module.id, (ModuleAction,), d)
globals()[module.id] = action
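# Note (added; not in the original source): new.classobj is the Python 2-only
# dynamic class constructor. The built-in type() gives the same result, e.g.
# type(module.id, (ModuleAction,), d), and is the Python 3 compatible spelling.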
| bsd-3-clause | 353,088,184,930,345,200 | 35.142857 | 79 | 0.505929 | false | 5.184426 | false | false | false |
nervenXC/topical_word_embeddings | TWE-2/last_last_step.py | 3 | 1257 | #!/usr/bin/env python2
#-*- coding: UTF-8 -*-
#File:
#Date:
#Author: Yang Liu <largelymfs@gmail.com>
#Description:
if __name__=="__main__":
with open("test.log","w") as logout, open("result.out") as f, open("result.out.out","w") as fout, open("log.txt") as log:
#log loading
content2id = {}
id2word = {}
for l in log:
word, word_number, topic_number, content_id, _ = l.strip().split()
word_number = int(word_number)
topic_number = int(topic_number)
content_id = int(content_id)
content2id[(word_number, topic_number)] = content_id
id2word[word_number] = word
print "LOADING COMPLETED"
for (line_num, l) in enumerate(f):
word1, topic1, word2, topic2, score = l.strip().split()
word1 = int(word1)
topic1 = int(topic1)
word2 = int(word2)
topic2 = int(topic2)
try:
content1 = content2id[(word1, topic1)]
content2 = content2id[(word2, topic2)]
except:
print line_num
continue
print >> fout, content1, content2, score
print >>logout, id2word[word1], id2word[word2]
| mit | 998,559,788,393,184,800 | 34.914286 | 125 | 0.529037 | false | 3.481994 | false | false | false |
philipkershaw/crypto-cookie | setup.py | 1 | 1426 | #!/usr/bin/env python
"""Distribution Utilities for crypto-cookie package
"""
__author__ = "@philipkershaw"
__date__ = "09/07/15"
__copyright__ = "(C) 2015 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = '$Id$'
from setuptools import setup, find_packages
import os
THIS_DIR = os.path.dirname(__file__)
DESCRIPTION = 'Package to encrypt and sign cookies'
try:
LONG_DESCR = open(os.path.join(THIS_DIR, 'README.md')).read()
except IOError:
LONG_DESCR = ""
setup(
name = 'crypto-cookie',
version = '0.2.0',
description = DESCRIPTION,
long_description = LONG_DESCR,
author = 'Philip Kershaw',
author_email = 'Philip.Kershaw@stfc.ac.uk',
maintainer = 'Philip Kershaw',
maintainer_email = 'Philip.Kershaw@stfc.ac.uk',
url = 'https://github.com/cedadev/crypto-cookie',
license = 'BSD - See LICENCE file for details',
install_requires = ["cryptography"],
extras_require = {'SecureCookie': ['Paste']},
# dependency_links = ["http://dist.ceda.ac.uk/pip/"],
packages = find_packages(),
entry_points = None,
test_suite = 'crypto_cookie.test',
zip_safe = False
)
| bsd-3-clause | -8,888,149,941,170,016,000 | 34.65 | 71 | 0.57223 | false | 3.371158 | false | false | false |
ilikesounds/jt_portfolio | jt_portfolio/settings/prod.py | 1 | 1766 | import os
import dj_database_url
from jt_portfolio.settings.base import *
DEBUG = True
DATABASES = {
'default': {
'ENGINE': dj_database_url.config(
default=os.environ.get('DATABASE_URL')
)
}
}
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = os.environ['EMAIL_USER']
EMAIL_HOST_PASSWORD = os.environ['EMAIL_PW']
EMAIL_PORT = 587
DEFAULT_FROM_EMAIL = os.environ['EMAIL_DEFAULT']
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
ACCOUNT_ACTIVATION_DAYS = 3
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)s [%(asctime)s] %(module)s %(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
'file': {
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'verbose',
'filename': './logs/jtp.log',
'maxBytes': 1024000,
'backupCount': 3,
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django': {
'handlers': ['file', 'console', 'mail_admins'],
'propagate': True,
'level': 'DEBUG',
},
}
}
| mit | -6,825,678,805,469,355,000 | 25.358209 | 74 | 0.548698 | false | 3.55332 | false | false | false |
viswimmer1/PythonGenerator | data/python_files/32677266/github.py | 1 | 13970 | import httplib
import logging
import urllib2
from django import forms
from django.conf import settings
from django.contrib.sites.models import Site
from django.utils import simplejson
from django.utils.translation import ugettext_lazy as _
from djblets.siteconfig.models import SiteConfiguration
from reviewboard.hostingsvcs.errors import AuthorizationError, \
SSHKeyAssociationError
from reviewboard.hostingsvcs.forms import HostingServiceForm
from reviewboard.hostingsvcs.service import HostingService
from reviewboard.scmtools.errors import FileNotFoundError
from reviewboard.site.urlresolvers import local_site_reverse
class GitHubPublicForm(HostingServiceForm):
github_public_repo_name = forms.CharField(
label=_('Repository name'),
max_length=64,
required=True,
widget=forms.TextInput(attrs={'size': '60'}),
help_text=_('The name of the repository. This is the '
'<repo_name> in '
'http://github.com/<username>/<repo_name>/'))
class GitHubPrivateForm(HostingServiceForm):
github_private_repo_name = forms.CharField(
label=_('Repository name'),
max_length=64,
required=True,
widget=forms.TextInput(attrs={'size': '60'}),
help_text=_('The name of the repository. This is the '
'<repo_name> in '
'http://github.com/<username>/<repo_name>/'))
class GitHubPublicOrgForm(HostingServiceForm):
github_public_org_name = forms.CharField(
label=_('Organization name'),
max_length=64,
required=True,
widget=forms.TextInput(attrs={'size': '60'}),
help_text=_('The name of the organization. This is the '
'<org_name> in '
'http://github.com/<org_name>/<repo_name>/'))
github_public_org_repo_name = forms.CharField(
label=_('Repository name'),
max_length=64,
required=True,
widget=forms.TextInput(attrs={'size': '60'}),
help_text=_('The name of the repository. This is the '
'<repo_name> in '
'http://github.com/<org_name>/<repo_name>/'))
class GitHubPrivateOrgForm(HostingServiceForm):
github_private_org_name = forms.CharField(
label=_('Organization name'),
max_length=64,
required=True,
widget=forms.TextInput(attrs={'size': '60'}),
help_text=_('The name of the organization. This is the '
'<org_name> in '
'http://github.com/<org_name>/<repo_name>/'))
github_private_org_repo_name = forms.CharField(
label=_('Repository name'),
max_length=64,
required=True,
widget=forms.TextInput(attrs={'size': '60'}),
help_text=_('The name of the repository. This is the '
'<repo_name> in '
'http://github.com/<org_name>/<repo_name>/'))
class GitHub(HostingService):
name = _('GitHub')
plans = [
('public', {
'name': _('Public'),
'form': GitHubPublicForm,
'repository_fields': {
'Git': {
'path': 'git://github.com/%(hosting_account_username)s/'
'%(github_public_repo_name)s.git',
'mirror_path': 'git@github.com:'
'%(hosting_account_username)s/'
'%(github_public_repo_name)s.git',
}
},
'bug_tracker_field': 'http://github.com/'
'%(hosting_account_username)s/'
'%(github_public_repo_name)s/issues#issue/%%s',
}),
('public-org', {
'name': _('Public Organization'),
'form': GitHubPublicOrgForm,
'repository_fields': {
'Git': {
'path': 'git://github.com/%(github_public_org_name)s/'
'%(github_public_org_repo_name)s.git',
'mirror_path': 'git@github.com:%(github_public_org_name)s/'
'%(github_public_org_repo_name)s.git',
}
},
'bug_tracker_field': 'http://github.com/'
'%(github_public_org_name)s/'
'%(github_public_org_repo_name)s/'
'issues#issue/%%s',
}),
('private', {
'name': _('Private'),
'form': GitHubPrivateForm,
'repository_fields': {
'Git': {
'path': 'git@github.com:%(hosting_account_username)s/'
'%(github_private_repo_name)s.git',
'mirror_path': '',
},
},
'bug_tracker_field': 'http://github.com/'
'%(hosting_account_username)s/'
'%(github_private_repo_name)s/'
'issues#issue/%%s',
}),
('private-org', {
'name': _('Private Organization'),
'form': GitHubPrivateOrgForm,
'repository_fields': {
'Git': {
'path': 'git@github.com:%(github_private_org_name)s/'
'%(github_private_org_repo_name)s.git',
'mirror_path': '',
},
},
'bug_tracker_field': 'http://github.com/'
'%(github_private_org_name)s/'
'%(github_private_org_repo_name)s/'
'issues#issue/%%s',
}),
]
needs_authorization = True
supports_repositories = True
supports_bug_trackers = True
supports_ssh_key_association = True
supported_scmtools = ['Git']
API_URL = 'https://api.github.com/'
RAW_MIMETYPE = 'application/vnd.github.v3.raw'
def authorize(self, username, password, local_site_name=None,
*args, **kwargs):
site = Site.objects.get_current()
siteconfig = SiteConfiguration.objects.get_current()
site_url = '%s://%s%s' % (
siteconfig.get('site_domain_method'),
site.domain,
local_site_reverse('root', local_site_name=local_site_name))
try:
body = {
'scopes': [
'user',
'repo',
],
'note': 'Access for Review Board',
'note_url': site_url,
}
# If the site is using a registered GitHub application,
# send it in the requests. This will gain the benefits of
# a GitHub application, such as higher rate limits.
if (hasattr(settings, 'GITHUB_CLIENT_ID') and
hasattr(settings, 'GITHUB_CLIENT_SECRET')):
body.update({
'client_id': settings.GITHUB_CLIENT_ID,
'client_secret': settings.GITHUB_CLIENT_SECRET,
})
rsp, headers = self._json_post(
url=self.API_URL + 'authorizations',
username=username,
password=password,
body=simplejson.dumps(body))
except (urllib2.HTTPError, urllib2.URLError), e:
data = e.read()
try:
rsp = simplejson.loads(data)
except:
rsp = None
if rsp and 'message' in rsp:
raise AuthorizationError(rsp['message'])
else:
raise AuthorizationError(str(e))
self.account.data['authorization'] = rsp
self.account.save()
def is_authorized(self):
return ('authorization' in self.account.data and
'token' in self.account.data['authorization'])
def get_file(self, repository, path, revision, *args, **kwargs):
url = self._build_api_url(repository, 'git/blobs/%s' % revision)
try:
return self._http_get(url, headers={
'Accept': self.RAW_MIMETYPE,
})[0]
except (urllib2.URLError, urllib2.HTTPError):
raise FileNotFoundError(path, revision)
def get_file_exists(self, repository, path, revision, *args, **kwargs):
url = self._build_api_url(repository, 'git/blobs/%s' % revision)
try:
self._http_get(url, headers={
'Accept': self.RAW_MIMETYPE,
})
return True
except (urllib2.URLError, urllib2.HTTPError):
return False
def is_ssh_key_associated(self, repository, key):
if not key:
return False
formatted_key = self._format_public_key(key)
# The key might be a deploy key (associated with a repository) or a
# user key (associated with the currently authorized user account),
# so check both.
deploy_keys_url = self._build_api_url(repository, 'keys')
user_keys_url = ('%suser/keys?access_token=%s'
% (self.API_URL,
self.account.data['authorization']['token']))
for url in (deploy_keys_url, user_keys_url):
keys_resp = self._key_association_api_call(self._json_get, url)
keys = [
item['key']
for item in keys_resp
if 'key' in item
]
if formatted_key in keys:
return True
return False
def associate_ssh_key(self, repository, key, *args, **kwargs):
url = self._build_api_url(repository, 'keys')
if key:
post_data = {
'key': self._format_public_key(key),
'title': 'Review Board (%s)' %
Site.objects.get_current().domain,
}
self._key_association_api_call(self._http_post, url,
content_type='application/json',
body=simplejson.dumps(post_data))
def _key_association_api_call(self, instance_method, *args,
**kwargs):
"""Returns response of API call, or raises SSHKeyAssociationError.
The `instance_method` should be one of the HostingService http methods
(e.g. _http_post, _http_get, etc.)
"""
try:
response, headers = instance_method(*args, **kwargs)
return response
except (urllib2.HTTPError, urllib2.URLError), e:
try:
rsp = simplejson.loads(e.read())
status_code = e.code
except:
rsp = None
status_code = None
if rsp and status_code:
api_msg = self._get_api_error_message(rsp, status_code)
raise SSHKeyAssociationError('%s (%s)' % (api_msg, e))
else:
raise SSHKeyAssociationError(str(e))
def _format_public_key(self, key):
"""Return the server's SSH public key as a string (if it exists)
The key is formatted for POSTing to GitHub's API.
"""
# Key must be prepended with algorithm name
return '%s %s' % (key.get_name(), key.get_base64())
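    # Illustrative example (added): for an RSA key this yields a string of the
    # form "ssh-rsa AAAAB3NzaC1yc2E...", i.e. the algorithm name followed by
    # the base64-encoded key body, which is the format GitHub's key APIs
    # expect in the 'key' field.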
def _get_api_error_message(self, rsp, status_code):
"""Return the error(s) reported by the GitHub API, as a string
See: http://developer.github.com/v3/#client-errors
"""
if 'message' not in rsp:
msg = _('Unknown GitHub API Error')
elif 'errors' in rsp and status_code == httplib.UNPROCESSABLE_ENTITY:
errors = [e['message'] for e in rsp['errors'] if 'message' in e]
msg = '%s: (%s)' % (rsp['message'], ', '.join(errors))
else:
msg = rsp['message']
return msg
def _http_get(self, url, *args, **kwargs):
data, headers = super(GitHub, self)._http_get(url, *args, **kwargs)
self._check_rate_limits(headers)
return data, headers
def _http_post(self, url, *args, **kwargs):
data, headers = super(GitHub, self)._http_post(url, *args, **kwargs)
self._check_rate_limits(headers)
return data, headers
def _check_rate_limits(self, headers):
rate_limit_remaining = headers.get('X-RateLimit-Remaining', None)
try:
if (rate_limit_remaining is not None and
int(rate_limit_remaining) <= 100):
logging.warning('GitHub rate limit for %s is down to %s',
self.account.username, rate_limit_remaining)
except ValueError:
pass
def _build_api_url(self, repository, api_path):
return '%s%s?access_token=%s' % (
self._get_repo_api_url(repository),
api_path,
self.account.data['authorization']['token'])
def _get_repo_api_url(self, repository):
plan = repository.extra_data['repository_plan']
if plan == 'public':
repo_name = repository.extra_data['github_public_repo_name']
owner = self.account.username
elif plan == 'private':
repo_name = repository.extra_data['github_private_repo_name']
owner = self.account.username
elif plan == 'public-org':
repo_name = repository.extra_data['github_public_org_repo_name']
owner = repository.extra_data['github_public_org_name']
elif plan == 'private-org':
repo_name = repository.extra_data['github_private_org_repo_name']
owner = repository.extra_data['github_private_org_name']
return '%srepos/%s/%s/' % (self.API_URL, owner, repo_name)
| gpl-2.0 | 4,872,648,449,457,827,000 | 37.065395 | 80 | 0.522477 | false | 4.196455 | false | false | false |
rahul67/hue | apps/filebrowser/src/filebrowser/forms.py | 22 | 7669 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import urllib
from django import forms
from django.contrib.auth.models import User, Group
from django.forms import FileField, CharField, BooleanField, Textarea
from django.forms.formsets import formset_factory, BaseFormSet, ManagementForm
from desktop.lib import i18n
from filebrowser.lib import rwx
from hadoop.fs import normpath
from django.utils.translation import ugettext_lazy as _
logger = logging.getLogger(__name__)
class FormSet(BaseFormSet):
def __init__(self, data=None, prefix=None, *args, **kwargs):
self.prefix = prefix or self.get_default_prefix()
if data:
self.data = {}
# Add management field info
# This is hard coded given that none of these keys or info is exportable
# This could be a problem point if the management form changes in later releases
self.data['%s-TOTAL_FORMS' % self.prefix] = len(data)
self.data['%s-INITIAL_FORMS' % self.prefix] = len(data)
self.data['%s-MAX_NUM_FORMS' % self.prefix] = 0
# Add correct data
for i in range(0, len(data)):
prefix = self.add_prefix(i)
for field in data[i]:
self.data['%s-%s' % (prefix, field)] = data[i][field]
BaseFormSet.__init__(self, self.data, self.prefix, *args, **kwargs)
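  # Illustrative example (added; field names are hypothetical): for
  # prefix='form' and data=[{'path': '/a'}, {'path': '/b'}], the dict built
  # above contains form-TOTAL_FORMS=2, form-INITIAL_FORMS=2,
  # form-MAX_NUM_FORMS=0, form-0-path='/a' and form-1-path='/b', which is the
  # flat POST-style structure Django formsets expect.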
class PathField(CharField):
def __init__(self, label, help_text=None, **kwargs):
kwargs.setdefault('required', True)
kwargs.setdefault('min_length', 1)
forms.CharField.__init__(self, label=label, help_text=help_text, **kwargs)
def clean(self, value):
return normpath(CharField.clean(self, value))
class EditorForm(forms.Form):
path = PathField(label=_("File to edit"))
contents = CharField(widget=Textarea, label=_("Contents"), required=False)
encoding = CharField(label=_('Encoding'), required=False)
def clean_path(self):
return urllib.unquote(self.cleaned_data.get('path', ''))
def clean_contents(self):
return self.cleaned_data.get('contents', '').replace('\r\n', '\n')
def clean_encoding(self):
encoding = self.cleaned_data.get('encoding', '').strip()
if not encoding:
return i18n.get_site_encoding()
return encoding
class RenameForm(forms.Form):
op = "rename"
src_path = CharField(label=_("File to rename"), help_text=_("The file to rename."))
dest_path = CharField(label=_("New name"), help_text=_("Rename the file to:"))
class BaseRenameFormSet(FormSet):
op = "rename"
RenameFormSet = formset_factory(RenameForm, formset=BaseRenameFormSet, extra=0)
class CopyForm(forms.Form):
op = "copy"
src_path = CharField(label=_("File to copy"), help_text=_("The file to copy."))
dest_path = CharField(label=_("Destination location"), help_text=_("Copy the file to:"))
class BaseCopyFormSet(FormSet):
op = "copy"
CopyFormSet = formset_factory(CopyForm, formset=BaseCopyFormSet, extra=0)
class UploadFileForm(forms.Form):
op = "upload"
# The "hdfs" prefix in "hdfs_file" triggers the HDFSfileUploadHandler
hdfs_file = FileField(forms.Form, label=_("File to Upload"))
dest = PathField(label=_("Destination Path"), help_text=_("Filename or directory to upload to."))
class UploadArchiveForm(forms.Form):
op = "upload"
archive = FileField(forms.Form, label=_("Archive to Upload"))
dest = PathField(label=_("Destination Path"), help_text=_("Archive to upload to."))
class RemoveForm(forms.Form):
op = "remove"
path = PathField(label=_("File to remove"))
class RmDirForm(forms.Form):
op = "rmdir"
path = PathField(label=_("Directory to remove"))
class RmTreeForm(forms.Form):
op = "rmtree"
path = PathField(label=_("Directory to remove (recursively)"))
class BaseRmTreeFormset(FormSet):
op = "rmtree"
RmTreeFormSet = formset_factory(RmTreeForm, formset=BaseRmTreeFormset, extra=0)
class RestoreForm(forms.Form):
op = "rmtree"
path = PathField(label=_("Path to restore"))
class BaseRestoreFormset(FormSet):
op = "restore"
RestoreFormSet = formset_factory(RestoreForm, formset=BaseRestoreFormset, extra=0)
class TrashPurgeForm(forms.Form):
op = "purge_trash"
class MkDirForm(forms.Form):
op = "mkdir"
path = PathField(label=_("Path in which to create the directory"))
name = PathField(label=_("Directory Name"))
class TouchForm(forms.Form):
op = "touch"
path = PathField(label=_("Path in which to create the file"))
name = PathField(label=_("File Name"))
class ChownForm(forms.Form):
op = "chown"
path = PathField(label=_("Path to change user/group ownership"))
# These could be "ChoiceFields", listing only users and groups
# that the current user has permissions for.
user = CharField(label=_("User"), min_length=1)
user_other = CharField(label=_("OtherUser"), min_length=1, required=False)
group = CharField(label=_("Group"), min_length=1)
group_other = CharField(label=_("OtherGroup"), min_length=1, required=False)
recursive = BooleanField(label=_("Recursive"), required=False)
def __init__(self, *args, **kwargs):
super(ChownForm, self).__init__(*args, **kwargs)
self.all_groups = [ group.name for group in Group.objects.all() ]
self.all_users = [ user.username for user in User.objects.all() ]
class BaseChownFormSet(FormSet):
op = "chown"
ChownFormSet = formset_factory(ChownForm, formset=BaseChownFormSet, extra=0)
class ChmodForm(forms.Form):
op = "chmod"
path = PathField(label=_("Path to change permissions"))
# By default, BooleanField only validates when
# it's checked.
user_read = BooleanField(required=False)
user_write = BooleanField(required=False)
user_execute = BooleanField(required=False)
group_read = BooleanField(required=False)
group_write = BooleanField(required=False)
group_execute = BooleanField(required=False)
other_read = BooleanField(required=False)
other_write = BooleanField(required=False)
other_execute = BooleanField(required=False)
sticky = BooleanField(required=False)
recursive = BooleanField(required=False)
names = ("user_read", "user_write", "user_execute",
"group_read", "group_write", "group_execute",
"other_read", "other_write", "other_execute",
"sticky")
def __init__(self, initial, *args, **kwargs):
logging.info(dir(self))
logging.info(dir(type(self)))
# Convert from string representation.
mode = initial.get("mode")
if mode is not None:
mode = int(mode, 8)
bools = rwx.expand_mode(mode)
for name, b in zip(self.names, bools):
initial[name] = b
logging.debug(initial)
kwargs['initial'] = initial
forms.Form.__init__(self, *args, **kwargs)
def full_clean(self):
forms.Form.full_clean(self)
if hasattr(self, "cleaned_data"):
self.cleaned_data["mode"] = rwx.compress_mode(map(lambda name: self.cleaned_data[name], self.names))
class BaseChmodFormSet(FormSet):
op = "chmod"
ChmodFormSet = formset_factory(ChmodForm, formset=BaseChmodFormSet, extra=0)
| apache-2.0 | 1,458,078,050,195,964,000 | 33.859091 | 106 | 0.701656 | false | 3.583645 | false | false | false |
dbmi-pitt/DIKB-Micropublication | scripts/mp-scripts/Bio/Saf/saf_format.py | 1 | 2399 | # Copyright 2001 by Katharine Lindner. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Martel based parser to read SAF formatted files.
This is a huge regular expression for SAF, built using
the 'regular expressions on steroids' capabilities of Martel.
http://www.embl-heidelberg.de/predictprotein/Dexa/optin_safDes.html
Notes:
Just so I remember -- the new end of line syntax is:
New regexp syntax - \R
\R means "\n|\r\n?"
[\R] means "[\n\r]"
This helps us have endlines be consistent across platforms.
"""
# standard library
#http://www.embl-heidelberg.de/predictprotein/Dexa/optin_safDes.html
import string
# Martel
import Martel
from Martel import RecordReader
from Martel import Str
from Martel import AnyEol
from Martel import ToEol
from Martel import Group
from Martel import Alt
from Martel import Rep
from Martel import Rep1
from Martel import Any
from Martel import AnyBut
from Martel import RepN
from Martel import Opt
from Martel import ToSep
from Martel.Expression import Assert
# --- first set up some helper constants and functions
# Copyright 2001 by Katharine Lindner. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
digits = "0123456789"
valid_sequence_characters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-. \t'
white_space = "\t "
valid_residue_characters = digits + white_space + chr( 0x2e )
residue_number_line = Group( "residue_number_line", \
Rep1( Any( valid_residue_characters ) ) +
AnyEol())
comment_line = Group( "comment_line", \
Str( "#" ) +
ToEol() )
ignored_line = Group( "ignored_line", \
Alt( comment_line, residue_number_line ) )
candidate_line = Group( "candidate_line", \
Assert( Str( "#" ), 1 ) +
Assert( Any( valid_residue_characters ), 1 ) +
ToSep( sep = ' ' ) +
Rep( Any( valid_sequence_characters ) ) +
ToEol() )
saf_record = Group( "saf_record", \
candidate_line + Rep( Alt( candidate_line, ignored_line ) ) + Opt( Str( "#" ) ) )
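# Illustrative note (added; not part of the original module): the grammar
# above accepts alignment records whose candidate lines look roughly like
#
#     seq_name1  MKVLSEGEWQLVLHVWAKVE
#     seq_name2  MKYILSEGEWQLVLHVWAKV
#
# i.e. lines that do not start with '#' or a digit/whitespace/'.', holding a
# name up to the first space followed by sequence characters, while comment
# lines ('#...') and bare residue-number lines are matched as ignored_line.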
| apache-2.0 | -35,273,515,758,513,052 | 28.256098 | 87 | 0.678616 | false | 3.564636 | false | false | false |
Simpsonpt/django-acra | acra/models.py | 1 | 2726 | from django.db import models
from django.utils.timezone import utc
from datetime import datetime
import json
REPORT_STATUS = (
("solved","Solved"),
("unsolved","Unsolved"),
)
class CrashReport(models.Model):
stack_trace = models.TextField(default="")
logcat = models.TextField(default="")
shared_preferences = models.TextField(default="")
environment = models.TextField(default="")
total_mem_size = models.BigIntegerField(default=0,verbose_name='Total Memory Size')
initial_configuration = models.TextField(default="")
display = models.TextField(default="")
available_mem_size = models.BigIntegerField(default=0,verbose_name='Available Memory Size')
phone_model = models.CharField(max_length=50,default="")
user_comment = models.TextField(default="")
crash_configuration = models.TextField(default="")
device_features = models.TextField(default="")
settings_system = models.TextField(default="",verbose_name='System Settings')
file_path = models.CharField(max_length=100,default="")
installation_id = models.CharField(max_length=100,default="")
user_crash_date = models.CharField(max_length=50,default="",verbose_name='Crash Date')
app_version_name = models.CharField(max_length=50,default="",verbose_name='Version Name')
user_app_start_date = models.CharField(max_length=50,default="",verbose_name='Application Start Date')
settings_global = models.TextField(default="",verbose_name='Global Settings')
build = models.TextField(default="")
settings_secure = models.TextField(default="",verbose_name='Secure Settings')
dumpsys_meminfo = models.TextField(default="")
user_email = models.CharField(max_length=50,default="")
report_id = models.CharField(max_length=100,default="")
product = models.CharField(max_length=50,default="")
package_name = models.CharField(max_length=100,default="",verbose_name='Package Name')
brand = models.CharField(max_length=50,default="")
android_version = models.CharField(max_length=50,default="")
app_version_code = models.CharField(max_length=50,default="",verbose_name='Version Code')
is_silent = models.CharField(max_length=50,default="")
custom_data = models.TextField(default="")
description = models.TextField(default="")
solved = models.CharField(max_length=10,choices=REPORT_STATUS,default="unsolved",verbose_name='Status')
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return ('Device: %s %s - Android: %s - Application: %s Version: %s') % (self.brand,self.product,self.android_version,self.app_version_name,self.app_version_code)
def __unicode__(self):
return ('Device: %s %s - Android: %s - Application: %s Version: %s') % (self.brand,self.product,self.android_version,self.app_version_name,self.app_version_code)
| gpl-3.0 | 7,411,365,509,683,363,000 | 52.45098 | 163 | 0.747249 | false | 3.446271 | false | false | false |
jcherqui/searx | searx/engines/btdigg.py | 2 | 2621 | """
BTDigg (Videos, Music, Files)
@website https://btdig.com
@provide-api yes (on demand)
@using-api no
@results HTML (using search portal)
@stable no (HTML can change)
@parse url, title, content, seed, leech, magnetlink
"""
from lxml import html
from operator import itemgetter
from searx.engines.xpath import extract_text
from searx.url_utils import quote, urljoin
from searx.utils import get_torrent_size
# engine dependent config
categories = ['videos', 'music', 'files']
paging = True
# search-url
url = 'https://btdig.com'
search_url = url + '/search?q={search_term}&p={pageno}'
# do search-request
def request(query, params):
params['url'] = search_url.format(search_term=quote(query),
pageno=params['pageno'] - 1)
return params
# get response from search-request
def response(resp):
results = []
dom = html.fromstring(resp.text)
search_res = dom.xpath('//div[@class="one_result"]')
# return empty array if nothing is found
if not search_res:
return []
# parse results
for result in search_res:
link = result.xpath('.//div[@class="torrent_name"]//a')[0]
href = urljoin(url, link.attrib.get('href'))
title = extract_text(link)
excerpt = result.xpath('.//div[@class="torrent_excerpt"]')[0]
content = html.tostring(excerpt, encoding='unicode', method='text', with_tail=False)
# it is better to emit <br/> instead of |, but html tags are verboten
content = content.strip().replace('\n', ' | ')
content = ' '.join(content.split())
filesize = result.xpath('.//span[@class="torrent_size"]/text()')[0].split()[0]
filesize_multiplier = result.xpath('.//span[@class="torrent_size"]/text()')[0].split()[1]
files = (result.xpath('.//span[@class="torrent_files"]/text()') or ['1'])[0]
# convert filesize to byte if possible
filesize = get_torrent_size(filesize, filesize_multiplier)
# convert files to int if possible
try:
files = int(files)
except:
files = None
magnetlink = result.xpath('.//div[@class="torrent_magnet"]//a')[0].attrib['href']
# append result
results.append({'url': href,
'title': title,
'content': content,
'filesize': filesize,
'files': files,
'magnetlink': magnetlink,
'template': 'torrent.html'})
# return results sorted by seeder
return results
| agpl-3.0 | 7,239,155,016,122,336,000 | 29.835294 | 97 | 0.585273 | false | 3.837482 | false | false | false |
rabipanda/tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_util_test.py | 25 | 9406 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import test
random_seed.set_random_seed(23)
rng = np.random.RandomState(0)
class AssertZeroImagPartTest(test.TestCase):
def test_real_tensor_doesnt_raise(self):
x = ops.convert_to_tensor([0., 2, 3])
with self.test_session():
# Should not raise.
linear_operator_util.assert_zero_imag_part(x, message="ABC123").run()
def test_complex_tensor_with_imag_zero_doesnt_raise(self):
x = ops.convert_to_tensor([1., 0, 3])
y = ops.convert_to_tensor([0., 0, 0])
z = math_ops.complex(x, y)
with self.test_session():
# Should not raise.
linear_operator_util.assert_zero_imag_part(z, message="ABC123").run()
def test_complex_tensor_with_nonzero_imag_raises(self):
x = ops.convert_to_tensor([1., 2, 0])
y = ops.convert_to_tensor([1., 2, 0])
z = math_ops.complex(x, y)
with self.test_session():
with self.assertRaisesOpError("ABC123"):
linear_operator_util.assert_zero_imag_part(z, message="ABC123").run()
class AssertNoEntriesWithModulusZeroTest(test.TestCase):
def test_nonzero_real_tensor_doesnt_raise(self):
x = ops.convert_to_tensor([1., 2, 3])
with self.test_session():
# Should not raise.
linear_operator_util.assert_no_entries_with_modulus_zero(
x, message="ABC123").run()
def test_nonzero_complex_tensor_doesnt_raise(self):
x = ops.convert_to_tensor([1., 0, 3])
y = ops.convert_to_tensor([1., 2, 0])
z = math_ops.complex(x, y)
with self.test_session():
# Should not raise.
linear_operator_util.assert_no_entries_with_modulus_zero(
z, message="ABC123").run()
def test_zero_real_tensor_raises(self):
x = ops.convert_to_tensor([1., 0, 3])
with self.test_session():
with self.assertRaisesOpError("ABC123"):
linear_operator_util.assert_no_entries_with_modulus_zero(
x, message="ABC123").run()
def test_zero_complex_tensor_raises(self):
x = ops.convert_to_tensor([1., 2, 0])
y = ops.convert_to_tensor([1., 2, 0])
z = math_ops.complex(x, y)
with self.test_session():
with self.assertRaisesOpError("ABC123"):
linear_operator_util.assert_no_entries_with_modulus_zero(
z, message="ABC123").run()
class BroadcastMatrixBatchDimsTest(test.TestCase):
def test_zero_batch_matrices_returned_as_empty_list(self):
self.assertAllEqual(
[], linear_operator_util.broadcast_matrix_batch_dims([]))
def test_one_batch_matrix_returned_after_tensor_conversion(self):
arr = rng.rand(2, 3, 4)
tensor, = linear_operator_util.broadcast_matrix_batch_dims([arr])
self.assertTrue(isinstance(tensor, ops.Tensor))
with self.test_session():
self.assertAllClose(arr, tensor.eval())
def test_static_dims_broadcast(self):
# x.batch_shape = [3, 1, 2]
# y.batch_shape = [4, 1]
# broadcast batch shape = [3, 4, 2]
x = rng.rand(3, 1, 2, 1, 5)
y = rng.rand(4, 1, 3, 7)
batch_of_zeros = np.zeros((3, 4, 2, 1, 1))
x_bc_expected = x + batch_of_zeros
y_bc_expected = y + batch_of_zeros
x_bc, y_bc = linear_operator_util.broadcast_matrix_batch_dims([x, y])
with self.test_session() as sess:
self.assertAllEqual(x_bc_expected.shape, x_bc.get_shape())
self.assertAllEqual(y_bc_expected.shape, y_bc.get_shape())
x_bc_, y_bc_ = sess.run([x_bc, y_bc])
self.assertAllClose(x_bc_expected, x_bc_)
self.assertAllClose(y_bc_expected, y_bc_)
def test_static_dims_broadcast_second_arg_higher_rank(self):
# x.batch_shape = [1, 2]
# y.batch_shape = [1, 3, 1]
# broadcast batch shape = [1, 3, 2]
x = rng.rand(1, 2, 1, 5)
y = rng.rand(1, 3, 2, 3, 7)
batch_of_zeros = np.zeros((1, 3, 2, 1, 1))
x_bc_expected = x + batch_of_zeros
y_bc_expected = y + batch_of_zeros
x_bc, y_bc = linear_operator_util.broadcast_matrix_batch_dims([x, y])
with self.test_session() as sess:
self.assertAllEqual(x_bc_expected.shape, x_bc.get_shape())
self.assertAllEqual(y_bc_expected.shape, y_bc.get_shape())
x_bc_, y_bc_ = sess.run([x_bc, y_bc])
self.assertAllClose(x_bc_expected, x_bc_)
self.assertAllClose(y_bc_expected, y_bc_)
def test_dynamic_dims_broadcast_32bit(self):
# x.batch_shape = [3, 1, 2]
# y.batch_shape = [4, 1]
# broadcast batch shape = [3, 4, 2]
x = rng.rand(3, 1, 2, 1, 5).astype(np.float32)
y = rng.rand(4, 1, 3, 7).astype(np.float32)
batch_of_zeros = np.zeros((3, 4, 2, 1, 1)).astype(np.float32)
x_bc_expected = x + batch_of_zeros
y_bc_expected = y + batch_of_zeros
x_ph = array_ops.placeholder(dtypes.float32)
y_ph = array_ops.placeholder(dtypes.float32)
x_bc, y_bc = linear_operator_util.broadcast_matrix_batch_dims([x_ph, y_ph])
with self.test_session() as sess:
x_bc_, y_bc_ = sess.run([x_bc, y_bc], feed_dict={x_ph: x, y_ph: y})
self.assertAllClose(x_bc_expected, x_bc_)
self.assertAllClose(y_bc_expected, y_bc_)
def test_dynamic_dims_broadcast_32bit_second_arg_higher_rank(self):
# x.batch_shape = [1, 2]
# y.batch_shape = [3, 4, 1]
# broadcast batch shape = [3, 4, 2]
x = rng.rand(1, 2, 1, 5).astype(np.float32)
y = rng.rand(3, 4, 1, 3, 7).astype(np.float32)
batch_of_zeros = np.zeros((3, 4, 2, 1, 1)).astype(np.float32)
x_bc_expected = x + batch_of_zeros
y_bc_expected = y + batch_of_zeros
x_ph = array_ops.placeholder(dtypes.float32)
y_ph = array_ops.placeholder(dtypes.float32)
x_bc, y_bc = linear_operator_util.broadcast_matrix_batch_dims([x_ph, y_ph])
with self.test_session() as sess:
x_bc_, y_bc_ = sess.run([x_bc, y_bc], feed_dict={x_ph: x, y_ph: y})
self.assertAllClose(x_bc_expected, x_bc_)
self.assertAllClose(y_bc_expected, y_bc_)
def test_less_than_two_dims_raises_static(self):
x = rng.rand(3)
y = rng.rand(1, 1)
with self.assertRaisesRegexp(ValueError, "at least two dimensions"):
linear_operator_util.broadcast_matrix_batch_dims([x, y])
with self.assertRaisesRegexp(ValueError, "at least two dimensions"):
linear_operator_util.broadcast_matrix_batch_dims([y, x])
class MatmulWithBroadcastTest(test.TestCase):
def test_static_dims_broadcast(self):
# batch_shape = [2]
# for each batch member, we have a 1x3 matrix times a 3x7 matrix ==> 1x7
x = rng.rand(2, 1, 3)
y = rng.rand(3, 7)
y_broadcast = y + np.zeros((2, 1, 1))
with self.test_session():
result = linear_operator_util.matmul_with_broadcast(x, y)
self.assertAllEqual((2, 1, 7), result.get_shape())
expected = math_ops.matmul(x, y_broadcast)
self.assertAllEqual(expected.eval(), result.eval())
def test_dynamic_dims_broadcast_32bit(self):
# batch_shape = [2]
# for each batch member, we have a 1x3 matrix times a 3x7 matrix ==> 1x7
x = rng.rand(2, 1, 3)
y = rng.rand(3, 7)
y_broadcast = y + np.zeros((2, 1, 1))
x_ph = array_ops.placeholder(dtypes.float64)
y_ph = array_ops.placeholder(dtypes.float64)
with self.test_session() as sess:
result, expected = sess.run(
[linear_operator_util.matmul_with_broadcast(x_ph, y_ph),
math_ops.matmul(x, y_broadcast)],
feed_dict={x_ph: x, y_ph: y})
self.assertAllEqual(expected, result)
class DomainDimensionStubOperator(object):
def __init__(self, domain_dimension):
self._domain_dimension = ops.convert_to_tensor(domain_dimension)
def domain_dimension_tensor(self):
return self._domain_dimension
class AssertCompatibleMatrixDimensionsTest(test.TestCase):
def test_compatible_dimensions_do_not_raise(self):
with self.test_session():
x = ops.convert_to_tensor(rng.rand(2, 3, 4))
operator = DomainDimensionStubOperator(3)
# Should not raise
linear_operator_util.assert_compatible_matrix_dimensions(
operator, x).run()
def test_incompatible_dimensions_raise(self):
with self.test_session():
x = ops.convert_to_tensor(rng.rand(2, 4, 4))
operator = DomainDimensionStubOperator(3)
with self.assertRaisesOpError("Incompatible matrix dimensions"):
linear_operator_util.assert_compatible_matrix_dimensions(
operator, x).run()
if __name__ == "__main__":
test.main()
| apache-2.0 | -4,110,878,620,793,600,000 | 35.316602 | 80 | 0.651393 | false | 3.158496 | true | false | false |
boeserwolf/sublime3-js2coffee | js2coffeescript.py | 1 | 1340 | import sublime, sublime_plugin
import subprocess
class JsToCoffeescriptCommand(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
# get non-empty selections
regions = [s for s in view.sel() if not s.empty()]
# if there's no non-empty selection, filter the whole document
if len(regions) == 0:
regions = [ sublime.Region(0, view.size()) ]
for region in reversed(regions):
content = view.substr(region)
new_content = self.js2coffee(content)
view.replace(edit, region, new_content)
def js2coffee(self, contents):
indentation = 2
command = "js2coffee -i%d" % (indentation)
js2coffee = subprocess.Popen(
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True
)
output, error = js2coffee.communicate(bytearray(contents, "utf-8"));
if error:
# self.write_to_console(error)
# self.window.run_command("show_panel", {"panel": "output.exec"})
print("JsToCoffeescript: ERROR!")
print("Result: %s" % error)
return None
return output.decode("utf-8")
| mit | -2,358,411,413,688,469,000 | 36.222222 | 81 | 0.551493 | false | 4.148607 | false | false | false |
ivannz/study_notes | year_14_15/course_project/code/project/matlab_test.py | 1 | 3085 | import numpy as np
from numpy.random import RandomState
from fgn import fbm
import scipy.io as io
N, H = 2**20 + 1, 0.5
generator = fbm( N = N, H = H, time = True )
generator.set_rnd( RandomState( 123 ) )
T, X = generator( )
io.savemat( './output/data.mat', { 'T': T, 'X': X, 'H': H, 'N': N } )
# import numpy as np
# import scipy.io as io
# mat = io.loadmat( './output/data.mat' )
# H, X, T, N = mat['H'][0], mat['X'][0], mat['T'][0], mat['N'][0]
from crossing_tree import xtree_build, f_get_w
delta = 1e-3 # np.std( np.diff( X ) )
Tnk, Xnk, Znk, Vnk, Wnk = xtree_build( T, X, delta = delta )
print np.unique( Znk[1], return_counts = True )
## %% clear all ; format long ;
## %%
## %% cd c:\study_notes\year_14_15\course_project\code
## %%
## %% load('C:\study_notes\year_14_15\course_project\code\project\output\data.mat')
## %%
## %% Zr = 2 : 2 : 40 ;
## %% delta = 1e-3 ; % std( diff( X ) ) ;
## %% [ w, subx, hp, ht ] = f_get_w( X, T, [ 0 : 16 ], delta, 0 ) ;
## %% Z = [ subx{ 1+1 } ] ;
## %% for z = Zr
## %% sum( Z == z ) %/ length( Z )
## %% end
if False :
## # ht, hp, hx, hv = f_get_w( T, X, range( 0, 17 ), delta )
## # print np.all( [ np.allclose( a0,a1 ) for a0, a1 in zip( Xnk, hp ) ] )
io.savemat( './output/xtree.mat', { 'Xnk': Xnk, 'Tnk': Tnk } )
##
Z = ( X - X[ 0 ] ) / delta
Z_floor = np.floor( Z, np.empty_like( Z, np.float64 ) )
Z_ceil = np.ceil( Z, np.empty_like( Z, np.float64 ) )
io.savemat( './output/ceil_floor.mat', {
'py_ceilz': Z_ceil,
'py_floorz': Z_floor,
} )
################################################################################
delta = np.std( np.diff( X ) )
# max_levels and max_crossings are used below but never defined in this script;
# the values here are assumptions inferred from the MATLAB snippet above
# ( levels 0..16, Zr = 2 : 2 : 40 ) so that the code can actually run.
max_levels, max_crossings = 16, 40
Tnk, Xnk, Znk, Vnk, Wnk = xtree_build( T, X, delta = delta )
Nn = np.zeros( ( 1 + max_levels + 1, 1 ), dtype = np.int )
for n, Xk in enumerate( Xnk, 0 ) :
n = max_levels + 1 if n > max_levels + 1 else n
Nn[ n ] += len( Xk ) - 1
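# Dnk[ n, k ] counts how many crossings at level n had 2 * ( k + 1 ) subcrossings;
# the last column absorbs every crossing with max_crossings or more subcrossings.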
Dnk = np.zeros( ( max_levels + 1, max_crossings // 2 ), dtype = np.int )
for n, Zk in enumerate( Znk[ 1: ], 0 ) :
n = max_levels if n > max_levels else n
Z_count, Z_freq = np.unique( Zk, return_counts = True )
Z_count = np.minimum( Z_count, max_crossings )
mask = ( Z_count < max_crossings )
Dnk[ n, Z_count[ mask ] // 2 - 1 ] += Z_freq[ mask ]
Dnk[ n, max_crossings // 2 - 1 ] += np.sum( Z_freq[ ~mask ] )
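# Vnde splits the per-crossing excursion counts ( the first two columns of Vnk )
# by the sign of the third column -- presumably the crossing direction ( down / up ).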
Vnde = np.zeros( ( max_levels + 1, 2, 2 ), dtype = np.int )
for n, Vk in enumerate( Vnk[ 1: ], 0 ) :
n = max_levels if n > max_levels else n
Vnde[ n, 0 ] += np.sum( Vk[ Vk[ :, 2 ] < 0 ], axis = 0 )[:2]
Vnde[ n, 1 ] += np.sum( Vk[ Vk[ :, 2 ] > 0 ], axis = 0 )[:2]
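# Per-level summaries of the crossing durations Wnk: percentiles ( Wnp ),
# mean ( Wbarn ) and standard deviation ( Wstdn ).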
prc = np.array( [ 0.5, 1.0, 2.5, 5.0, 10, 25, 50, 75, 90, 95, 97.5, 99, 99.5 ] )
Wnp = np.zeros( ( max_levels, ) + prc.shape, dtype = np.float )
Wbarn = np.zeros( ( max_levels, 1 ), dtype = np.float )
Wstdn = np.zeros( ( max_levels, 1 ), dtype = np.float )
for n, Wk in enumerate( Wnk[1:], 0 ) :
if len( Wk ) and n < max_levels :
Wbarn[ n ], Wstdn[ n ], Wnp[ n ] = np.average( Wk ), np.std( Wk ), np.percentile( Wk, prc )
| mit | 5,884,203,619,127,614,000 | 33.45977 | 95 | 0.509887 | false | 2.382239 | false | false | false |
KitwareMedical/SlicerITKUltrasound | Documentation/ctk-cli-to-rst.py | 1 | 1065 | #!/usr/bin/env python
import ctk_cli
from rstcloth import rstcloth
import argparse
def cli2rst(cliFile, rstFile):
cli = ctk_cli.CLIModule(cliFile)
rst = rstcloth.RstCloth()
rst.title(cli.title)
rst.newline()
rst.content(cli.description)
rst.newline()
rst.field('Authors', cli.contributor)
rst.field('Version', cli.version)
rst.field('License', cli.license)
rst.newline()
rst.newline()
for parameterGroup in cli:
rst.h2(parameterGroup.label + ' Parameters')
rst.content(parameterGroup.description)
rst.newline()
for parameter in parameterGroup:
rst.definition(parameter.label, parameter.description, bold=True)
rst.newline()
rst.write(rstFile)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert a CTK CLI XML description file to a reStructuredText documentation file.')
parser.add_argument('cliFile')
parser.add_argument('rstFile')
args = parser.parse_args()
cli2rst(args.cliFile, args.rstFile)
| apache-2.0 | 4,431,580,354,356,197,400 | 23.767442 | 132 | 0.6723 | false | 3.64726 | false | false | false |
efulet/library-sample | library/lib/library/library.py | 1 | 6410 | """
Define the implementation for Library interface and their class exceptions.
@created_at 2015-05-16
@author Exequiel Fuentes Lettura <efulet@gmail.com>
"""
import operator
from book import Book
class LibraryException(Exception):
"""Define an exception class for Library errors"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class OutOfBooksException(LibraryException):
"""Define an exception class for OutOfBooks errors"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class IllegalRatingException(LibraryException):
"""Define an exception class for IllegalRating errors"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
# Steps:
# 1. Define a list of books for creating our library
# 2. Implement methods
# 3. Indicate time and space complexity
# 4. Document any unusual edge cases or problems
class Library:
"""Define a class for the library implementation"""
MIN_RATING = 1
MAX_RATING = 100
def __init__(self):
self.books = {}
def add_book(self, book):
"""Add a new book to the library. We dont have a db, so we create a
dictonary with books
"""
if not book:
raise LibraryException("Book is null or empty, please add a book")
id = self.library_length() + 1
# id: primary key of the book
        # store the book keyed by its id (availability is tracked on the Book itself)
self.books[str(id)] = book
def library_length(self):
"""Return length of the library"""
return len(self.books)
def print_library(self):
"""Just for checking object in books"""
for id, book in self.books.items():
print "--"
print "book id: %s" % id
print "book title: %s" % book.get_title()
print "book rating: %s" % book.get_rating()
print "book available: %s" % book.get_available()
print "book get_checkin_timestamp: %s" % book.get_checkin_timestamp()
    # So the best case scenario: O(n), average and worst case O(n log n)
def checkout_book(self, genre):
"""Returns the book most recently checked in in this genre, and removes
it from the pool of available items.
:param genre: the genre to which the book belongs
:return: the Book object that has just been checked out
:raises OutOfBooksException: if there are no books in that genre available
"""
if not genre:
raise LibraryException("Missed genre parameter")
        # We have to scan every book in the library, so this pass is O(n)
genre_list = []
for id, book in self.books.items():
if book.get_genre() == genre and book.get_available():
genre_list.append(book)
        # Check if there are books available for sorting
if not genre_list:
raise OutOfBooksException("There is no books in that genre available")
        # Sort by check-in time so the most recently checked-in book comes first.
        # Best case scenario: O(n), average and worst case O(n log n)
recent_book = sorted(genre_list, key=operator.attrgetter('checkin_timestamp'), reverse=True)[0]
recent_book.set_available(False)
return recent_book
# So the average and worst case O(n)
def checkin_book(self, returned_book, rating):
"""Returns the book to the library's availability pool, making it the
last checked-in book and rating the book in the process.
:param returned_book: the Book that is being checked back in
:param rating: an integer from 1 to 100 (inclusive) specifying the
rating. The last person to rate the book overwrites any
previous rating
:raises IllegalRatingException: if a rating less than 1 or more than 100
is specified
"""
if not returned_book:
raise LibraryException("Book is null or empty, please add a book")
        if rating < self.MIN_RATING:
            raise IllegalRatingException("Rating less than %d" % self.MIN_RATING)
        if rating > self.MAX_RATING:
            raise IllegalRatingException("Rating greater than %d" % self.MAX_RATING)
        # Assuming the title is unique, look the book up in the dictionary.
        # This implementation takes O(n), where n is the number of books in the
        # library. With a real database and an index on the title this lookup
        # would be faster.
for id, book in self.books.items():
if book.get_title() == returned_book.get_title():
book.set_rating(rating)
book.set_available(True)
book.set_checkin_timestamp()
break
        # Note: if the returned book is not found in the library, the check-in is silently ignored.
    # So the best case scenario: O(n), average and worst case O(n log n)
def peek_highest_rated_book(self, genre):
"""Returns the highest rated book in the specified genre, but does not
remove it from availability.
        :param genre: the genre for which we'd like to retrieve the highest-rated
book
:return: a Book that is the highest-rated book currently available in
the genre
:raises OutOfBooksException: if there are no books in that genre available
"""
if not genre:
raise LibraryException("Missed genre parameter")
        # We have to scan every book in the library, so this pass is O(n)
genre_list = []
for id, book in self.books.items():
if book.get_genre() == genre and book.get_available():
genre_list.append(book)
        # Check if there are books available for sorting
if not genre_list:
raise OutOfBooksException("There is no books in that genre available")
# Sorting by the highest rated book.
        # Best case scenario: O(n), average and worst case O(n log n)
return sorted(genre_list, key=operator.attrgetter('rating'), reverse=True)[0]
| mit | -8,818,376,643,218,544,000 | 36.052023 | 103 | 0.599532 | false | 4.307796 | false | false | false |
MariiaSurmenok/insight-data-challenge | src/process_log.py | 1 | 1315 | """ Main file. All 4 features are running here. """
import sys
from collections import Counter
from datetime import timedelta
import active_users
import busiest_hours
import failed_login
import popular_resources
from io_utils import date_formatting, read_from_file, write_information_to_file
# File paths
filename = sys.argv[1]
hosts_filename = sys.argv[2]
hours_filename = sys.argv[3]
resources_filename = sys.argv[4]
blocked_filename = sys.argv[5]
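# Expected invocation (inferred from the argv indexing above):
#   python process_log.py <log_file> <hosts_out> <hours_out> <resources_out> <blocked_out>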
# FEATURES 1 and 2
ip_frequency = Counter()
resources = Counter()
for entry in read_from_file(filename):
active_users.count_host_frequency(ip_frequency, entry)
popular_resources.count_bandwidth_resources(resources, entry)
top_hosts = ip_frequency.most_common(10)
# Write results to a file
for host in top_hosts:
information = host[0] + "," + str(host[1])
write_information_to_file(hosts_filename, information)
top_resources = resources.most_common(10)
# Write results to a file
for resource in top_resources:
write_information_to_file(resources_filename, resource[0])
# FEATURE 3
load_meter = busiest_hours.LoadMeter(filename, hours_filename)
load_meter.find_busiest_hours()
# FEATURE 4
access_blocker = failed_login.AccessBlocker(blocked_filename)
for entry in read_from_file(filename):
access_blocker.check_request(entry)
| mit | 3,266,234,163,898,952,700 | 23.351852 | 79 | 0.755133 | false | 3.30402 | false | false | false |
ZwaConnection/TheGuildWeb | apps/association/models.py | 1 | 1465 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
from ..member.models import *
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
class Association(models.Model):
name = models.CharField(max_length=100)
description = models.TextField()
year_of_creation = models.DateField(auto_now=False, auto_now_add=False, null=True)
address = models.CharField(max_length=100)
phone = models.IntegerField(null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
# email = models.EmailField(max_length=254, default="")
initials = models.CharField(max_length=25, default="")
logo = models.ImageField(upload_to='Images/Association/logo', blank=True)
country = models.ForeignKey('member.Country', null=True)
identifier = models.OneToOneField(User, null=True, on_delete=models.CASCADE)
email_confirmed = models.BooleanField(default=False)
def __str__(self):
return self.name
# @receiver(post_save, sender=User)
# def create_association(sender, instance, created, **kwargs):
# if created:
# Association.objects.get_or_create(identifier=instance)
#
# @receiver(post_save, sender=User)
# def save_association(sender, instance, **kwargs):
# instance.association.save()
| gpl-3.0 | 7,468,152,057,293,517,000 | 39.694444 | 86 | 0.708532 | false | 3.737245 | false | false | false |