# JacekPierzchlewski/RxCS | rxcs/console.py | license: bsd-2-clause
"""
Module contains console printing functions for RxCS. |br|
All console printing in RxCS should be done using functions
from this module.
*Author*:
Jacek Pierzchlewski, Aalborg University, Denmark. <jap@es.aau.dk>
*Version*:
0.1 | 14-MAY-2014 : * Initial version. |br|
0.2 | 15-MAY-2014 : * Docstrings added.
0.21 | 15-MAY-2014 : * New colors ('PARAM' + 'OK') added to the dictionary
0.22 | 14-AUG-2015 : * New function (progress_doneNL) is added
0.23 | 20-AUG-2015 : * New function (newline) is added
0.24 | 30-NOV-2015 : * Progress bar is added
    0.26 | 15-JAN-2016 : * Note, warning and info start with a new line
*License*:
BSD 2-Clause
"""
from __future__ import division
import sys
import numpy as np
import time
# =====================================================================
# Print a new line
# =====================================================================
def newline():
sys.stdout.write('\n')
return
# =====================================================================
# Print signal pack header
# =====================================================================
def pack(inxPack):
"""
.. role:: bash(code)
:language: bash
Function prints header of the signal pack processed by RxCS. |br|
The function takes care of the proper coloring of the console output. |br|
>>> console.pack(1)
gives an output:
:bash:`>>> SIGNAL PACK #1:`
Args:
inxPack (int): Index of the current signal pack
Returns:
nothing
"""
strPackNumber = '#%d' % (inxPack)
sys.stdout.write('\n')
sys.stdout.write(_colors('PROGRESS') + '>>> ' + _colors('ENDC'))
sys.stdout.write('SIGNAL PACK ')
sys.stdout.write(_colors('PROGRESS') + strPackNumber + _colors('ENDC'))
sys.stdout.write(':' + '\n')
sys.stdout.flush()
return
# =====================================================================
# Print the sys progress sign + current stage + name of the current module
# =====================================================================
def progress(strStage, strModule):
"""
.. role:: bash(code)
:language: bash
Function prints progress of the RxCS frames. |br|
It prints the progress sign ('>>') + the current stage (signal generation,
sampler, reconstruction, etc...) + name of the current module. |br|
The function takes care of the proper coloring of the console output. |br|
>>> console.progress('Signal generator', 'Random multitone')
gives an output:
:bash:`| >> Signal generator: Random multitone`
Args:
strStage (string): name of the stage |br|
strModule (string): name of the module
Returns:
nothing
"""
sys.stdout.write(_colors('PROGRESS') + ' >> ' + _colors('ENDC'))
sys.stdout.write(strStage + ': ' + strModule + ' \n')
sys.stdout.flush()
return
# =====================================================================
# Print the module progress sign (>) + start the timer
# =====================================================================
def module_progress(strInfo):
"""
    Function prints info about a module's progress.
The info is preceded by a tabulator and a module progress sign ('>'). |br|
Additionally, the function starts a time counter. |br|
The function takes care of the proper coloring of the console output. |br|
>>> console.module_progress('The module X is starting')
gives an output:
:bash:`| > The module X is starting...`
Args:
strInfo (string): progress info to be printed
Returns:
tStart (float): time stamp of the start
"""
sys.stdout.write(_colors('PROGRESS') + '\n > ' + _colors('ENDC'))
sys.stdout.write(strInfo + '...')
sys.stdout.flush()
# Start the timer
tStart = time.time()
return tStart
# =====================================================================
# Finish the progress print + print the time of execution
# =====================================================================
def progress_done(tStart):
"""
Function adds 'done' to a console message previously printed by a
'module_progress' function. |br|
    Additionally, the function prints info about the execution time of a
module, based on the time stamp of the start of the module. |br|
The function takes care of the proper coloring of the console output. |br|
>>> tStart = console.module_progress('The module X is starting')
>>> time.sleep(1)
>>> console.module_progress_done(tStart)
gives an output:
:bash:`| > The module X is starting...done in 1.00 seconds`
Args:
tStart (float): time stamp of the start
Returns:
nothing
"""
# Measure the time
tTime = time.time() - tStart
strTime = ('done in %.2f seconds') % (tTime)
sys.stdout.write(_colors('OK') + strTime + _colors('ENDC'))
sys.stdout.flush()
return
# =====================================================================
# Finish the progress print + print the time of execution + new line
# =====================================================================
def progress_doneNL(tStart):
"""
Function adds 'done' to a console message previously printed by a
'module_progress' function. |br|
    Additionally, the function prints info about the execution time of a
    module, based on the time stamp of the start of the module, + a new line. |br|
The function takes care of the proper coloring of the console output. |br|
    >>> tStart = console.module_progress('The module X is starting')
>>> time.sleep(1)
>>> console.module_progress_done(tStart)
gives an output:
:bash:`| > The module X is starting...done in 1.00 seconds`
Args:
tStart (float): time stamp of the start
Returns:
nothing
"""
# Measure the time
tTime = time.time() - tStart
strTime = ('done in %.2f seconds') % (tTime)
sys.stdout.write(_colors('OK') + strTime + _colors('ENDC') + '\n')
sys.stdout.flush()
return
# =====================================================================
# Start a progress bar
# =====================================================================
def progress_bar_start(strInfo, iPrintIter, iMilestone, iLineBreak,
bPrintSteps=1, bIteration0=0, bPrintTime=0):
"""
    Function starts a progress bar. |br|
    The start is preceded by a tabulator and a module progress sign ('>>>'). |br|
Additionally, the function starts a time counter. |br|
The function takes care of the proper coloring of the console output. |br|
The function returns a progress bar dictionary. |br|
    >>> console.progress_bar_start('The module X:', 10, 100, 500)
gives an output:
    :bash:`| >>> The module X:`
Args:
strInfo (string): info to be printed
iPrintIter (integer): print a step after 'iPrintIter' iterations
iMilestone (integer): print X after 'iMilestone' iterations
iLineBreak (integer): break the line after 'iLineBreak' iterations
bPrintSteps (integer): 0 - do not print the number of iterations at the end
1 - print the number of iterations at the end
bIteration0 (integer): 0 - iteration #0 is not allowed
1 - iteration #0 is allowed
bPrintTime (integer): 0 - do not print time at all
1 - print time and average time for the last
iteration (excluding iteration 0)
Returns:
dBar (dictionary): data with the progress bar
"""
# Correct the input arguments
iPrintIter = int(round(iPrintIter))
iMilestone = int(round(iMilestone))
iLineBreak = int(round(iLineBreak))
# Check if the settings are correct
if iMilestone % iPrintIter != 0:
        strError = '\'iMilestone\' must be a multiple of \'iPrintIter\'! (%d is not a multiple of %d)!' \
% (iMilestone, iPrintIter)
raise ValueError(strError)
if iLineBreak % iMilestone != 0:
        strError = '\'iLineBreak\' must be a multiple of \'iMilestone\'! (%d is not a multiple of %d)!' \
% (iLineBreak, iMilestone)
raise ValueError(strError)
#----------------------------------
# Construct the output dictionary
dBar = dict()
dBar['bActive'] = 1
dBar['iInfoLen'] = len(strInfo) # Length of the info string
dBar['iPrintIter'] = iPrintIter
dBar['iMilestone'] = iMilestone
dBar['iLineBreak'] = iLineBreak
dBar['bPrintSteps'] = bPrintSteps
dBar['bIteration0'] = bIteration0
dBar['bPrintTime'] = bPrintTime
# Start iterations
if bIteration0 == 0:
dBar['iLastIter'] = 0
else:
dBar['iLastIter'] = -1
# Construct a new line tabulator
if bIteration0 == 0:
dBar['strNewLine'] = '\n ' + (' ' * dBar['iInfoLen'])
else:
dBar['strNewLine'] = '\n ' + (' ' * (dBar['iInfoLen'] + 1))
#----------------------------------
# Begin a progress bar
sys.stdout.write(_colors('PROGRESS') + '\n >>> ' + _colors('ENDC'))
sys.stdout.write(strInfo + ' ')
sys.stdout.flush()
# Start the timer, if needed
if bPrintTime == 1:
tStart = time.time()
dBar['tStart'] = tStart
return dBar
def progress_bar(dBar, iIter):
"""
    Function prints the progress bar.
    Args:
        dBar (dictionary): data with the progress bar
        iIter (integer): the current iteration
    Returns:
        dBar (dictionary): data with the progress bar
"""
    # Is the bar still active?
if dBar['bActive'] == 0:
return dBar
# Make iterations a round integer, in any case
iIter = int(round(iIter))
# Is it the end of the story?
if iIter < 0:
dBar['bActive'] = 0
if dBar['bPrintSteps'] == 1:
strMessage = ' (%d) ' % (dBar['iLastIter'])
sys.stdout.write(strMessage)
sys.stdout.flush()
if dBar['bPrintTime'] == 1:
sys.stdout.write(dBar['strNewLine'])
tTime = time.time() - dBar['tStart'] # Measure the time
strMessage = progress_bar_time(tTime, dBar['iLastIter'])
sys.stdout.write(strMessage)
sys.stdout.flush()
return dBar
# Was this iteration already given?
if iIter <= dBar['iLastIter']:
return dBar
iPreviousLastIter = dBar['iLastIter']
dBar['iLastIter'] = iIter # Mark the current iteration as the last iteration
# Loop over all the iterations
for iIter in range(iPreviousLastIter + 1, iIter + 1):
if iIter == 0:
if dBar['bIteration0'] == 1:
sys.stdout.write(_colors('PROGRESS') + '0' + _colors('ENDC'))
return dBar
elif (iIter % dBar['iMilestone']) == 0:
sys.stdout.write(_colors('PROGRESS') + 'X' + _colors('ENDC'))
sys.stdout.flush()
elif (iIter % dBar['iPrintIter']) == 0:
sys.stdout.write('.')
sys.stdout.flush()
# Break the line, if it is needed
if (iIter % dBar['iLineBreak']) == 0:
sys.stdout.write(dBar['strNewLine'])
sys.stdout.flush()
return dBar
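
# ---------------------------------------------------------------------
# Usage sketch (not part of the original module; only names defined above
# are used): print a dot every 10 iterations, an 'X' every 100, break the
# line every 500, and close the bar with a negative iteration index.
#
#   dBar = progress_bar_start('Processing:', 10, 100, 500,
#                             bPrintSteps=1, bPrintTime=1)
#   for i in range(1, 2001):
#       dBar = progress_bar(dBar, i)
#   progress_bar(dBar, -1)   # finish: prints the count and the timing
# ---------------------------------------------------------------------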
def progress_bar_time(tTime, iIter):
"""
Time service for the progress bar.
"""
iHour = 3600
iMin = 60
strMessage = 'Total time = %.1f [s]' % (tTime)
# Hours
if tTime >= 1 * iHour:
nHours = np.floor(tTime / iHour)
tTimeSec = tTime - nHours * iHour
if nHours == 1:
strMessage = strMessage + ' (%d [hour]' % (nHours)
else:
strMessage = strMessage + ' (%d [hours]' % (nHours)
if tTimeSec >= 1 * iMin:
nMins = np.floor(tTimeSec / iMin)
tTimeSec = tTimeSec - nMins * iMin
strMessage = strMessage + ' %d [mins]' % (nMins)
strMessage = strMessage + ' %.1f [sec])' % (tTimeSec)
# Minutes
elif tTime >= 10 * iMin:
nMins = np.floor(tTime / iMin)
tTimeSec = tTime - nMins * iMin
strMessage = strMessage + ' (%d [mins]' % (nMins)
strMessage = strMessage + ' %.1f [sec])' % (tTimeSec)
# One iteration
tTimeIter = tTime / iIter
# Microseconds
if tTimeIter < 1e-3:
strMessage = strMessage + ' (%.1f [us] p. iteration)' % (tTimeIter * 1e6)
    # Milliseconds
elif tTimeIter < 1:
strMessage = strMessage + ' (%.3f [ms] p. iteration)' % (tTimeIter * 1e3)
else:
strMessage = strMessage + ' (%.3f [s] p. iteration)' % (tTimeIter)
return strMessage
# =====================================================================
# Finish the module progress print + print the time of execution
# =====================================================================
def module_progress_done(tStart):
"""
Function adds 'done' to a console message previously printed by a
'module_progress' function. |br|
    Additionally, the function prints info about the execution time of a
module, based on the time stamp of the start of the module. |br|
The function takes care of the proper coloring of the console output. |br|
>>> tStart = console.module_progress('The module X is starting')
>>> time.sleep(1)
>>> console.module_progress_done(tStart)
gives an output:
:bash:`| > The module X is starting...done in 1.00 seconds`
Args:
tStart (float): time stamp of the start
Returns:
nothing
"""
# Measure the time
tTime = time.time() - tStart
    if (tTime < 1) and (tTime >= 1e-3):  # Milliseconds range
tTime = tTime * 1e3
strTime = ('done in %.2f ms') % (tTime)
elif (tTime < 1e-3) and (tTime >= 1e-6): # Microseconds range
tTime = tTime * 1e6
strTime = ('done in %.2f us') % (tTime)
else:
strTime = ('done in %.2f s') % (tTime)
sys.stdout.write(_colors('OK') + strTime + _colors('ENDC') + '\n\n\n')
sys.stdout.flush()
return
# =====================================================================
# Finish the module progress print + print the time of execution
# (with 1 newline instead of 3)
# =====================================================================
def module_progress_doneNoNew(tStart):
"""
Function adds 'done' to a console message previously printed by a
'module_progress' function. |br|
    Additionally, the function prints info about the execution time of a
    module, based on the time stamp of the start of the module. |br|
    This function does not add extra new lines after 'done'.
The function takes care of the proper coloring of the console output. |br|
>>> tStart = console.module_progress('The module X is starting')
>>> time.sleep(1)
>>> console.module_progress_doneNoNew(tStart)
gives an output:
:bash:`| > The module X is starting...done in 1.00 seconds`
Args:
tStart (float): time stamp of the start
Returns:
nothing
"""
# Measure the time
tTime = time.time() - tStart
strTime = ('done in %.2f seconds') % (tTime)
sys.stdout.write(_colors('OK') + strTime + _colors('ENDC') + '\n')
sys.stdout.flush()
return
# =====================================================================
# Print a warning
# =====================================================================
def warning(strWarn):
"""
Function prints a warning preceded by a proper tabulator. |br|
The function takes care of the proper coloring of the console output. |br|
>>> console.warning('Mind the gap!')
:bash:`| Mind the gap!`
Args:
strWarn (string): warning to be printed
Returns:
nothing
"""
# Add a tabulator to the warning message
strWarn = ('\n %s') % (strWarn)
# Write the warning
sys.stdout.write(_colors('WARN'))
sys.stdout.write(strWarn)
sys.stdout.write(_colors('ENDC') + '\n')
sys.stdout.flush()
return
# =====================================================================
# Print information
# =====================================================================
def info(strInfo):
"""
Function prints an info preceded by a proper tabulator. |br|
The function takes care of the proper coloring of the console output. |br|
>>> console.info('Very important info')
:bash:`| Very important info`
Args:
strInfo (string): info to be printed
Returns:
nothing
"""
# Add a tabulator to the info message
strInfo = ('\n %s') % (strInfo)
# Write the info
sys.stdout.write(_colors('INFO'))
sys.stdout.write(strInfo)
sys.stdout.write(_colors('ENDC') + '\n')
sys.stdout.flush()
return
# =====================================================================
# Print a bullet + information description + ':' + information
# =====================================================================
def bullet_info(strDesc, strInfo):
"""
Function prints an info preceded by a proper tabulator, an info
bullet '*' and a description of the info. |br|
The function takes care of the proper coloring of the console output. |br|
    >>> console.bullet_info('Please remember', 'mind the gap!')
gives an output
    :bash:`| * Please remember: mind the gap!`
Args:
strDesc (string): description of the info |br|
strInfo (string): info to be printed
Returns:
nothing
"""
# Write the tabulator with a bullet
sys.stdout.write('\n' + _colors('BULLET') + ' * ' + _colors('ENDC'))
# Write the description
sys.stdout.write(strDesc + ': ')
# Write the info
sys.stdout.write(_colors('BULLET_INFO'))
sys.stdout.write(strInfo)
sys.stdout.write(_colors('ENDC'))
sys.stdout.write('\n')
sys.stdout.flush()
return
# =====================================================================
# Print a note (an information without coloring)
# =====================================================================
def note(strNote):
"""
Function prints a note preceded by a proper tabulator. |br|
There is no coloring of the output. |br|
>>> console.note('mind the gap!')
:bash:`| mind the gap!`
Args:
        strNote (string): note to be printed
Returns:
nothing
"""
# Add a tabulator to the info message
strNote = ('\n %s') % (strNote)
# Write the info
sys.stdout.write(strNote)
sys.stdout.write('\n')
sys.stdout.flush()
return
# =====================================================================
# Print name of the parameter + the parameter
# =====================================================================
def param(strName, iVal, strForm, strUnit):
"""
Function prints a parameter and a parameter unit.
    The parameter is preceded by a tabulator and a parameter name. |br|
The parameter value is recalculated to a requested order of magnitude,
or the function may decide itself about the order of magnitude. The
formatting string (3rd parameter) controls the order of magnitude of
a printed value. If it contains the '-' character, the function will
decide about an order of magnitude. If it contains a magnitude unit
symbol, the function recalculates the value to the given order of
magnitude. |br|
    The formatting string (3rd parameter) must contain one or two
    characters. If there are two characters, the value is printed in two
    orders of magnitude, the second in parentheses. |br|
Available symbols of orders of magnitude:
(femto): 'f' |br|
(pico): 'p' |br|
(nano): 'n' |br|
(micro): 'u' |br|
        (milli): 'm' |br|
(none): ' ' |br|
(kilo): 'k' |br|
(Mega): 'M' |br|
(Giga): 'G' |br|
(Tera): 'T' |br|
        (second): 's' |br|
(hour): 'h' |br|
|br|
If the first character in the formatting string is 's', then the
parameter is treated as time expressed in seconds. In this case
    the second character may either not exist in the string, or be equal
to 'h'. In the latter case the time will be also expressed in hours. |br|
The last argument is a unit name which will be printed after the values
    of the parameter. If the first character in the formatting string is
    's', then the last argument should be empty. |br|
The function takes care of the proper coloring of the console output. |br|
Usage examples:
>>> console.param('Size of a hard drive',500*1e9,'G ','bytes')
:bash:`| Size of a hard drive: 500.000 G (500000000000) [bytes]`
>>> console.param('Dist. from Aalborg to Auckland',10889,'k ','miles')
:bash:`| Dist. from Aalborg to Auckland: 10.889 k (10889) [miles]`
>>> console.param('The number of people in DK',5627235,'k-','souls')
:bash:`| The number of people in DK: 5627.235 k (5.627 M) [souls]`
>>> console.param('>E.T.< running time',115*60,'sh','')
    :bash:`| >E.T.< running time: 6900.0 [seconds] (1.92 [hours])`
>>> console.param('Honda Civic Type R 0-60',6.6,'s','')
:bash:`| Honda Civic Type R 0-60: 6.6 [seconds]`
Args:
strName (string): name of the parameter |br|
iVal (float): value |br|
strForm (string): format string |br|
strUnit (string): unit |br|
Returns:
nothing
"""
# Write the tabulator
sys.stdout.write(' ')
# Run the engine of parameter print
_param(strName, iVal, strForm, strUnit)
return
# =====================================================================
# Print a bullet + name of the parameter + the parameter
# =====================================================================
def bullet_param(strName, iVal, strForm, strUnit):
"""
Function prints a parameter preceded by a proper tabulator, a bullet
and a parameter name. |br|
The function is identical to the previous 'param' function, the only
difference is a bullet added before the parameter name. Please refer
to the 'param' function for description of the function and its input
parameters. |br|
"""
# Write the tabulator with a bullet
sys.stdout.write('\n' + _colors('BULLET') + ' * ' + _colors('ENDC'))
# Run the engine of parameter print
_param(strName, iVal, strForm, strUnit)
return
# =====================================================================
# The engine of parameter print
# =====================================================================
def _param(strName, iVal, strForm, strUnit):
"""
    It is an engine of the formatted parameter printing. |br|
    The input to the function is identical to the previous 'param' function.
Please refer to the 'param' function for description of the function and
its input parameters. |br|
"""
# The name of the function (for error purposes)
strFunc = 'rxcs.console._param'
# ----------------------------------------------------------------
# Write the parameter name
sys.stdout.write(strName + ': ')
# Check the length of the format string, it should be 1 or 2
lForm = len(strForm)
if lForm < 1 or lForm > 2:
strErr = strFunc + ' : '
strErr = strErr + ('Parameter format string must be 1 or 2 characters')
raise Exception(strErr)
# ----------------------------------------------------------------
# Recalculate the unit to coefficient, if it is asked for
if strForm[0] == '-': # <- the function should recalculate the unit
(iCoef, strUnitRecalc) = _val2unit(iVal)
elif strForm[0] == 's': # <- the parameter contains seconds
_param_time_write(iVal, strForm)
return
else: # <- there is a correct unit already given
# Get the name of the magnitude unit
strUnitRecalc = strForm[0]
# Get the correct coefficient for the 2nd representation
iCoef = _unit2coef(strUnitRecalc)
# Recalculate the value of the parameter
iVal_recal = iVal / iCoef
# Create a string with value
if iVal == 0: # <- the value is zero
strVal = '0'
elif iCoef == 1: # <- there is no need to recalculate the value
# Put the number as it is, but pay attention if it is float or int
if isinstance(iVal, int):
strVal = ('%d') % (iVal_recal)
else:
strVal = ('%.3f') % (iVal_recal)
elif np.isinf(iCoef): # <- the value is an infinite
strVal = ('inf')
else: # <- the value should be recalculated
strVal = ('%.3f %s') % (iVal_recal, strUnitRecalc)
# Write the value
sys.stdout.write(_colors('PARAM') + strVal + _colors('ENDC') + ' ')
# ----------------------------------------------------------------
# 2nd representation:
# If the string has 2 characters, print also the recalculated number
# (the 2nd representation)
if lForm == 2:
# Check if the user wants it to be recalculated to a given magnitude
# or the function should decide
if strForm[1] == '-': # <- the function should decide
# Get the correct coefficient and magnitude unit
(iCoef2, strUnit2Recalc) = _val2unit(iVal)
else: # <- the user gives the magnitude representation
# Get the name of the magnitude unit
strUnit2Recalc = strForm[1]
# Get the correct coefficient for the 2nd representation
iCoef2 = _unit2coef(strUnit2Recalc)
        # If the magnitudes are identical, do not print the 2nd representation
if iCoef != iCoef2:
# Recalculate the value to the 2nd representation
iVal_2Rep = iVal / iCoef2
# Create the string with the 2nd representation
if iCoef2 == 1:
strVal2 = ('%d') % (iVal_2Rep)
else:
strVal2 = ('%.3f %s') % (iVal_2Rep, strUnit2Recalc)
# Print out the 2nd representation
sys.stdout.write('(')
sys.stdout.write(_colors('PARAM') + strVal2 + _colors('ENDC'))
sys.stdout.write(')' + ' ')
# ----------------------------------------------------------------
# Print the unit, if it is not empty
lUnit = len(strUnit)
if lUnit > 0:
sys.stdout.write(_colors('PARAM'))
sys.stdout.write('[' + strUnit + ']')
sys.stdout.write(_colors('ENDC'))
# ----------------------------------------------------------------
sys.stdout.write('\n')
return
# =====================================================================
# The engine of time parameter print
# =====================================================================
def _param_time_write(iVal, strForm):
"""
    It is an engine of the formatted time parameter printing. |br|
Args:
iVal (float): value
strForm (string): format string
Returns:
nothing
"""
# The name of the function (for error purposes)
strFunc = 'rxcs.console._param_time_write'
# ----------------------------------------------------------------
# Create a string with seconds
strSeconds = ('%.1f [seconds]') % (iVal)
# Print the seconds
sys.stdout.write(_colors('PARAM') + strSeconds + _colors('ENDC') + ' ')
# Get the length of the format string
lForm = len(strForm)
# ----------------------------------------------------------------
# Add an info about the hours, if needed
if lForm == 2:
if not (strForm[1] == 'h'):
strErr = strFunc + ' : '
            strErr = strErr + ('If the first character of the parameter ')
            strErr = strErr + ('format is >s< then the second must be >h< or empty!')
raise Exception(strErr)
        # Recalculate seconds to hours and create a proper string with hours
iHours = iVal / 3600
strHours = ('%.2f [hours]') % (iHours)
# Print the hours
sys.stdout.write('(')
sys.stdout.write(_colors('PARAM') + strHours + _colors('ENDC'))
sys.stdout.write(')')
# ----------------------------------------------------------------
sys.stdout.write('\n')
return
# =====================================================================
# Recalculate a unit symbol to a unit coefficient
# =====================================================================
def _unit2coef(strUnit):
"""
Function returns a unit coefficient based on a unit symbol.
Available unit names, symbols and coefficients:
(femto): 'f' = 1e-15
(pico): 'p' = 1e-12
(nano): 'n' = 1e-9
(micro): 'u' = 1e-6
        (milli): 'm' = 1e-3
(none): ' ' = 1
(kilo): 'k' = 1e3
(Mega): 'M' = 1e6
(Giga): 'G' = 1e9
(Tera): 'T' = 1e12
(hour): 'h' = 3600
Args:
strUnit (string): key of the unit
Returns:
        iCoef (float): unit coefficient
"""
# The name of the function (for error purposes)
strFunc = 'rxcs.console._unit2coef'
# ----------------------------------------------------------------
# femto
if strUnit == 'f':
iCoef = 1e-15
# pico
elif strUnit == 'p':
iCoef = 1e-12
# nano
elif strUnit == 'n':
iCoef = 1e-9
# micro
elif strUnit == 'u':
iCoef = 1e-6
    # milli
elif strUnit == 'm':
iCoef = 1e-3
# none
elif strUnit == ' ':
iCoef = 1
# kilo
elif strUnit == 'k':
iCoef = 1e3
# Mega
elif strUnit == 'M':
iCoef = 1e6
# Giga
elif strUnit == 'G':
iCoef = 1e9
# Tera
elif strUnit == 'T':
iCoef = 1e12
# hour
elif strUnit == 'h':
iCoef = 3600
# ----------------------------------------------------------------
# Unknown unit
else:
strErr = strFunc + ' : '
strErr = strErr + ('> %s < is an unknown unit symbol') % (strUnit)
raise Exception(strErr)
# ----------------------------------------------------------------
return iCoef
# =====================================================================
# Recalculate a value to a unit symbol and a unit coefficient
# =====================================================================
def _val2unit(iVal):
"""
Function returns the unit coefficient and a unit symbol.
Args:
iVal (float): value
Returns:
        iCoef (float): unit coefficient
strUnit (string): unit symbol
"""
# femto
if iVal < 1e-12:
iCoef = 1e-15
strUnit = 'f'
# pico
elif iVal < 1e-9:
iCoef = 1e-12
strUnit = 'p'
# nano
elif iVal < 1e-6:
iCoef = 1e-9
strUnit = 'n'
# micro
elif iVal < 1e-3:
iCoef = 1e-6
strUnit = 'u'
    # milli
elif iVal < 1:
iCoef = 1e-3
strUnit = 'm'
# none
elif iVal < 1e3:
iCoef = 1
strUnit = ' '
# kilo
elif iVal < 1e6:
iCoef = 1e3
strUnit = 'k'
# Mega
elif iVal < 1e9:
iCoef = 1e6
strUnit = 'M'
# Giga
elif iVal < 1e12:
iCoef = 1e9
strUnit = 'G'
# Infinite
elif np.isinf(iVal):
iCoef = np.inf
strUnit = ''
# Tera
else:
iCoef = 1e12
strUnit = 'T'
# ----------------------------------------------------------------
return (iCoef, strUnit)
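
# Illustrative examples (derived from the branches above):
#   _val2unit(0.002)        -> (1e-3, 'm')
#   _val2unit(5627235)      -> (1e6, 'M')
#   _val2unit(float('inf')) -> (inf, '')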
# =====================================================================
# Colors dictionary
# =====================================================================
def _colors(strKey):
"""
    Function gives access to the RxCS console colors dictionary. The
    function returns a proper console color formatting string (ANSI colors)
    based on the key given to the function. |br|
Available keys:
'PURPLE'
'BLUE'
'GREEN'
'YELLOW'
'RED'
'BLACK'
'DARK_MAGENTA'
'AQUA'
'BLUE_BG'
'DARK_BLUE'
'DARK_GREEN'
'GREY30'
'GREY70'
'PROGRESS' -> color for progress signs ('>>>', '>>', '>')
'INFO' -> color for info messages
'BULLET_INFO' -> color for bullet info messages
'BULLET' -> color for bullets ('*')
'WARN' -> color for warning messages
'PARAM' -> color for parameters printing
'OK' -> color for good messages
        'ENDC' -> console formatting string which switches off
                  the coloring
Args:
strKey (string): key of the color
Returns:
        strColor (string): console color formatting string
"""
# Define colors
dColors = {}
dColors['PURPLE'] = '\033[95m'
dColors['BLUE'] = '\033[94m'
dColors['GREEN'] = '\033[92m'
dColors['YELLOW'] = '\033[93m'
dColors['RED'] = '\033[91m'
dColors['BLACK'] = '\033[30m'
dColors['DARK_MAGENTA'] = '\033[35m'
dColors['AQUA'] = '\033[96m'
dColors['BLUE_BG'] = '\033[44m'
dColors['DARK_BLUE'] = '\033[34m'
dColors['DARK_GREEN'] = '\033[32m'
dColors['GREY30'] = '\033[30m'
dColors['GREY70'] = '\033[97m'
# Define colors for communication
dColors['PROGRESS'] = dColors['DARK_MAGENTA']
dColors['INFO'] = dColors['DARK_GREEN']
dColors['BULLET_INFO'] = dColors['AQUA']
dColors['BULLET'] = dColors['DARK_MAGENTA']
dColors['WARN'] = dColors['RED']
dColors['PARAM'] = dColors['AQUA']
dColors['OK'] = dColors['DARK_GREEN']
dColors['ENDC'] = '\033[0m'
# Return the correct color
strColor = dColors[strKey]
return strColor

# jptomo/rpython-lang-scheme | rpython/rtyper/rbuiltin.py | license: mit
from collections import OrderedDict
from rpython.annotator import model as annmodel
from rpython.flowspace.model import Constant
from rpython.rlib import rarithmetic, objectmodel
from rpython.rtyper import raddress, rptr, extregistry, rrange
from rpython.rtyper.error import TyperError
from rpython.rtyper.lltypesystem import lltype, llmemory, rstr
from rpython.rtyper import rclass
from rpython.rtyper.rmodel import Repr
from rpython.tool.pairtype import pairtype
BUILTIN_TYPER = {}
def typer_for(func):
def wrapped(rtyper_func):
BUILTIN_TYPER[func] = rtyper_func
return rtyper_func
return wrapped
class __extend__(annmodel.SomeBuiltin):
def rtyper_makerepr(self, rtyper):
if not self.is_constant():
raise TyperError("non-constant built-in function!")
return BuiltinFunctionRepr(self.const)
def rtyper_makekey(self):
const = getattr(self, 'const', None)
if extregistry.is_registered(const):
const = extregistry.lookup(const)
return self.__class__, const
class __extend__(annmodel.SomeBuiltinMethod):
def rtyper_makerepr(self, rtyper):
assert self.methodname is not None
result = BuiltinMethodRepr(rtyper, self.s_self, self.methodname)
return result
def rtyper_makekey(self):
# NOTE: we hash by id of self.s_self here. This appears to be
# necessary because it ends up in hop.args_s[0] in the method call,
# and there is no telling what information the called
# rtype_method_xxx() will read from that hop.args_s[0].
# See test_method_join in test_rbuiltin.
# There is no problem with self.s_self being garbage-collected and
# its id reused, because the BuiltinMethodRepr keeps a reference
# to it.
return (self.__class__, self.methodname, id(self.s_self))
def call_args_expand(hop):
hop = hop.copy()
from rpython.annotator.argument import ArgumentsForTranslation
arguments = ArgumentsForTranslation.fromshape(
hop.args_s[1].const, # shape
range(hop.nb_args-2))
assert arguments.w_stararg is None
keywords = arguments.keywords
# prefix keyword arguments with 'i_'
kwds_i = {}
for key in keywords:
kwds_i['i_' + key] = keywords[key]
return hop, kwds_i
class BuiltinFunctionRepr(Repr):
lowleveltype = lltype.Void
def __init__(self, builtinfunc):
self.builtinfunc = builtinfunc
def findbltintyper(self, rtyper):
"Find the function to use to specialize calls to this built-in func."
try:
return BUILTIN_TYPER[self.builtinfunc]
except (KeyError, TypeError):
pass
if extregistry.is_registered(self.builtinfunc):
entry = extregistry.lookup(self.builtinfunc)
return entry.specialize_call
raise TyperError("don't know about built-in function %r" % (
self.builtinfunc,))
def _call(self, hop2, **kwds_i):
bltintyper = self.findbltintyper(hop2.rtyper)
hop2.llops._called_exception_is_here_or_cannot_occur = False
v_result = bltintyper(hop2, **kwds_i)
if not hop2.llops._called_exception_is_here_or_cannot_occur:
raise TyperError("missing hop.exception_cannot_occur() or "
"hop.exception_is_here() in %s" % bltintyper)
return v_result
def rtype_simple_call(self, hop):
hop2 = hop.copy()
hop2.r_s_popfirstarg()
return self._call(hop2)
def rtype_call_args(self, hop):
# calling a built-in function with keyword arguments:
# mostly for rpython.objectmodel.hint()
hop, kwds_i = call_args_expand(hop)
hop2 = hop.copy()
hop2.r_s_popfirstarg()
hop2.r_s_popfirstarg()
# the RPython-level keyword args are passed with an 'i_' prefix and
# the corresponding value is an *index* in the hop2 arguments,
# to be used with hop.inputarg(arg=..)
return self._call(hop2, **kwds_i)
class BuiltinMethodRepr(Repr):
def __init__(self, rtyper, s_self, methodname):
self.s_self = s_self
self.self_repr = rtyper.getrepr(s_self)
self.methodname = methodname
# methods of a known name are implemented as just their 'self'
self.lowleveltype = self.self_repr.lowleveltype
def convert_const(self, obj):
return self.self_repr.convert_const(obj.__self__)
def rtype_simple_call(self, hop):
# methods: look up the rtype_method_xxx()
name = 'rtype_method_' + self.methodname
try:
bltintyper = getattr(self.self_repr, name)
except AttributeError:
raise TyperError("missing %s.%s" % (
self.self_repr.__class__.__name__, name))
# hack based on the fact that 'lowleveltype == self_repr.lowleveltype'
hop2 = hop.copy()
assert hop2.args_r[0] is self
if isinstance(hop2.args_v[0], Constant):
c = hop2.args_v[0].value # get object from bound method
c = c.__self__
hop2.args_v[0] = Constant(c)
hop2.args_s[0] = self.s_self
hop2.args_r[0] = self.self_repr
return bltintyper(hop2)
class __extend__(pairtype(BuiltinMethodRepr, BuiltinMethodRepr)):
def convert_from_to((r_from, r_to), v, llops):
# convert between two MethodReprs only if they are about the same
# methodname. (Useful for the case r_from.s_self == r_to.s_self but
# r_from is not r_to.) See test_rbuiltin.test_method_repr.
if r_from.methodname != r_to.methodname:
return NotImplemented
return llops.convertvar(v, r_from.self_repr, r_to.self_repr)
def parse_kwds(hop, *argspec_i_r):
lst = [i for (i, r) in argspec_i_r if i is not None]
lst.sort()
if lst != range(hop.nb_args - len(lst), hop.nb_args):
raise TyperError("keyword args are expected to be at the end of "
"the 'hop' arg list")
result = []
for i, r in argspec_i_r:
if i is not None:
if r is None:
r = hop.args_r[i]
result.append(hop.inputarg(r, arg=i))
else:
result.append(None)
del hop.args_v[hop.nb_args - len(lst):]
return result
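
# Usage sketch (mirroring rtype_free/rtype_malloc below): keyword arguments
# reach the typer as 'i_<name>' indices into the hop argument list and must
# sit at its end, e.g.:
#   v_flavor, v_track = parse_kwds(hop, (i_flavor, lltype.Void),
#                                  (i_track_allocation, None))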
# ____________________________________________________________
@typer_for(bool)
def rtype_builtin_bool(hop):
# not called any more?
assert hop.nb_args == 1
return hop.args_r[0].rtype_bool(hop)
@typer_for(int)
def rtype_builtin_int(hop):
if isinstance(hop.args_s[0], annmodel.SomeString):
assert 1 <= hop.nb_args <= 2
return hop.args_r[0].rtype_int(hop)
assert hop.nb_args == 1
return hop.args_r[0].rtype_int(hop)
@typer_for(float)
def rtype_builtin_float(hop):
assert hop.nb_args == 1
return hop.args_r[0].rtype_float(hop)
@typer_for(chr)
def rtype_builtin_chr(hop):
assert hop.nb_args == 1
return hop.args_r[0].rtype_chr(hop)
@typer_for(unichr)
def rtype_builtin_unichr(hop):
assert hop.nb_args == 1
return hop.args_r[0].rtype_unichr(hop)
@typer_for(unicode)
def rtype_builtin_unicode(hop):
return hop.args_r[0].rtype_unicode(hop)
@typer_for(bytearray)
def rtype_builtin_bytearray(hop):
return hop.args_r[0].rtype_bytearray(hop)
@typer_for(list)
def rtype_builtin_list(hop):
return hop.args_r[0].rtype_bltn_list(hop)
#def rtype_builtin_range(hop): see rrange.py
#def rtype_builtin_xrange(hop): see rrange.py
#def rtype_builtin_enumerate(hop): see rrange.py
#def rtype_r_dict(hop): see rdict.py
@typer_for(rarithmetic.intmask)
def rtype_intmask(hop):
hop.exception_cannot_occur()
vlist = hop.inputargs(lltype.Signed)
return vlist[0]
@typer_for(rarithmetic.longlongmask)
def rtype_longlongmask(hop):
hop.exception_cannot_occur()
vlist = hop.inputargs(lltype.SignedLongLong)
return vlist[0]
@typer_for(min)
def rtype_builtin_min(hop):
v1, v2 = hop.inputargs(hop.r_result, hop.r_result)
hop.exception_cannot_occur()
return hop.gendirectcall(ll_min, v1, v2)
def ll_min(i1, i2):
if i1 < i2:
return i1
return i2
@typer_for(max)
def rtype_builtin_max(hop):
v1, v2 = hop.inputargs(hop.r_result, hop.r_result)
hop.exception_cannot_occur()
return hop.gendirectcall(ll_max, v1, v2)
def ll_max(i1, i2):
if i1 > i2:
return i1
return i2
@typer_for(reversed)
def rtype_builtin_reversed(hop):
hop.exception_cannot_occur()
return hop.r_result.newiter(hop)
@typer_for(getattr(object.__init__, 'im_func', object.__init__))
def rtype_object__init__(hop):
hop.exception_cannot_occur()
@typer_for(getattr(EnvironmentError.__init__, 'im_func',
EnvironmentError.__init__))
def rtype_EnvironmentError__init__(hop):
hop.exception_cannot_occur()
v_self = hop.args_v[0]
r_self = hop.args_r[0]
if hop.nb_args <= 2:
v_errno = hop.inputconst(lltype.Signed, 0)
if hop.nb_args == 2:
v_strerror = hop.inputarg(rstr.string_repr, arg=1)
r_self.setfield(v_self, 'strerror', v_strerror, hop.llops)
else:
v_errno = hop.inputarg(lltype.Signed, arg=1)
v_strerror = hop.inputarg(rstr.string_repr, arg=2)
r_self.setfield(v_self, 'strerror', v_strerror, hop.llops)
if hop.nb_args >= 4:
v_filename = hop.inputarg(rstr.string_repr, arg=3)
r_self.setfield(v_self, 'filename', v_filename, hop.llops)
r_self.setfield(v_self, 'errno', v_errno, hop.llops)
try:
WindowsError
except NameError:
pass
else:
@typer_for(
getattr(WindowsError.__init__, 'im_func', WindowsError.__init__))
def rtype_WindowsError__init__(hop):
hop.exception_cannot_occur()
if hop.nb_args == 2:
raise TyperError("WindowsError() should not be called with "
"a single argument")
if hop.nb_args >= 3:
v_self = hop.args_v[0]
r_self = hop.args_r[0]
v_error = hop.inputarg(lltype.Signed, arg=1)
r_self.setfield(v_self, 'winerror', v_error, hop.llops)
@typer_for(objectmodel.hlinvoke)
def rtype_hlinvoke(hop):
_, s_repr = hop.r_s_popfirstarg()
r_callable = s_repr.const
r_func, nimplicitarg = r_callable.get_r_implfunc()
s_callable = r_callable.get_s_callable()
nbargs = len(hop.args_s) - 1 + nimplicitarg
s_sigs = r_func.get_s_signatures((nbargs, (), False))
if len(s_sigs) != 1:
raise TyperError("cannot hlinvoke callable %r with not uniform"
"annotations: %r" % (r_callable,
s_sigs))
args_s, s_ret = s_sigs[0]
rinputs = [hop.rtyper.getrepr(s_obj) for s_obj in args_s]
rresult = hop.rtyper.getrepr(s_ret)
args_s = args_s[nimplicitarg:]
rinputs = rinputs[nimplicitarg:]
new_args_r = [r_callable] + rinputs
for i in range(len(new_args_r)):
assert hop.args_r[i].lowleveltype == new_args_r[i].lowleveltype
hop.args_r = new_args_r
hop.args_s = [s_callable] + args_s
hop.s_result = s_ret
assert hop.r_result.lowleveltype == rresult.lowleveltype
hop.r_result = rresult
return hop.dispatch()
typer_for(range)(rrange.rtype_builtin_range)
typer_for(xrange)(rrange.rtype_builtin_xrange)
typer_for(enumerate)(rrange.rtype_builtin_enumerate)
# annotation of low-level types
@typer_for(lltype.malloc)
def rtype_malloc(hop, i_flavor=None, i_zero=None, i_track_allocation=None,
i_add_memory_pressure=None):
assert hop.args_s[0].is_constant()
vlist = [hop.inputarg(lltype.Void, arg=0)]
opname = 'malloc'
kwds_v = parse_kwds(
hop,
(i_flavor, lltype.Void),
(i_zero, None),
(i_track_allocation, None),
(i_add_memory_pressure, None))
(v_flavor, v_zero, v_track_allocation, v_add_memory_pressure) = kwds_v
flags = {'flavor': 'gc'}
if v_flavor is not None:
flags['flavor'] = v_flavor.value
if i_zero is not None:
flags['zero'] = v_zero.value
if i_track_allocation is not None:
flags['track_allocation'] = v_track_allocation.value
if i_add_memory_pressure is not None:
flags['add_memory_pressure'] = v_add_memory_pressure.value
vlist.append(hop.inputconst(lltype.Void, flags))
assert 1 <= hop.nb_args <= 2
if hop.nb_args == 2:
vlist.append(hop.inputarg(lltype.Signed, arg=1))
opname += '_varsize'
hop.has_implicit_exception(MemoryError) # record that we know about it
hop.exception_is_here()
return hop.genop(opname, vlist, resulttype=hop.r_result.lowleveltype)
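
# Illustrative calls that this typer handles (standard lltype.malloc API;
# the struct/array names S and A are placeholders):
#   lltype.malloc(S)                           -> 'malloc'
#   lltype.malloc(A, n)                        -> 'malloc_varsize'
#   lltype.malloc(S, flavor='raw', zero=True)  -> raw, zero-filled allocation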
@typer_for(lltype.free)
def rtype_free(hop, i_flavor, i_track_allocation=None):
vlist = [hop.inputarg(hop.args_r[0], arg=0)]
v_flavor, v_track_allocation = parse_kwds(hop,
(i_flavor, lltype.Void),
(i_track_allocation, None))
#
assert v_flavor is not None and v_flavor.value == 'raw'
flags = {'flavor': 'raw'}
if i_track_allocation is not None:
flags['track_allocation'] = v_track_allocation.value
vlist.append(hop.inputconst(lltype.Void, flags))
#
hop.exception_cannot_occur()
hop.genop('free', vlist)
@typer_for(lltype.render_immortal)
def rtype_render_immortal(hop, i_track_allocation=None):
vlist = [hop.inputarg(hop.args_r[0], arg=0)]
v_track_allocation = parse_kwds(hop,
(i_track_allocation, None))
hop.exception_cannot_occur()
if i_track_allocation is None or v_track_allocation.value:
hop.genop('track_alloc_stop', vlist)
@typer_for(lltype.typeOf)
@typer_for(lltype.nullptr)
@typer_for(lltype.getRuntimeTypeInfo)
@typer_for(lltype.Ptr)
def rtype_const_result(hop):
hop.exception_cannot_occur()
return hop.inputconst(hop.r_result.lowleveltype, hop.s_result.const)
@typer_for(lltype.cast_pointer)
def rtype_cast_pointer(hop):
assert hop.args_s[0].is_constant()
assert isinstance(hop.args_r[1], rptr.PtrRepr)
v_type, v_input = hop.inputargs(lltype.Void, hop.args_r[1])
hop.exception_cannot_occur()
return hop.genop('cast_pointer', [v_input], # v_type implicit in r_result
resulttype = hop.r_result.lowleveltype)
@typer_for(lltype.cast_opaque_ptr)
def rtype_cast_opaque_ptr(hop):
assert hop.args_s[0].is_constant()
assert isinstance(hop.args_r[1], rptr.PtrRepr)
v_type, v_input = hop.inputargs(lltype.Void, hop.args_r[1])
hop.exception_cannot_occur()
return hop.genop('cast_opaque_ptr', [v_input], # v_type implicit in r_result
resulttype = hop.r_result.lowleveltype)
@typer_for(lltype.length_of_simple_gcarray_from_opaque)
def rtype_length_of_simple_gcarray_from_opaque(hop):
assert isinstance(hop.args_r[0], rptr.PtrRepr)
v_opaque_ptr, = hop.inputargs(hop.args_r[0])
hop.exception_cannot_occur()
return hop.genop('length_of_simple_gcarray_from_opaque', [v_opaque_ptr],
resulttype = hop.r_result.lowleveltype)
@typer_for(lltype.direct_fieldptr)
def rtype_direct_fieldptr(hop):
assert isinstance(hop.args_r[0], rptr.PtrRepr)
assert hop.args_s[1].is_constant()
vlist = hop.inputargs(hop.args_r[0], lltype.Void)
hop.exception_cannot_occur()
return hop.genop('direct_fieldptr', vlist,
resulttype=hop.r_result.lowleveltype)
@typer_for(lltype.direct_arrayitems)
def rtype_direct_arrayitems(hop):
assert isinstance(hop.args_r[0], rptr.PtrRepr)
vlist = hop.inputargs(hop.args_r[0])
hop.exception_cannot_occur()
return hop.genop('direct_arrayitems', vlist,
resulttype=hop.r_result.lowleveltype)
@typer_for(lltype.direct_ptradd)
def rtype_direct_ptradd(hop):
assert isinstance(hop.args_r[0], rptr.PtrRepr)
vlist = hop.inputargs(hop.args_r[0], lltype.Signed)
hop.exception_cannot_occur()
return hop.genop('direct_ptradd', vlist,
resulttype=hop.r_result.lowleveltype)
@typer_for(lltype.cast_primitive)
def rtype_cast_primitive(hop):
assert hop.args_s[0].is_constant()
TGT = hop.args_s[0].const
v_type, v_value = hop.inputargs(lltype.Void, hop.args_r[1])
hop.exception_cannot_occur()
return gen_cast(hop.llops, TGT, v_value)
_cast_to_Signed = {
lltype.Signed: None,
lltype.Bool: 'cast_bool_to_int',
lltype.Char: 'cast_char_to_int',
lltype.UniChar: 'cast_unichar_to_int',
lltype.Float: 'cast_float_to_int',
lltype.Unsigned: 'cast_uint_to_int',
lltype.SignedLongLong: 'truncate_longlong_to_int',
}
_cast_from_Signed = {
lltype.Signed: None,
lltype.Char: 'cast_int_to_char',
lltype.UniChar: 'cast_int_to_unichar',
lltype.Float: 'cast_int_to_float',
lltype.Unsigned: 'cast_int_to_uint',
lltype.SignedLongLong: 'cast_int_to_longlong',
}
def gen_cast(llops, TGT, v_value):
ORIG = v_value.concretetype
if ORIG == TGT:
return v_value
if (isinstance(TGT, lltype.Primitive) and
isinstance(ORIG, lltype.Primitive)):
if ORIG in _cast_to_Signed and TGT in _cast_from_Signed:
op = _cast_to_Signed[ORIG]
if op:
v_value = llops.genop(op, [v_value], resulttype=lltype.Signed)
op = _cast_from_Signed[TGT]
if op:
v_value = llops.genop(op, [v_value], resulttype=TGT)
return v_value
elif ORIG is lltype.Signed and TGT is lltype.Bool:
return llops.genop('int_is_true', [v_value], resulttype=lltype.Bool)
else:
# use the generic operation if there is no alternative
return llops.genop('cast_primitive', [v_value], resulttype=TGT)
elif isinstance(TGT, lltype.Ptr):
if isinstance(ORIG, lltype.Ptr):
if (isinstance(TGT.TO, lltype.OpaqueType) or
isinstance(ORIG.TO, lltype.OpaqueType)):
return llops.genop('cast_opaque_ptr', [v_value], resulttype=TGT)
else:
return llops.genop('cast_pointer', [v_value], resulttype=TGT)
elif ORIG == llmemory.Address:
return llops.genop('cast_adr_to_ptr', [v_value], resulttype=TGT)
elif isinstance(ORIG, lltype.Primitive):
v_value = gen_cast(llops, lltype.Signed, v_value)
return llops.genop('cast_int_to_ptr', [v_value], resulttype=TGT)
elif TGT == llmemory.Address and isinstance(ORIG, lltype.Ptr):
return llops.genop('cast_ptr_to_adr', [v_value], resulttype=TGT)
elif isinstance(TGT, lltype.Primitive):
if isinstance(ORIG, lltype.Ptr):
v_value = llops.genop('cast_ptr_to_int', [v_value],
resulttype=lltype.Signed)
elif ORIG == llmemory.Address:
v_value = llops.genop('cast_adr_to_int', [v_value],
resulttype=lltype.Signed)
else:
raise TypeError("don't know how to cast from %r to %r" % (ORIG,
TGT))
return gen_cast(llops, TGT, v_value)
raise TypeError("don't know how to cast from %r to %r" % (ORIG, TGT))
@typer_for(lltype.cast_ptr_to_int)
def rtype_cast_ptr_to_int(hop):
assert isinstance(hop.args_r[0], rptr.PtrRepr)
vlist = hop.inputargs(hop.args_r[0])
hop.exception_cannot_occur()
return hop.genop('cast_ptr_to_int', vlist,
resulttype=lltype.Signed)
@typer_for(lltype.cast_int_to_ptr)
def rtype_cast_int_to_ptr(hop):
assert hop.args_s[0].is_constant()
v_type, v_input = hop.inputargs(lltype.Void, lltype.Signed)
hop.exception_cannot_occur()
return hop.genop('cast_int_to_ptr', [v_input],
resulttype=hop.r_result.lowleveltype)
@typer_for(lltype.identityhash)
def rtype_identity_hash(hop):
vlist = hop.inputargs(hop.args_r[0])
hop.exception_cannot_occur()
return hop.genop('gc_identityhash', vlist, resulttype=lltype.Signed)
@typer_for(lltype.runtime_type_info)
def rtype_runtime_type_info(hop):
assert isinstance(hop.args_r[0], rptr.PtrRepr)
vlist = hop.inputargs(hop.args_r[0])
hop.exception_cannot_occur()
return hop.genop('runtime_type_info', vlist,
resulttype=hop.r_result.lowleveltype)
# _________________________________________________________________
# memory addresses
@typer_for(llmemory.raw_malloc)
def rtype_raw_malloc(hop):
v_size, = hop.inputargs(lltype.Signed)
hop.exception_cannot_occur()
return hop.genop('raw_malloc', [v_size], resulttype=llmemory.Address)
@typer_for(llmemory.raw_malloc_usage)
def rtype_raw_malloc_usage(hop):
v_size, = hop.inputargs(lltype.Signed)
hop.exception_cannot_occur()
return hop.genop('raw_malloc_usage', [v_size], resulttype=lltype.Signed)
@typer_for(llmemory.raw_free)
def rtype_raw_free(hop):
s_addr = hop.args_s[0]
if s_addr.is_null_address():
raise TyperError("raw_free(x) where x is the constant NULL")
v_addr, = hop.inputargs(llmemory.Address)
hop.exception_cannot_occur()
return hop.genop('raw_free', [v_addr])
@typer_for(llmemory.raw_memcopy)
def rtype_raw_memcopy(hop):
for s_addr in hop.args_s[:2]:
if s_addr.is_null_address():
raise TyperError("raw_memcopy() with a constant NULL")
v_list = hop.inputargs(llmemory.Address, llmemory.Address, lltype.Signed)
hop.exception_cannot_occur()
return hop.genop('raw_memcopy', v_list)
@typer_for(llmemory.raw_memclear)
def rtype_raw_memclear(hop):
s_addr = hop.args_s[0]
if s_addr.is_null_address():
raise TyperError("raw_memclear(x, n) where x is the constant NULL")
v_list = hop.inputargs(llmemory.Address, lltype.Signed)
hop.exception_cannot_occur()
return hop.genop('raw_memclear', v_list)
@typer_for(llmemory.offsetof)
def rtype_offsetof(hop):
TYPE, field = hop.inputargs(lltype.Void, lltype.Void)
hop.exception_cannot_occur()
return hop.inputconst(lltype.Signed,
llmemory.offsetof(TYPE.value, field.value))
# _________________________________________________________________
# non-gc objects
@typer_for(objectmodel.free_non_gc_object)
def rtype_free_non_gc_object(hop):
hop.exception_cannot_occur()
vinst, = hop.inputargs(hop.args_r[0])
flavor = hop.args_r[0].gcflavor
assert flavor != 'gc'
flags = {'flavor': flavor}
cflags = hop.inputconst(lltype.Void, flags)
return hop.genop('free', [vinst, cflags])
@typer_for(objectmodel.keepalive_until_here)
def rtype_keepalive_until_here(hop):
hop.exception_cannot_occur()
for v in hop.args_v:
hop.genop('keepalive', [v], resulttype=lltype.Void)
return hop.inputconst(lltype.Void, None)
@typer_for(llmemory.cast_ptr_to_adr)
def rtype_cast_ptr_to_adr(hop):
vlist = hop.inputargs(hop.args_r[0])
assert isinstance(vlist[0].concretetype, lltype.Ptr)
hop.exception_cannot_occur()
return hop.genop('cast_ptr_to_adr', vlist,
resulttype=llmemory.Address)
@typer_for(llmemory.cast_adr_to_ptr)
def rtype_cast_adr_to_ptr(hop):
assert isinstance(hop.args_r[0], raddress.AddressRepr)
adr, TYPE = hop.inputargs(hop.args_r[0], lltype.Void)
hop.exception_cannot_occur()
return hop.genop('cast_adr_to_ptr', [adr],
resulttype=TYPE.value)
@typer_for(llmemory.cast_adr_to_int)
def rtype_cast_adr_to_int(hop):
assert isinstance(hop.args_r[0], raddress.AddressRepr)
adr = hop.inputarg(hop.args_r[0], arg=0)
if len(hop.args_s) == 1:
mode = "emulated"
else:
mode = hop.args_s[1].const
hop.exception_cannot_occur()
return hop.genop('cast_adr_to_int',
[adr, hop.inputconst(lltype.Void, mode)],
resulttype=lltype.Signed)
@typer_for(llmemory.cast_int_to_adr)
def rtype_cast_int_to_adr(hop):
v_input, = hop.inputargs(lltype.Signed)
hop.exception_cannot_occur()
return hop.genop('cast_int_to_adr', [v_input],
resulttype=llmemory.Address)
@typer_for(isinstance)
def rtype_builtin_isinstance(hop):
hop.exception_cannot_occur()
if hop.s_result.is_constant():
return hop.inputconst(lltype.Bool, hop.s_result.const)
if hop.args_s[1].is_constant() and hop.args_s[1].const in (str, list, unicode):
if hop.args_s[0].knowntype not in (str, list, unicode):
raise TyperError("isinstance(x, str/list/unicode) expects x to be known"
" statically to be a str/list/unicode or None")
rstrlist = hop.args_r[0]
vstrlist = hop.inputarg(rstrlist, arg=0)
cnone = hop.inputconst(rstrlist, None)
return hop.genop('ptr_ne', [vstrlist, cnone], resulttype=lltype.Bool)
assert isinstance(hop.args_r[0], rclass.InstanceRepr)
return hop.args_r[0].rtype_isinstance(hop)
@typer_for(objectmodel.instantiate)
def rtype_instantiate(hop, i_nonmovable=None):
hop.exception_cannot_occur()
s_class = hop.args_s[0]
assert isinstance(s_class, annmodel.SomePBC)
v_nonmovable, = parse_kwds(hop, (i_nonmovable, None))
nonmovable = (i_nonmovable is not None and v_nonmovable.value)
if len(s_class.descriptions) != 1:
# instantiate() on a variable class
if nonmovable:
raise TyperError("instantiate(x, nonmovable=True) cannot be used "
"if x is not a constant class")
vtypeptr, = hop.inputargs(rclass.get_type_repr(hop.rtyper))
r_class = hop.args_r[0]
return r_class._instantiate_runtime_class(hop, vtypeptr,
hop.r_result.lowleveltype)
classdef = s_class.any_description().getuniqueclassdef()
return rclass.rtype_new_instance(hop.rtyper, classdef, hop.llops,
nonmovable=nonmovable)
@typer_for(hasattr)
def rtype_builtin_hasattr(hop):
hop.exception_cannot_occur()
if hop.s_result.is_constant():
return hop.inputconst(lltype.Bool, hop.s_result.const)
raise TyperError("hasattr is only suported on a constant")
@typer_for(OrderedDict)
@typer_for(objectmodel.r_dict)
@typer_for(objectmodel.r_ordereddict)
def rtype_dict_constructor(hop, i_force_non_null=None):
# 'i_force_non_null' is ignored here; if it has any effect, it
# has already been applied to 'hop.r_result'
hop.exception_cannot_occur()
r_dict = hop.r_result
cDICT = hop.inputconst(lltype.Void, r_dict.DICT)
v_result = hop.gendirectcall(r_dict.ll_newdict, cDICT)
if r_dict.custom_eq_hash:
v_eqfn = hop.inputarg(r_dict.r_rdict_eqfn, arg=0)
v_hashfn = hop.inputarg(r_dict.r_rdict_hashfn, arg=1)
if r_dict.r_rdict_eqfn.lowleveltype != lltype.Void:
cname = hop.inputconst(lltype.Void, 'fnkeyeq')
hop.genop('setfield', [v_result, cname, v_eqfn])
if r_dict.r_rdict_hashfn.lowleveltype != lltype.Void:
cname = hop.inputconst(lltype.Void, 'fnkeyhash')
hop.genop('setfield', [v_result, cname, v_hashfn])
return v_result
# _________________________________________________________________
# weakrefs
import weakref
from rpython.rtyper.lltypesystem import llmemory
@typer_for(llmemory.weakref_create)
@typer_for(weakref.ref)
def rtype_weakref_create(hop):
vlist = hop.inputargs(hop.args_r[0])
hop.exception_cannot_occur()
return hop.genop('weakref_create', vlist, resulttype=llmemory.WeakRefPtr)
@typer_for(llmemory.weakref_deref)
def rtype_weakref_deref(hop):
c_ptrtype, v_wref = hop.inputargs(lltype.Void, hop.args_r[1])
assert v_wref.concretetype == llmemory.WeakRefPtr
hop.exception_cannot_occur()
return hop.genop('weakref_deref', [v_wref], resulttype=c_ptrtype.value)
@typer_for(llmemory.cast_ptr_to_weakrefptr)
def rtype_cast_ptr_to_weakrefptr(hop):
vlist = hop.inputargs(hop.args_r[0])
hop.exception_cannot_occur()
return hop.genop('cast_ptr_to_weakrefptr', vlist,
resulttype=llmemory.WeakRefPtr)
@typer_for(llmemory.cast_weakrefptr_to_ptr)
def rtype_cast_weakrefptr_to_ptr(hop):
c_ptrtype, v_wref = hop.inputargs(lltype.Void, hop.args_r[1])
assert v_wref.concretetype == llmemory.WeakRefPtr
hop.exception_cannot_occur()
return hop.genop('cast_weakrefptr_to_ptr', [v_wref],
resulttype=c_ptrtype.value)

# nanuxbe/microbit-files | remote.py | license: mit
import radio
from microbit import button_a, button_b, display, Image, sleep, accelerometer
DIRECTIONS = [
(accelerometer.get_y, "backwards", "forwards"),
(accelerometer.get_x, "right", "left"),
]
ARROWS = {
"backwards": Image.ARROW_S,
"forwards": Image.ARROW_N,
"left": Image.ARROW_W,
"right": Image.ARROW_E,
}
SLEEP_TIME = 150
def get_direction(method, positive, negative):
value = (method() + 300) // 700
if value == 0:
return None
value = value // abs(value)
if value > 0:
return positive
return negative
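
# Worked example of the thresholding above (the micro:bit accelerometer
# reports roughly +/-1024 milli-g at full tilt): readings in [-300, 399]
# map to 0 and yield None (a dead zone); anything above returns `positive`,
# anything below returns `negative`.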
radio.on()
while True:
rv = None
ct_a = button_a.get_presses()
if ct_a > 0:
rv = 'btn:a'
else:
ct_b = button_b.get_presses()
if ct_b > 0:
rv = 'btn:b'
if rv is not None:
print(rv)
radio.send(rv)
sleep(SLEEP_TIME)
value = None
for direction in DIRECTIONS:
value = get_direction(*direction)
if value is not None:
break
if value is None:
display.show(Image("00000:03730:07970:03730:00000"))
else:
display.show(ARROWS[value])
rv = "move:{}".format(value)
print(rv)
radio.send(rv)
sleep(SLEEP_TIME)
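
# A hypothetical receiver-side sketch (not part of this file): a second
# micro:bit can decode the "btn:<x>" / "move:<direction>" strings sent above.
# Note that a neutral reading is transmitted literally as "move:None".
#
#   import radio
#   radio.on()
#   while True:
#       msg = radio.receive()
#       if msg is None:
#           continue
#       if msg.startswith("move:"):
#           direction = msg[5:]     # e.g. "forwards", "left", "None"
#       elif msg.startswith("btn:"):
#           button = msg[4:]        # "a" or "b"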

#!/usr/bin/env python
# Lisergishnu/LTXKit | __init__.py | license: gpl-2.0
# -*- coding: utf-8 -*-
# @Author: Marco Benzi <marco.benzi@alumnos.usm.cl>
# @Date: 2015-06-08 16:12:27
# @Last Modified 2015-06-08
# @Last Modified time: 2015-06-08 17:38:03
#==========================================================================
#This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#==========================================================================
import cylLTX
import rectLTX
import uStripDesign
__all__ = ["cylLTX", "uStripDesign","rectLTX"]

# aleixq/python3-brisa | brisa/core/log.py | license: mit
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php or see LICENSE file.
# Copyright 2007-2008 Brisa Team <brisa-develop@garage.maemo.org>
""" Log module with colored logging feature. Common usage of this module can
be only importing it and calling one of the available functions: debug,
warning, info, critical, error.
"""
import os
import logging
from logging import getLogger
from brisa import __enable_logging__
from brisa.core import config
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = list(range(8))
RESET_SEQ = '\033[0m'
COLOR_SEQ = '\033[1;%dm'
BOLD_SEQ = '\033[1m'
COLORS = {
'WARNING': YELLOW,
'INFO': WHITE,
'DEBUG': BLUE,
'CRITICAL': YELLOW,
'ERROR': RED}
def formatter_message(message, use_color = True):
""" Method to format the pattern in which the log messages will be
displayed.
@param message: message log to be displayed
    @param use_color: flag indicating whether colors should be used
@type message: str
@type use_color: boolean
@return: the new formatted message
@rtype: str
"""
if use_color:
message = message.replace('$RESET', RESET_SEQ).replace('$BOLD',
BOLD_SEQ)
else:
message = message.replace('$RESET', '').replace('$BOLD', '')
return message
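# For instance (a quick sanity check, not part of the original module):
# formatter_message('$BOLDwarn$RESET') returns '\033[1mwarn\033[0m' when
# use_color is True, and plain 'warn' when it is False.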
class ColoredFormatter(logging.Formatter):
""" ColoredFormatter class, which wrappers logging.Formatter. """
def __init__(self, msg, use_color = True):
""" Constructor of the ColoredFormatter class.
@param msg: message to be displayed
        @param use_color: flag indicating whether color should be used
@type msg: str
@type use_color: boolean
"""
logging.Formatter.__init__(self, msg)
self.use_color = use_color
def format(self, record):
""" format method to the ColoredFormatter class that organizes the log
message.
@parameter record: information about the logger
@type record: Instance of Logger, either its RootLogger or not
"""
levelname = record.levelname
if self.use_color and levelname in COLORS:
levelname_color = COLOR_SEQ % (30 + COLORS[levelname]) + levelname\
+ RESET_SEQ
record.levelname = levelname_color
return logging.Formatter.format(self, record)
class ColoredLogger(logging.Logger):
FORMAT = '%(created)f $BOLD%(levelname)s$RESET $BOLD%(module)s:%(lineno)d'\
':%(funcName)s()$RESET %(message)s'
COLOR_FORMAT = formatter_message(FORMAT, True)
def __init__(self, name):
""" Constructor for the ColoredLogger class.
@param name: name of the Logger.
@type name: str
"""
global level
logging.Logger.__init__(self, name, level)
color_formatter = ColoredFormatter(self.COLOR_FORMAT)
console = logging.StreamHandler()
console.setFormatter(color_formatter)
self.addHandler(console)
log_dict = {'WARNING': logging.WARNING,
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR}
def setup_logging():
""" Method to setup the logging options. """
global debug, info, warning, critical, error, root_logger, set_level,\
setLevel, filename, level
level = log_dict.get(config.get_parameter('brisa', 'logging'),
logging.DEBUG)
filename = config.get_parameter('brisa', 'logging_output')
if filename == 'file':
filename = os.path.join(config.brisa_home, 'brisa.log')
logging.basicConfig(level=level, filename=filename,
format='%(created)f %(levelname)s %(module)s:'\
'%(lineno)d:%(funcName)s() %(message)s')
root_logger = logging.getLogger('RootLogger')
else:
logging.setLoggerClass(ColoredLogger)
root_logger = getLogger('RootLogger')
root_logger.setLevel(level)
def set_level(level):
""" Real implementation of the set level function. """
root_logger.setLevel(log_dict.get(level))
def setLevel(level):
""" Method to set the log level. """
set_level(level)
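# Minimal usage sketch (hypothetical messages; assumes __enable_logging__ is
# True so that the module-level debug/info/... aliases below are bound):
def _usage_example():
    info('server started')
    warning('configuration file missing, using defaults')
    setLevel('DEBUG')
    debug('verbose output enabled')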
root_logger = getLogger()
if __enable_logging__:
setup_logging()
debug = root_logger.debug
info = root_logger.info
warning = root_logger.warning
critical = root_logger.critical
error = root_logger.error
| mit | -6,357,651,957,540,853,000 | 29.736486 | 79 | 0.621016 | false | 4.025664 | false | false | false |
Balannen/LSMASOMM | atom3/Kernel/GraphGrammar/ASG_GGEditMetaModel.py | 1 | 1865 | # __ASG_GGEditMetaModel.py_____________________________________________________
from ASG import *
from ATOM3Type import *
from ATOM3 import *
class ASG_GGEditMetaModel(ASG, ATOM3Type):
def __init__(self, parent= None, ASGroot = None):
ASG.__init__(self, ASGroot, ['ASG_GGEditMetaModel' ,'GGruleEdit'])
ATOM3Type.__init__(self)
self.parent = parent
self.generatedAttributes = { }
def show(self, parent, parentWindowInfo):
ATOM3Type.show(self, parent, parentWindowInfo)
self.containerFrame = Frame(parent)
return self.containerFrame
def toString(self, maxWide = None, maxLines = None ):
    rs = self.strValue
    if maxWide: return rs[0:maxWide-3] + '...'
    else: return rs
def getValue(self):
return ()
def setValue(self, value):
pass
def writeConstructor2File(self, file, indent, objName="at", depth = 0, generatingCode = 0):
"Method that writes into a file the constructor and the value of the object. Must be overriden in children"
def writeValue2File(self, file, indent, objName="at", depth = 0, generatingCode = 0):
"Method that writes into a file the the value of the object. Must be overriden in children"
def clone(self):
    cloneObject = ASG_GGEditMetaModel(self.parent)
    return cloneObject
def copy(self, other):
ATOM3Type.copy(self, other)
ASGNode.copy(self, other)
def destroy(self):
self.containerFrame = None
def preCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.preCondition(actionID, params)
else: return None
def postCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.preCondition(actionID, params)
else: return None
| gpl-3.0 | 4,835,057,202,533,837,000 | 32.537037 | 113 | 0.624129 | false | 3.73 | false | false | false |
proggy/optparse2 | __init__.py | 1 | 18409 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright notice
# ----------------
#
# Copyright (C) 2012-2014 Daniel Jung
# Contact: djungbremen@gmail.com
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
#
#
# Original copyright statement by the authors of optparse
# =======================================================
#
# Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved.
# Copyright (c) 2002-2006 Python Software Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Extension of the command line option parser from the module *optparse*,
which was originally written by Gregory P. Ward.
The most important changes are:
- information about the default values is automatically appended to the
help strings of each option (if they do not already include the word
"%default")
- options and option groups are displayed in alphabetical order on the help
page
- option string conflicts do not necessarily lead to an exception: the parser
  first tries to move the option string to the new option (giving it a new
  meaning), as long as at least one option string remains at the old option,
  so the option string's old meaning is overwritten
- pydoc.pager is now used to display the help (behavior similar to the bash
command *less*)
- by default, the *help* and *version* options are moved to an option group
called "General options"
- the *help* option does no longer have the short option string "-h", but
instead "-?"
- write *None* instead of *none* in default option value help string
- preserve linebreaks in description (still needs improvement)"""
#
# To do:
# --> only use pydoc.pager for showing help if the length of the help
#     exceeds the terminal window height
#
__created__ = '2012-05-17'
__modified__ = '2013-02-06'
import optparse
import pydoc
import textwrap
class OptionContainer(optparse.OptionContainer):
"""Extended version of optparse.OptionContainer."""
# 2012-05-21 - 2012-05-23
def get_option_by_name(self, name):
"""Get option by option name. A little bit different than
*get_option()*, as it first checks *dest* before trying the option
strings, and also does not expect the dashes ("-" or "--") when
referencing the option strings."""
# 2012-05-21 - 2012-05-21
# check destinations
for option in self.option_list:
if option.dest and option.dest == name:
return option
# try option strings
return self._long_opt.get('--'+name) or self._short_opt.get('-'+name)
def add_option(self, *args, **kwargs):
"""Before calling the original method *add_option()*, this version
checks if the same option strings (long and short) do already exist in
another option definition. Instead of raising an exception rightaway,
it tries to "overwrite" the meaning of the option string, i.e. the
option string is deleted from the other option. However, this will only
be done if this option string is not *the only one* defined by the
other option, because at least one option string should persist for
each option."""
# 2012-05-23 - 2012-05-23
# cycle all option strings of the new option
for optionstring in args:
# check if this option string already exists in some option
if optionstring in self._short_opt:
option = self._short_opt[optionstring]
# make sure it is not the only option string of this option
if len(option._short_opts)+len(option._long_opts) > 1:
# delete this option string from the old option
option._short_opts.remove(optionstring)
del self._short_opt[optionstring]
elif optionstring in self._long_opt:
option = self._long_opt[optionstring]
# make sure it is not the only option string of this option
if len(option._short_opts)+len(option._long_opts) > 1:
                    # delete this option string from the old option
option._long_opts.remove(optionstring)
del self._long_opt[optionstring]
# finally, call the original method
optparse.OptionContainer.add_option(self, *args, **kwargs)
class OptionGroup(optparse.OptionGroup, OptionContainer):
"""Just make sure the modified method *OptionContainer.add_option()* is
used also by *OptionGroup* (monkey patch). Otherwise, the original class
stays untouched."""
# 2012-05-23 - 2012-05-23
add_option = OptionContainer.add_option
class OptionParser(optparse.OptionParser, OptionContainer):
"""Improved version of *optparse.OptionParser* that overwrites some of its
methods and changes its behavior a little bit."""
# 2012-05-17 - 2013-02-06
# former hdp._MyOptionParser from 2011-09-14 until 2011-12-19
# former tb.MyOptionParser from 2011-08-03
def __init__(self, *args, **kwargs):
"""Improved version of the constructor. Sets the version string if the
user has not done so himself, because an empty version string would
lead to a bug lateron. If the keyword argument *general* is set to
*True*, move help and version options to the newly created option group
"General options" (default: *True*)."""
# 2012-05-17 - 2012-05-21
# former hdp._MyOptionParser.__init__ from 2011-11-11
# make sure the keyword argument "version" is set to a non-empty string
if not 'version' in kwargs:
kwargs.update(version=' ')
if not 'formatter' in kwargs:
kwargs.update(formatter=IndentedHelpFormatterWithNL())
# catch additional keyword arguments before calling the original method
general = kwargs.pop('general', True)
# call original initialisation method
optparse.OptionParser.__init__(self, *args, **kwargs)
# create an option group "general options" and move help and version
# option there
if general:
og = optparse.OptionGroup(self, 'General options')
self.move_option('help', og)
self.move_option('version', og)
self.add_option_group(og)
def cmp_opts(self, a, b):
"""Compare options by the first short option name or, if there is no
short option name, by the first long option name. Needed for sorting
the options."""
# 2012-05-17
# former hdp._MyOptionParser.cmp_opts from 2011-08-03
if len(a._short_opts) > 0:
aname = a._short_opts[0][1:]
else:
aname = a._long_opts[0][2:]
if len(b._short_opts) > 0:
bname = b._short_opts[0][1:]
else:
bname = b._long_opts[0][2:]
if aname == bname:
return 0
elif aname < bname:
return -1
else:
return 1
def print_help(self, file=None):
"""Like the original, except it uses *pydoc.pager* to display the help
text on the screen. The file argument no longer has any meaning, it
just stays there for compatibility reasons. Also, the method now sorts
all options and option groups before displaying the help text."""
# 2012-05-17
# former hdp._MyOptionParser.print_help from 2011-08-02 - 2011-12-19
# How can line breaks be preserved in epilog and description? Maybe
# look at the responsible mothod in optparse.OptionParser to get a hint
# sort options (also within option groups, and groups themselves)
self.option_list.sort(cmp=self.cmp_opts)
self.option_groups.sort(cmp=lambda a, b: -1
if a.title < b.title else 1)
for ind in xrange(len(self.option_groups)):
self.option_groups[ind].option_list.sort(cmp=self.cmp_opts)
#if file is None:
# file = _sys.stdout
encoding = self._get_encoding(file)
#file.write(self.format_help().encode(encoding, "replace"))
pydoc.pager(self.format_help().encode(encoding, 'replace'))
def _add_help_option(self):
"""Like the original method, but does not define the short option
string "-h". Instead, defines a short option "-?"."""
# 2012-05-17 - 2012-07-09
# former hdp._MyOptionParser.print_help 2011-08-03
self.add_option('-?', '--help', action='help',
help='show this help message and exit')
def add_all_default_values(self):
"""Automatically append the default values to the help strings of all
the options of this option parser. Those options that already contain
the substring "%default" are skipped."""
# 2012-05-18
self._add_default_values(self)
for og in self.option_groups:
self._add_default_values(og)
def _add_default_values(self, op):
"""Automatically append information about the default values to the
help string of the given option parser or option group object. Those
options that already contain the substring "%default" are skipped.
This method is used by *add_all_default_values()*, which is the one
that should be called by the user. There should be no need for the user
to call this method manually."""
# 2012-05-18 - 2012-05-22
# former hdp.BaseHDP.help_default from 2011-09-14
# former tb.BaseProc.help_default from 2011-02-11
for o in op.option_list:
if o.help and not '%default' in o.help and o.action == 'store' \
and str(o.default) != '':
# then append the information to the help string
if not o.help[-1] in '.!':
o.help += '.'
if o.help[-1] != ' ':
o.help += ' '
o.help += 'Default: %default'
def move_option(self, name, destination, source=None):
"""Move an already defined option from one option parser object to
another. By default, the source object is the option parser object
itself, but can also be set to any option group object. Also the
destination can be any option parser or option group object."""
# 2012-05-18 - 2012-05-21
# set source to this option parser object by default
if source is None:
source = self
# search for the given option name, remember its index
try:
index = source.option_list.index(self.get_option_by_name(name))
except ValueError:
raise KeyError('option "%s" not found' % name)
# move option object to new location
destination.option_list.append(source.option_list.pop(index))
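    # Example (hypothetical): relocate a previously defined --verbose option
    # into an existing group, overriding its original location:
    #   parser.move_option('verbose',
    #                      parser.get_option_group_by_title('General'))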
def parse_args(self, args=None, values=None):
"""Does a little bit of extra stuff before calling the original method
*parse_args()*."""
# 2012-05-21 - 2012-05-22
# add the default values to all help strings
self.add_all_default_values()
# make sure line breaks are respected in epilog and description
#self.epilog = '\n'.join([s.strip() for s in self.epilog.split('\n')])
#self.description = '\n'.join([s.strip() \
#for s in self.description.split('\n')])
# How can line breaks be preserved in epilog and description? Maybe
# look at the responsible mothod in optparse.OptionParser to get a hint
# call original method
return optparse.OptionParser.parse_args(self, args=args, values=values)
# next thing will be to create an argument "dictionary" (or similar) to
# feed the Values instance with extra values again, recall "dest" or
# long or short option strings substitute kw2op with something more
# reasonable I think this can already be done with the argument
# "values" probably just say "values=optparse.Values(dictionary)" but
# then, only the true option names are allowed, i.e. option.dest
def get_option_group_by_title(self, title):
"""Get option group by group title. It is sufficient that the group
title starts with the given string. All strings are converted to lower
case before comparison."""
# 2012-05-21 - 2012-05-21
# check all groups
for group in self.option_groups:
if group.title.lower().startswith(title.lower()):
return group
else:
raise KeyError('option group %s not found' % title)
def walk(self):
"""Return iterator over all options of the option parser, including
those in option groups."""
### already exists by the name _get_all_options (but it is not an
### iterator)
# 2012-05-22 - 2012-05-22
for option in self.option_list:
yield option
for group in self.option_groups:
for option in group.option_list:
yield option
def search_option(self, name):
"""Search the whole option parser recursively (also in option groups)
for an option by the given name. If no matching option is found, return
*False*. Otherwise, return reference to the option object."""
# 2012-05-22 - 2012-05-22
for option in self.walk():
if option.dest and option.dest == name \
or '--'+name in option._long_opts \
or '-'+name in option._short_opts:
return option
else:
return False
add_option = OptionContainer.add_option
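# A minimal end-to-end sketch (hypothetical option names). Defaults are
# appended to the help strings during parse_args, --help/--version land in
# the "General options" group, and the sorted help page is shown through
# pydoc.pager.
def _build_example_parser():
    parser = OptionParser(description='Demo of the extended parser.')
    parser.add_option('-n', '--num', dest='num', type='int', default=3,
                      help='number of iterations')  # shown as "... Default: 3"
    group = OptionGroup(parser, 'Tuning options')
    group.add_option('-a', '--alpha', dest='alpha', type='float', default=0.5,
                     help='smoothing factor')
    parser.add_option_group(group)
    return parser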
class IndentedHelpFormatterWithNL(optparse.IndentedHelpFormatter):
"""Solve the problem that newline characters are erased in the docstring.
Courtesy goes to Tim Chase:
https://groups.google.com/forum/?fromgroups#!topic/comp.lang.python/bfbmtUGhW8I"""
__created__ = '2013-02-06'
__modified__ = '2013-02-06'
NO_DEFAULT_VALUE = 'None'
def format_description(self, description):
if not description:
return ''
desc_width = self.width - self.current_indent
indent = " "*self.current_indent
# the above is still the same
bits = description.split('\n')
formatted_bits = [
textwrap.fill(bit, desc_width, initial_indent=indent,
subsequent_indent=indent)
for bit in bits]
result = "\n".join(formatted_bits) + "\n"
return result
def format_option(self, option):
# The help for each option consists of two parts:
# * the opt strings and metavars
# eg. ("-x", or "-fFILENAME, --file=FILENAME")
# * the user-supplied help string
# eg. ("turn on expert mode", "read data from FILENAME")
#
# If possible, we write both of these on the same line:
# -x turn on expert mode
#
# But if the opt string list is too long, we put the help
# string on a second line, indented to the same column it would
# start in if it fit on the first line.
# -fFILENAME, --file=FILENAME
# read data from FILENAME
result = []
opts = self.option_strings[option]
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = "%*s%s\n" % (self.current_indent, "", opts)
indent_first = self.help_position
else: # start help on same line as opts
opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_text = self.expand_default(option)
# everything is the same up through here
help_lines = []
for para in help_text.split("\n"):
help_lines.extend(textwrap.wrap(para, self.help_width))
# everything is the same after here
result.append("%*s%s\n" % (
indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (self.help_position, "", line)
for line in help_lines[1:]])
elif opts[-1] != "\n":
result.append("\n")
return "".join(result)
| gpl-2.0 | 7,814,770,479,823,910,000 | 42.830952 | 86 | 0.634364 | false | 4.256416 | false | false | false |
m4nolo/steering-all | src/world/gameover.py | 2 | 1854 | import pygame
import dotworld
import menuworld
from src.define import *
from src.dot.dottext import DotText
class GameOver(dotworld.DotWorld):
def __init__(self, score):
dotworld.DotWorld.__init__(self)
self.counter = 0
self.limit = 400
self.alpha = 0
self.animState = 1
self.label = DotText("Game Over", 32, (0, 0, 0), (255, 255, 255))
self.scorelabel = DotText("Score: " + str(int(score / GameDefine.SCORE_DECIMAL)), 24, (0, 0, 0), (255, 255, 255))
def onAttachScreen(self):
pygame.mixer.music.stop()
self.label.centerX(self.screen.width)
self.label.centerY(self.screen.height)
self.scorelabel.centerX(self.screen.width)
self.scorelabel.marginTop(dotget(1))
self.scorelabel.below(self.label)
def changeAlpha(self):
self.label.surface.set_alpha(self.alpha)
self.scorelabel.surface.set_alpha(self.alpha)
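    # Animation outline for step() below: state 1 fades the labels in
    # (alpha += 2 per frame), state 2 holds them for roughly fps * 3 frames,
    # and state 3 fades them out before switching back to the menu world.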
def listen(self, inputResult):
if inputResult == GameDefine.COMMAND_BOOST:
self.pause()
def step(self):
if self.active:
self.changeAlpha()
self.label.draw(self.screen.displaysurf)
self.scorelabel.draw(self.screen.displaysurf)
self.counter += 1
if self.animState == 1:
self.alpha += 2
if self.alpha > 255:
self.animState = 2
self.counter = 0
if self.animState == 2:
self.counter += 1
if self.counter > self.screen.fps * 3:
self.animState = 3
if self.animState == 3:
self.alpha -= 2
if self.alpha <= 0:
self.pause()
else:
self.screen.setWorld(menuworld.MenuWorld())
del self
| mit | 1,563,944,088,642,314,800 | 27.090909 | 121 | 0.552859 | false | 3.806982 | false | false | false |
mohsaad/Algorithms | machine_learning/gan/dcgan_tf.py | 1 | 15325 | from __future__ import print_function, division
import os
import util
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from datetime import datetime
LEARNING_RATE = 0.0002
BETA1 = 0.5
BATCH_SIZE = 64
EPOCHS = 2
SAVE_SAMPLE_PERIOD = 50
if not os.path.exists('samples'):
os.mkdir('samples')
def lrelu(x, alpha = 0.2):
return tf.maximum(alpha * x, x)
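# Quick check of the leaky ReLU above: with the default alpha of 0.2, lrelu
# applied to [-2.0, 3.0] yields [-0.4, 3.0]; negative inputs are scaled down
# rather than zeroed, which keeps gradients flowing in the discriminator.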
class ConvLayer:
def __init__(self, name, mi, mo, apply_batch_norm, filtersz = 5, stride = 2, f = tf.nn.relu):
self.W = tf.get_variable(
"W_%s" % name,
shape = (filtersz, filtersz, mi, mo),
initializer = tf.truncated_normal_initializer(stddev = 0.02)
)
        self.b = tf.get_variable(
            "b_%s" % name,
            shape = (mo,),
            initializer = tf.zeros_initializer()
        )
self.f = f
self.name = name
self.stride = stride
self.apply_batch_norm = apply_batch_norm
self.params = [self.W, self.b]
def forward(self, X, reuse, is_training):
conv_out = tf.nn.conv2d(
X,
self.W,
strides=[1, self.stride, self.stride, 1],
padding='SAME'
)
conv_out = tf.nn.bias_add(conv_out, self.b)
if self.apply_batch_norm:
conv_out = tf.contrib.layers.batch_norm(
conv_out,
decay = 0.9,
updates_collections = None,
epsilon = 1e-5,
scale = True,
is_training = is_training,
reuse = reuse,
scope = self.name
)
return self.f(conv_out)
class FractionallyStridedConvLayer:
def __init__(self, name, mi, mo, output_shape, apply_batch_norm, filtersz = 5, stride = 2, f = tf.nn.relu):
self.W = tf.get_variable(
"W_%s" % name,
shape = (filtersz, filtersz, mo, mi),
initializer = tf.truncated_normal_initializer(stddev = 0.02)
)
        self.b = tf.get_variable(
            "b_%s" % name,
            shape = (mo,),
            initializer = tf.zeros_initializer()
        )
self.f = f
self.name = name
self.stride = stride
self.apply_batch_norm = apply_batch_norm
self.params = [self.W, self.b]
self.output_shape = output_shape
def forward(self, X, reuse, is_training):
conv_out = tf.nn.conv2d_transpose(
value = X,
filter = self.W,
output_shape = self.output_shape,
strides=[1, self.stride, self.stride, 1],
)
conv_out = tf.nn.bias_add(conv_out, self.b)
if self.apply_batch_norm:
conv_out = tf.contrib.layers.batch_norm(
conv_out,
decay = 0.9,
updates_collections = None,
epsilon = 1e-5,
scale = True,
is_training = is_training,
reuse = reuse,
scope = self.name
)
return self.f(conv_out)
class DenseLayer(object):
def __init__(self, name, M1, M2, apply_batch_norm, f = tf.nn.relu):
self.W = tf.get_variable(
"W_%s" % name,
shape = (M1, M2),
initializer = tf.random_normal_initializer(stddev = 0.02)
)
self.b = tf.get_variable(
"b_%s" % name,
shape = (M2, ),
initializer = tf.zeros_initializer()
)
self.f = f
self.name = name
self.apply_batch_norm = apply_batch_norm
self.params = [self.W, self.b]
def forward(self, X, reuse, is_training):
a = tf.matmul(X, self.W) + self.b
if self.apply_batch_norm:
a = tf.contrib.layers.batch_norm(
a,
decay = 0.9,
updates_collections = None,
epsilon = 1e-5,
scale = True,
is_training = is_training,
reuse = reuse,
scope = self.name
)
return self.f(a)
class DCGAN:
def __init__(self, img_length, num_colors, d_sizes, g_sizes):
self.img_length = img_length
self.num_colors = num_colors
self.latent_dims = g_sizes['z']
self.X = tf.placeholder(
tf.float32,
shape = (None, img_length, img_length, num_colors),
name = 'X'
)
self.Z = tf.placeholder(
tf.float32,
shape=(None, self.latent_dims),
name = 'Z'
)
logits = self.build_discriminator(self.X, d_sizes)
self.sample_images = self.build_generator(self.Z, g_sizes)
        with tf.variable_scope('discriminator') as scope:
            scope.reuse_variables()
            sample_logits = self.d_forward(self.sample_images, reuse = True)
with tf.variable_scope("generator") as scope:
scope.reuse_variables()
self.sample_images_test = self.g_forward(
self.Z, reuse = True, is_training = False
)
self.d_cost_real = tf.nn.sigmoid_cross_entropy_with_logits(
logits = logits,
labels = tf.ones_like(logits)
)
self.d_cost_fake = tf.nn.sigmoid_cross_entropy_with_logits(
logits = sample_logits,
labels = tf.zeros_like(sample_logits)
)
self.d_cost = tf.reduce_mean(self.d_cost_real) + tf.reduce_mean(self.d_cost_fake)
self.g_cost = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits = sample_logits,
labels = tf.ones_like(sample_logits)
)
)
real_predictions = tf.cast(logits > 0, tf.float32)
fake_predictions = tf.cast(sample_logits < 0, tf.float32)
num_predictions = 2.0 * BATCH_SIZE
num_correct = tf.reduce_sum(real_predictions) + tf.reduce_sum(fake_predictions)
self.d_accuracy = num_correct / num_predictions
# optimizers
self.d_params = [t for t in tf.trainable_variables() if t.name.startswith('d')]
self.g_params = [t for t in tf.trainable_variables() if t.name.startswith('g')]
self.d_train_op = tf.train.AdamOptimizer(
LEARNING_RATE, beta1 = BETA1
).minimize(
self.d_cost, var_list = self.d_params
)
self.g_train_op = tf.train.AdamOptimizer(
LEARNING_RATE, beta1 = BETA1
).minimize(
self.g_cost, var_list = self.g_params
)
self.init_op = tf.global_variables_initializer()
self.sess = tf.InteractiveSession()
self.sess.run(self.init_op)
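    # Note on the three graph builds in __init__ above: the discriminator is
    # built once on real images and reused (same variables) on generated
    # samples, while the generator is built once for training and once more
    # with is_training=False so batch norm uses its moving averages at
    # sampling time.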
def build_discriminator(self, X, d_sizes):
with tf.variable_scope("discriminator") as scope:
self.d_convlayers = []
mi = self.num_colors
dim = self.img_length
count = 0
for mo, filtersz, stride, apply_batch_norm in d_sizes['conv_layers']:
name = "convlayer_%s" % count
count += 1
layer = ConvLayer(name, mi, mo, apply_batch_norm, filtersz, stride, lrelu)
self.d_convlayers.append(layer)
mi = mo
print("dim: ", dim)
dim = int(np.ceil(float(dim) / stride))
mi = mi * dim * dim
self.d_denselayers = []
for mo, apply_batch_norm in d_sizes['dense_layers']:
name = "denselayer_%s" % count
count += 1
layer = DenseLayer(name, mi, mo, apply_batch_norm, lrelu)
mi = mo
self.d_denselayers.append(layer)
name = "denselayer_%s" % count
self.d_finallayer = DenseLayer(name, mi, 1, False, lambda x : x)
logits = self.d_forward(X)
return logits
def d_forward(self, X, reuse = None, is_training = True):
output = X
for layer in self.d_convlayers:
output = layer.forward(output, reuse, is_training)
output = tf.contrib.layers.flatten(output)
for layer in self.d_denselayers:
output = layer.forward(output, reuse, is_training)
logits = self.d_finallayer.forward(output, reuse, is_training)
return logits
def build_generator(self, Z, g_sizes):
with tf.variable_scope('generator') as scope:
dims = [self.img_length]
dim = self.img_length
for _, _, stride, _ in reversed(g_sizes['conv_layers']):
dim = int(np.ceil(float(dim) / stride))
dims.append(dim)
dims = list(reversed(dims))
print("dims: ", dims)
self.g_dims = dims
mi = self.latent_dims
self.g_denselayers = []
count = 0
for mo, apply_batch_norm in g_sizes['dense_layers']:
name = "g_denselayers_%s" % count
count += 1
                layer = DenseLayer(name, mi, mo, apply_batch_norm)
self.g_denselayers.append(layer)
mi = mo
mo = g_sizes['projection'] * dims[0] * dims[0]
name = "g_denselayer_%s" % count
layer = DenseLayer(name, mi, mo, not g_sizes['bn_after_project'])
self.g_denselayers.append(layer)
mi = g_sizes['projection']
self.g_convlayers = []
num_relus = len(g_sizes['conv_layers']) - 1
activation_functions = [tf.nn.relu] * num_relus + [g_sizes['output_activation']]
for i in range(len(g_sizes['conv_layers'])):
name = "fs_convlayer_%s" % i
mo, filtersz, stride, apply_batch_norm = g_sizes['conv_layers'][i]
f = activation_functions[i]
output_shape = [BATCH_SIZE, dims[i+1], dims[i+1], mo]
print("mi: ", mi, "mo: ", mo, "output_shape: ", output_shape)
layer = FractionallyStridedConvLayer(
name, mi, mo, output_shape, apply_batch_norm, filtersz, stride, f
)
self.g_convlayers.append(layer)
mi = mo
self.g_sizes = g_sizes
return self.g_forward(Z)
def g_forward(self, Z, reuse = None, is_training = True):
output = Z
for layer in self.g_denselayers:
output = layer.forward(output, reuse, is_training)
output = tf.reshape(
output,
[-1, self.g_dims[0], self.g_dims[0], self.g_sizes['projection']]
)
# apply bnorm
if self.g_sizes['bn_after_project']:
output = tf.contrib.layers.batch_norm(
output,
decay = 0.9,
updates_collections = None,
epsilon=1e-5,
scale = True,
is_training = is_training,
reuse = reuse,
scope = 'bn_after_project'
)
for layer in self.g_convlayers:
output = layer.forward(output, reuse, is_training)
return output
def fit(self, X):
d_costs = []
g_costs = []
N = len(X)
n_batches = N // BATCH_SIZE
total_iters = 0
for i in range(0, EPOCHS):
print("epoch: ", i)
np.random.shuffle(X)
for j in range(0, n_batches):
t0 = datetime.now()
                if type(X[0]) is str:
batch = util.files2images(
X[j*BATCH_SIZE:((j+1)*BATCH_SIZE)]
)
else:
batch = X[j*BATCH_SIZE:(j+1)*BATCH_SIZE]
Z = np.random.uniform(-1, 1, size=(BATCH_SIZE, self.latent_dims))
_, d_cost, d_acc = self.sess.run(
(self.d_train_op, self.d_cost, self.d_accuracy),
feed_dict = {self.X: batch, self.Z: Z}
)
d_costs.append(d_cost)
                # run the generator update twice for every discriminator
                # update, a common trick to keep the discriminator from
                # overpowering the generator early in training
                _, g_cost1 = self.sess.run(
                    (self.g_train_op, self.g_cost),
                    feed_dict = {self.Z : Z}
                )
                _, g_cost2 = self.sess.run(
                    (self.g_train_op, self.g_cost),
                    feed_dict = {self.Z : Z}
                )
                g_costs.append((g_cost1 + g_cost2) / 2)
print("batch %d/%d - dt: %s - d_acc: %.2f" % (j+1, n_batches, datetime.now() - t0))
total_iters += 1
if total_iters % SAVE_SAMPLE_PERIOD == 0:
print("saving sample...")
samples = self.sample(64)
d = self.img_length
if samples.shape[-1] == 1:
samples = samples.reshape(64, d, d)
flat_image = np.empty((8*d, 8*d))
k = 0
for a in range(0, 8):
for b in range(0, 8):
flat_image[a*d:(a+1)*d, b*d:(b+1)*d] = samples[k].reshape(d,d)
k+=1
else:
flat_image = np.empty((8*d, 8*d, 3))
k = 0
for a in range(0, 8):
for b in range(0, 8):
flat_image[a*d:(a+1)*d, b*d:(b+1)*d] = samples[k]
k+=1
sp.misc.imsave(
'samples/samples_at_iter_%d.png' % total_iters,
(flat_image + 1) / 2
)
plt.clf()
plt.plot(d_costs, label = 'discriminator cost')
plt.plot(g_costs, label = 'generator cost')
plt.legend()
plt.savefig('cost_vs_iteration.png')
def sample(self, n):
Z = np.random.uniform(-1, 1, size = (n, self.latent_dims))
samples = self.sess.run(self.sample_images_test, feed_dict = {self.Z : Z})
return samples
def celeb():
X = util.get_celeb()
dim = 64
colors = 3
d_sizes = {
'conv_layers' : [
(64,5,2,False),
(128,5,2,True),
(256,5,2,True),
(512,5,2,True)
],
'dense_layers': []
}
g_sizes = {
'z': 100,
'projection':512,
'bn_after_project': True,
'conv_layers' : [
(256,5,2,True),
(128,5,2,True),
(64,5,2,True),
(colors, 5,2, False)
],
'dense_layers': [],
'output_activation' : tf.tanh
}
gan = DCGAN(dim, colors, d_sizes, g_sizes)
gan.fit(X)
def mnist():
X, Y = util.get_mnist()
X = X.reshape(len(X), 28, 28, 1)
dim = X.shape[1]
colors = X.shape[-1]
d_sizes = {
'conv_layers': [(2, 5, 2, False), (64, 5, 2, True)],
'dense_layers': [(1024, True)]
}
g_sizes = {
'z':100,
'projection': 128,
'bn_after_project': False,
'conv_layers': [(128, 5, 2, True), (colors, 5, 2, False)],
'dense_layers': [(1024, True)],
'output_activation' : tf.sigmoid
}
gan = DCGAN(dim, colors, d_sizes, g_sizes)
gan.fit(X)
if __name__ == '__main__':
mnist()
| mit | 34,060,963,830,307,964 | 29.65 | 111 | 0.483458 | false | 3.644471 | false | false | false |
h4ck3rm1k3/gcc-python-plugin-1 | tests/plugin/switch/script.py | 4 | 2104 | # -*- coding: utf-8 -*-
# Copyright 2011 David Malcolm <dmalcolm@redhat.com>
# Copyright 2011 Red Hat, Inc.
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import gcc
from gccutils import pprint
def on_pass_execution(p, fn):
if p.name == '*warn_function_return':
assert isinstance(fn, gcc.Function)
print('fn: %r' % fn)
assert isinstance(fn.decl, gcc.FunctionDecl)
print('fn.decl.name: %r' % fn.decl.name)
assert isinstance(fn.cfg, gcc.Cfg) # None for some early passes
assert fn.cfg.basic_blocks[0] == fn.cfg.entry
assert fn.cfg.basic_blocks[1] == fn.cfg.exit
for bb in fn.cfg.basic_blocks:
if bb.gimple:
for i,stmt in enumerate(bb.gimple):
print('gimple[%i]:' % i)
print(' str(stmt): %r' % str(stmt))
print(' repr(stmt): %r' % repr(stmt))
if isinstance(stmt, gcc.GimpleSwitch):
print(' stmt.indexvar: %r' % stmt.indexvar)
print(' stmt.labels: %r' % stmt.labels)
for j, label in enumerate(stmt.labels):
print(' label[%i].low: %r' % (j, label.low))
print(' label[%i].high: %r' % (j, label.high))
print(' label[%i].target: %r' % (j, label.target))
gcc.register_callback(gcc.PLUGIN_PASS_EXECUTION,
on_pass_execution)
| gpl-3.0 | 7,235,730,597,787,832,000 | 43.765957 | 83 | 0.577947 | false | 3.723894 | false | false | false |
polysquare/polysquare-travis-container | psqtraviscontainer/output.py | 1 | 1519 | # /psqtraviscontainer/output.py
#
# Helper classes to monitor and capture output as it runs.
#
# See /LICENCE.md for Copyright information
"""Helper classes to monitor and capture output as it runs."""
import sys
import threading
def monitor(stream,
modifier=None,
live=False,
output=sys.stdout):
"""Monitor and print lines from stream until end of file is reached.
Each line is piped through :modifier:.
"""
from six import StringIO
captured = StringIO()
modifier = modifier or (lambda l: l)
def read_thread():
"""Read each line from the stream and print it."""
# No stream, not much we can really do here.
if not stream:
return
for line in stream:
line = modifier(line)
captured.write(line)
if live:
output.write(line)
output.flush()
def joiner_for_output(thread):
"""Closure to join the thread and do something with its output."""
thread.start()
def join():
"""Join the thread and then return its output."""
thread.join()
captured.seek(0)
return captured
return join
# Note that while it is necessary to call joiner_for_output if you want
# resources to be cleaned up, it is not necessary if you don't care
# about cleanup and just want the program to keep running.
return joiner_for_output(threading.Thread(target=read_thread))
| mit | -7,674,804,951,134,952,000 | 27.660377 | 75 | 0.612245 | false | 4.480826 | false | false | false |
asajeffrey/servo | tests/wpt/web-platform-tests/tools/wpt/install.py | 3 | 3912 | import argparse
from . import browser
latest_channels = {
'firefox': 'nightly',
'chrome': 'nightly',
'chrome_android': 'dev',
'edgechromium': 'dev',
'safari': 'preview',
'servo': 'nightly',
'webkitgtk_minibrowser': 'nightly'
}
channel_by_name = {
'stable': 'stable',
'release': 'stable',
'beta': 'beta',
'dev': 'dev',
'canary': 'canary',
'nightly': latest_channels,
'preview': latest_channels,
'experimental': latest_channels,
}
channel_args = argparse.ArgumentParser(add_help=False)
channel_args.add_argument('--channel', choices=channel_by_name.keys(),
default='nightly', action='store',
help='''
Name of browser release channel (default: nightly). "stable" and "release" are
synonyms for the latest browser stable release; "beta" is the beta release;
"dev" is only meaningful for Chrome (i.e. Chrome Dev); "nightly",
"experimental", and "preview" are all synonyms for the latest available
development or trunk release. (For WebDriver installs, we attempt to select an
appropriate, compatible version for the latest browser release on the selected
channel.) This flag overrides --browser-channel.''')
def get_parser():
parser = argparse.ArgumentParser(
parents=[channel_args],
description="Install a given browser or webdriver frontend.")
parser.add_argument('browser', choices=['firefox', 'chrome', 'servo'],
help='name of web browser product')
parser.add_argument('component', choices=['browser', 'webdriver'],
help='name of component')
parser.add_argument('--download-only', action="store_true",
help="Download the selected component but don't install it")
parser.add_argument('--rename', action="store", default=None,
help="Filename, excluding extension for downloaded archive "
"(only with --download-only)")
parser.add_argument('-d', '--destination',
help='filesystem directory to place the component')
return parser
def get_channel(browser, channel):
channel = channel_by_name[channel]
if isinstance(channel, dict):
channel = channel.get(browser)
return channel
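# A few concrete resolutions implied by the tables above:
#   get_channel('firefox', 'nightly')        -> 'nightly'
#   get_channel('chrome_android', 'preview') -> 'dev'
#   get_channel('safari', 'stable')          -> 'stable'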
def run(venv, **kwargs):
import logging
logger = logging.getLogger("install")
browser = kwargs["browser"]
destination = kwargs["destination"]
channel = get_channel(browser, kwargs["channel"])
if channel != kwargs["channel"]:
logger.info("Interpreting channel '%s' as '%s'", kwargs["channel"], channel)
if destination is None:
if venv:
if kwargs["component"] == "browser":
destination = venv.path
else:
destination = venv.bin_path
else:
raise argparse.ArgumentError(None,
"No --destination argument, and no default for the environment")
install(browser, kwargs["component"], destination, channel, logger=logger,
download_only=kwargs["download_only"], rename=kwargs["rename"])
def install(name, component, destination, channel="nightly", logger=None, download_only=False,
rename=None):
if logger is None:
import logging
logger = logging.getLogger("install")
prefix = "download" if download_only else "install"
suffix = "_webdriver" if component == 'webdriver' else ""
method = prefix + suffix
browser_cls = getattr(browser, name.title())
logger.info('Now installing %s %s...', name, component)
kwargs = {}
if download_only and rename:
kwargs["rename"] = rename
path = getattr(browser_cls(logger), method)(dest=destination, channel=channel, **kwargs)
if path:
logger.info('Binary %s as %s', "downloaded" if download_only else "installed", path)
| mpl-2.0 | 5,533,614,927,335,678,000 | 35.90566 | 105 | 0.627556 | false | 4.337029 | true | false | false |
wjchen84/lfd | scripts/eval.py | 2 | 30870 | #!/usr/bin/env python
from __future__ import division
import time
import os.path
import h5py
import atexit
import trajoptpy
import numpy as np
from lfd.environment import sim_util
from lfd.environment import settings
from constants import MAX_ACTIONS_TO_TRY
from lfd.demonstration.demonstration import SceneState, GroundTruthRopeSceneState, AugmentedTrajectory, Demonstration
from lfd.environment.simulation import DynamicRopeSimulationRobotWorld
from lfd.environment.simulation_object import XmlSimulationObject, BoxSimulationObject, CylinderSimulationObject, RopeSimulationObject
from lfd.environment.environment import LfdEnvironment, GroundTruthRopeLfdEnvironment
from lfd.registration.registration import TpsRpmBijRegistrationFactory, TpsRpmRegistrationFactory, TpsSegmentRegistrationFactory, BatchGpuTpsRpmBijRegistrationFactory, BatchGpuTpsRpmRegistrationFactory
from lfd.transfer.transfer import PoseTrajectoryTransferer, FingerTrajectoryTransferer
from lfd.transfer.registration_transfer import TwoStepRegistrationAndTrajectoryTransferer, UnifiedRegistrationAndTrajectoryTransferer
from lfd.action_selection import GreedyActionSelection
from lfd.action_selection import FeatureActionSelection
from lfd.rapprentice import eval_util, util
from lfd.rapprentice import task_execution
from lfd.rapprentice.knot_classifier import isKnot as is_knot
from lfd.rapprentice.util import redprint, yellowprint
class GlobalVars:
exec_log = None
actions = None
actions_cache = None
demos = None
features = None
def eval_on_holdout(args, action_selection, reg_and_traj_transferer, lfd_env, sim):
"""TODO
Args:
action_selection: ActionSelection
reg_and_traj_transferer: RegistrationAndTrajectoryTransferer
lfd_env: LfdEnvironment
sim: DynamicSimulation
"""
holdoutfile = h5py.File(args.eval.holdoutfile, 'r')
holdout_items = eval_util.get_indexed_items(holdoutfile, task_list=args.tasks, task_file=args.taskfile, i_start=args.i_start, i_end=args.i_end)
rope_params = sim_util.RopeParams()
if args.eval.rope_param_radius is not None:
rope_params.radius = args.eval.rope_param_radius
if args.eval.rope_param_angStiffness is not None:
rope_params.angStiffness = args.eval.rope_param_angStiffness
num_successes = 0
num_total = 0
for i_task, demo_id_rope_nodes in holdout_items:
redprint("task %s" % i_task)
init_rope_nodes = demo_id_rope_nodes["rope_nodes"][:]
rope = RopeSimulationObject("rope", init_rope_nodes, rope_params)
sim.add_objects([rope])
sim.settle(step_viewer=args.animation)
for i_step in range(args.eval.num_steps):
redprint("task %s step %i" % (i_task, i_step))
sim_util.reset_arms_to_side(sim)
if args.animation:
sim.viewer.Step()
sim_state = sim.get_state()
sim.set_state(sim_state)
scene_state = lfd_env.observe_scene()
# plot cloud of the test scene
handles = []
if args.plotting:
handles.append(sim.env.plot3(scene_state.cloud[:,:3], 2, scene_state.color if scene_state.color is not None else (0,0,1)))
sim.viewer.Step()
eval_stats = eval_util.EvalStats()
start_time = time.time()
if len(scene_state.cloud) == 0:
redprint("Detected 0 points in scene")
break
try:
(agenda, q_values_root), goal_found = action_selection.plan_agenda(scene_state, i_step)
except ValueError: #e.g. if cloud is empty - any action is hopeless
redprint("**Raised Value Error during action selection")
break
eval_stats.action_elapsed_time += time.time() - start_time
eval_stats.generalized = True
num_actions_to_try = MAX_ACTIONS_TO_TRY if args.eval.search_until_feasible else 1
for i_choice in range(num_actions_to_try):
if q_values_root[i_choice] == -np.inf: # none of the demonstrations generalize
eval_stats.generalized = False
break
redprint("TRYING %s"%agenda[i_choice])
best_root_action = str(agenda[i_choice])
start_time = time.time()
try:
test_aug_traj = reg_and_traj_transferer.transfer(GlobalVars.demos[best_root_action], scene_state, plotting=args.plotting)
except ValueError: # If something is cloud/traj is empty or something
redprint("**Raised value error during traj transfer")
break
eval_stats.feasible, eval_stats.misgrasp = lfd_env.execute_augmented_trajectory(test_aug_traj, step_viewer=args.animation, interactive=args.interactive, check_feasible=args.eval.check_feasible)
eval_stats.exec_elapsed_time += time.time() - start_time
if not args.eval.check_feasible or eval_stats.feasible: # try next action if TrajOpt cannot find feasible action and we care about feasibility
break
else:
sim.set_state(sim_state)
knot = is_knot(rope.rope.GetControlPoints())
results = {'scene_state':scene_state, 'best_action':best_root_action, 'values':q_values_root, 'aug_traj':test_aug_traj, 'eval_stats':eval_stats, 'sim_state':sim_state, 'knot':knot, 'goal_found': goal_found}
eval_util.save_task_results_step(args.resultfile, i_task, i_step, results)
if not eval_stats.generalized:
assert not knot
break
if args.eval.check_feasible and not eval_stats.feasible:
# Skip to next knot tie if the action is infeasible -- since
# that means all future steps (up to 5) will have infeasible trajectories
assert not knot
break
if knot:
num_successes += 1
                break
sim.remove_objects([rope])
num_total += 1
redprint('Eval Successes / Total: ' + str(num_successes) + '/' + str(num_total))
redprint('Success Rate: ' + str(float(num_successes)/num_total))
def eval_on_holdout_parallel(args, action_selection, reg_and_traj_transferer, lfd_env, sim):
raise NotImplementedError
# holdoutfile = h5py.File(args.eval.holdoutfile, 'r')
# holdout_items = eval_util.get_indexed_items(holdoutfile, task_list=args.tasks, task_file=args.taskfile, i_start=args.i_start, i_end=args.i_end)
#
# rope_params = sim_util.RopeParams()
# if args.eval.rope_param_radius is not None:
# rope_params.radius = args.eval.rope_param_radius
# if args.eval.rope_param_angStiffness is not None:
# rope_params.angStiffness = args.eval.rope_param_angStiffness
#
# batch_transfer_simulate = BatchTransferSimulate(transfer, lfd_env)
#
# states = {}
# q_values_roots = {}
# best_root_actions = {}
# state_id2i_task = {}
# results = {}
# successes = {}
# for i_step in range(args.eval.num_steps):
# for i_task, demo_id_rope_nodes in holdout_items:
# if i_task in successes:
# # task already finished
# continue
#
# redprint("task %s step %i" % (i_task, i_step))
#
# if i_step == 0:
# sim_util.reset_arms_to_side(lfd_env)
#
# init_rope_nodes = demo_id_rope_nodes["rope_nodes"][:]
# lfd_env.set_rope_state(RopeState(init_rope_nodes, rope_params))
# states[i_task] = {}
# states[i_task][i_step] = lfd_env.observe_scene(**vars(args.eval))
# best_root_actions[i_task] = {}
# q_values_roots[i_task] = {}
# results[i_task] = {}
#
# if args.animation:
# lfd_env.viewer.Step()
#
# state = states[i_task][i_step]
#
# num_actions_to_try = MAX_ACTIONS_TO_TRY if args.eval.search_until_feasible else 1
#
# agenda, q_values_root = select_best(args.eval, state, batch_transfer_simulate) # TODO fix select_best to handle batch_transfer_simulate
# q_values_roots[i_task][i_step] = q_values_root
#
# i_choice = 0
# if q_values_root[i_choice] == -np.inf: # none of the demonstrations generalize
# successes[i_task] = False
# continue
#
# best_root_action = agenda[i_choice]
# best_root_actions[i_task][i_step] = best_root_action
#
# next_state_id = SceneState.get_unique_id()
# batch_transfer_simulate.queue_transfer_simulate(state, best_root_action, next_state_id)
#
# state_id2i_task[next_state_id] = i_task
#
# batch_transfer_simulate.wait_while_queue_is_nonempty()
# for result in batch_transfer_simulate.get_results():
# i_task = state_id2i_task[result.state.id]
# results[i_task][i_step] = result
#
# for i_task, demo_id_rope_nodes in holdout_items:
# if i_task in successes:
# # task already finished
# continue
#
# result = results[i_task][i_step]
# eval_stats = eval_util.EvalStats()
# eval_stats.success, eval_stats.feasible, eval_stats.misgrasp, full_trajs, next_state = result.success, result.feasible, result.misgrasp, result.full_trajs, result.state
# # TODO eval_stats.exec_elapsed_time
#
# if not eval_stats.feasible: # If not feasible, restore state
# next_state = states[i_task][i_step]
#
# state = states[i_task][i_step]
# best_root_action = best_root_actions[i_task][i_step]
# q_values_root = q_values_roots[i_task][i_step]
# eval_util.save_task_results_step(args.resultfile, i_task, i_step, state, best_root_action, q_values_root, full_trajs, next_state, eval_stats, new_cloud_ds=state.cloud, new_rope_nodes=state.rope_nodes)
#
# states[i_task][i_step+1] = next_state
#
# if not eval_stats.feasible:
# successes[i_task] = False
# # Skip to next knot tie if the action is infeasible -- since
# # that means all future steps (up to 5) will have infeasible trajectories
# continue
#
# if is_knot(next_state.rope_nodes):
# successes[i_task] = True
# continue
#
# if i_step == args.eval.num_steps - 1:
# for i_task, demo_id_rope_nodes in holdout_items:
# if i_task not in successes:
# # task ran out of steps
# successes[i_task] = False
#
# num_successes = np.sum(successes.values())
# num_total = len(successes)
# redprint('Eval Successes / Total: ' + str(num_successes) + '/' + str(num_total))
def replay_on_holdout(args, action_selection, reg_and_traj_transferer, lfd_env, sim):
loadresultfile = h5py.File(args.replay.loadresultfile, 'r')
loadresult_items = eval_util.get_indexed_items(loadresultfile, task_list=args.tasks, task_file=args.taskfile, i_start=args.i_start, i_end=args.i_end)
num_successes = 0
num_total = 0
for i_task, task_info in loadresult_items:
redprint("task %s" % i_task)
for i_step in range(len(task_info)):
redprint("task %s step %i" % (i_task, i_step))
replay_results = eval_util.load_task_results_step(args.replay.loadresultfile, i_task, i_step)
sim_state = replay_results['sim_state']
if i_step > 0: # sanity check for reproducibility
sim_util.reset_arms_to_side(sim)
if sim.simulation_state_equal(sim_state, sim.get_state()):
yellowprint("Reproducible results OK")
else:
yellowprint("The replayed simulation state doesn't match the one from the result file")
sim.set_state(sim_state)
if args.replay.simulate_traj_steps is not None and i_step not in args.replay.simulate_traj_steps:
continue
if i_step in args.replay.compute_traj_steps: # compute the trajectory in this step
best_root_action = replay_results['best_action']
scene_state = replay_results['scene_state']
# plot cloud of the test scene
handles = []
if args.plotting:
handles.append(sim.env.plot3(scene_state.cloud[:,:3], 2, scene_state.color if scene_state.color is not None else (0,0,1)))
sim.viewer.Step()
test_aug_traj = reg_and_traj_transferer.transfer(GlobalVars.demos[best_root_action], scene_state, plotting=args.plotting)
else:
test_aug_traj = replay_results['aug_traj']
feasible, misgrasp = lfd_env.execute_augmented_trajectory(test_aug_traj, step_viewer=args.animation, interactive=args.interactive, check_feasible=args.eval.check_feasible)
if replay_results['knot']:
num_successes += 1
num_total += 1
redprint('REPLAY Successes / Total: ' + str(num_successes) + '/' + str(num_total))
def parse_input_args():
parser = util.ArgumentParser()
parser.add_argument("--animation", type=int, default=0, help="animates if it is non-zero. the viewer is stepped according to this number")
parser.add_argument("--plotting", type=int, default=1, help="plots if animation != 0 and plotting != 0")
parser.add_argument("--interactive", action="store_true", help="step animation and optimization if specified")
parser.add_argument("--resultfile", type=str, help="no results are saved if this is not specified")
# selects tasks to evaluate/replay
parser.add_argument("--tasks", type=int, nargs='*', metavar="i_task")
parser.add_argument("--taskfile", type=str)
parser.add_argument("--i_start", type=int, default=-1, metavar="i_task")
parser.add_argument("--i_end", type=int, default=-1, metavar="i_task")
parser.add_argument("--camera_matrix_file", type=str, default='../.camera_matrix.txt')
parser.add_argument("--window_prop_file", type=str, default='../.win_prop.txt')
parser.add_argument("--random_seed", type=int, default=None)
parser.add_argument("--log", type=str, default="")
subparsers = parser.add_subparsers(dest='subparser_name')
# arguments for eval
parser_eval = subparsers.add_parser('eval')
parser_eval.add_argument('actionfile', type=str, nargs='?', default='../bigdata/misc/overhand_actions.h5')
parser_eval.add_argument('holdoutfile', type=str, nargs='?', default='../bigdata/misc/holdout_set_Jun20_0.10.h5')
parser.add_argument("--landmarkfile", type=str, default='../data/misc/landmarks.h5')
parser_eval.add_argument('action_selection', type=str, nargs='?', choices=['greedy', 'feature'])
parser_eval.add_argument('--weightfile', type=str, default='')
parser_eval.add_argument('--feature_type', type=str, nargs='?', choices=['base', 'mul', 'mul_quad', 'mul_quad_ind', 'mul_quad_bendind', 'mul_quad_mapind', 'mul_s', 'mul_grip', 'mul_s_map', 'landmark', 'timestep'], default='base')
parser_eval.add_argument("transferopt", type=str, nargs='?', choices=['pose', 'finger'], default='finger')
parser_eval.add_argument("reg_type", type=str, choices=['segment', 'rpm', 'bij'], default='bij')
parser_eval.add_argument("--unified", type=int, default=0)
parser_eval.add_argument("--obstacles", type=str, nargs='*', choices=['bookshelve', 'boxes', 'cylinders'], default=[])
parser_eval.add_argument("--downsample", type=int, default=1)
parser_eval.add_argument("--downsample_size", type=float, default=0.025)
parser_eval.add_argument("--upsample", type=int, default=0)
parser_eval.add_argument("--upsample_rad", type=int, default=1, help="upsample_rad > 1 incompatible with downsample != 0")
parser_eval.add_argument("--ground_truth", type=int, default=0)
parser_eval.add_argument("--fake_data_segment",type=str, default='demo1-seg00')
parser_eval.add_argument("--fake_data_transform", type=float, nargs=6, metavar=("tx","ty","tz","rx","ry","rz"),
default=[0,0,0,0,0,0], help="translation=(tx,ty,tz), axis-angle rotation=(rx,ry,rz)")
parser_eval.add_argument("--search_until_feasible", action="store_true")
parser_eval.add_argument("--check_feasible", type=int, default=0)
parser_eval.add_argument("--width", type=int, default=1)
parser_eval.add_argument("--depth", type=int, default=0)
parser_eval.add_argument("--alpha", type=float, default=1000000.0)
parser_eval.add_argument("--beta_pos", type=float, default=1000000.0)
parser_eval.add_argument("--beta_rot", type=float, default=100.0)
parser_eval.add_argument("--gamma", type=float, default=1000.0)
parser_eval.add_argument("--use_collision_cost", type=int, default=1)
parser_eval.add_argument("--num_steps", type=int, default=5, help="maximum number of steps to simulate each task")
parser_eval.add_argument("--dof_limits_factor", type=float, default=1.0)
parser_eval.add_argument("--rope_param_radius", type=str, default=None)
parser_eval.add_argument("--rope_param_angStiffness", type=str, default=None)
parser_eval.add_argument("--use_color", type=int, default=0)
parser_eval.add_argument("--parallel", action="store_true")
parser_eval.add_argument("--batch", action="store_true", default=False)
parser_replay = subparsers.add_parser('replay')
parser_replay.add_argument("loadresultfile", type=str)
parser_replay.add_argument("--compute_traj_steps", type=int, default=[], nargs='*', metavar='i_step', help="recompute trajectories for the i_step of all tasks")
parser_replay.add_argument("--simulate_traj_steps", type=int, default=None, nargs='*', metavar='i_step',
help="if specified, restore the rope state from file and then simulate for the i_step of all tasks")
# if not specified, the rope state is not restored from file, but it is as given by the sequential simulation
args = parser.parse_args()
if not args.animation:
args.plotting = 0
return args
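# Typical invocations (file names are hypothetical):
#   python eval.py --animation 1 eval actions.h5 holdout.h5 greedy finger bij
#   python eval.py replay results.h5 --compute_traj_steps 0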
def setup_log_file(args):
if args.log:
redprint("Writing log to file %s" % args.log)
GlobalVars.exec_log = task_execution.ExecutionLog(args.log)
atexit.register(GlobalVars.exec_log.close)
GlobalVars.exec_log(0, "main.args", args)
def set_global_vars(args):
if args.random_seed is not None: np.random.seed(args.random_seed)
GlobalVars.actions = h5py.File(args.eval.actionfile, 'r')
actions_root, actions_ext = os.path.splitext(args.eval.actionfile)
GlobalVars.actions_cache = h5py.File(actions_root + '.cache' + actions_ext, 'a')
GlobalVars.demos = {}
for action, seg_info in GlobalVars.actions.iteritems():
if args.eval.ground_truth:
rope_nodes = seg_info['rope_nodes'][()]
scene_state = GroundTruthRopeSceneState(rope_nodes, settings.ROPE_RADIUS, upsample=args.eval.upsample, upsample_rad=args.eval.upsample_rad, downsample_size=args.eval.downsample_size)
else:
full_cloud = seg_info['cloud_xyz'][()]
scene_state = SceneState(full_cloud, downsample_size=args.eval.downsample_size)
lr2arm_traj = {}
lr2finger_traj = {}
lr2ee_traj = {}
lr2open_finger_traj = {}
lr2close_finger_traj = {}
for lr in 'lr':
arm_name = {"l":"leftarm", "r":"rightarm"}[lr]
lr2arm_traj[lr] = np.asarray(seg_info[arm_name])
lr2finger_traj[lr] = sim_util.gripper_joint2gripper_l_finger_joint_values(np.asarray(seg_info['%s_gripper_joint'%lr]))[:,None]
lr2ee_traj[lr] = np.asarray(seg_info["%s_gripper_tool_frame"%lr]['hmat'])
lr2open_finger_traj[lr] = np.zeros(len(lr2finger_traj[lr]), dtype=bool)
lr2close_finger_traj[lr] = np.zeros(len(lr2finger_traj[lr]), dtype=bool)
opening_inds, closing_inds = sim_util.get_opening_closing_inds(lr2finger_traj[lr])
# # opening_inds/closing_inds are indices before the opening/closing happens, so increment those indices (if they are not out of bound)
# opening_inds = np.clip(opening_inds+1, 0, len(lr2finger_traj[lr])-1) # TODO figure out if +1 is necessary
# closing_inds = np.clip(closing_inds+1, 0, len(lr2finger_traj[lr])-1)
lr2open_finger_traj[lr][opening_inds] = True
lr2close_finger_traj[lr][closing_inds] = True
aug_traj = AugmentedTrajectory(lr2arm_traj=lr2arm_traj, lr2finger_traj=lr2finger_traj, lr2ee_traj=lr2ee_traj, lr2open_finger_traj=lr2open_finger_traj, lr2close_finger_traj=lr2close_finger_traj)
demo = Demonstration(action, scene_state, aug_traj)
GlobalVars.demos[action] = demo
def setup_lfd_environment_sim(args):
actions = h5py.File(args.eval.actionfile, 'r')
init_rope_xyz, init_joint_names, init_joint_values = sim_util.load_fake_data_segment(actions, args.eval.fake_data_segment, args.eval.fake_data_transform)
table_height = init_rope_xyz[:,2].mean() - .02
sim_objs = []
sim_objs.append(XmlSimulationObject("robots/pr2-beta-static.zae", dynamic=False))
sim_objs.append(BoxSimulationObject("table", [1, 0, table_height + (-.1 + .01)], [.85, .85, .1], dynamic=False))
if 'bookshelve' in args.eval.obstacles:
sim_objs.append(XmlSimulationObject("../data/bookshelve.env.xml", dynamic=False))
if 'boxes' in args.eval.obstacles:
sim_objs.append(BoxSimulationObject("box0", [.7,.43,table_height+(.01+.12)], [.12,.12,.12], dynamic=False))
sim_objs.append(BoxSimulationObject("box1", [.74,.47,table_height+(.01+.12*2+.08)], [.08,.08,.08], dynamic=False))
if 'cylinders' in args.eval.obstacles:
sim_objs.append(CylinderSimulationObject("cylinder0", [.7,.43,table_height+(.01+.5)], .12, 1., dynamic=False))
sim_objs.append(CylinderSimulationObject("cylinder1", [.7,-.43,table_height+(.01+.5)], .12, 1., dynamic=False))
sim_objs.append(CylinderSimulationObject("cylinder2", [.4,.2,table_height+(.01+.65)], .06, .5, dynamic=False))
sim_objs.append(CylinderSimulationObject("cylinder3", [.4,-.2,table_height+(.01+.65)], .06, .5, dynamic=False))
sim = DynamicRopeSimulationRobotWorld()
world = sim
sim.add_objects(sim_objs)
if args.eval.ground_truth:
lfd_env = GroundTruthRopeLfdEnvironment(sim, world, upsample=args.eval.upsample, upsample_rad=args.eval.upsample_rad, downsample_size=args.eval.downsample_size)
else:
lfd_env = LfdEnvironment(sim, world, downsample_size=args.eval.downsample_size)
dof_inds = sim_util.dof_inds_from_name(sim.robot, '+'.join(init_joint_names))
values, dof_inds = zip(*[(value, dof_ind) for value, dof_ind in zip(init_joint_values, dof_inds) if dof_ind != -1])
sim.robot.SetDOFValues(values, dof_inds) # this also sets the torso (torso_lift_joint) to the height in the data
sim_util.reset_arms_to_side(sim)
if args.animation:
viewer = trajoptpy.GetViewer(sim.env)
if os.path.isfile(args.window_prop_file) and os.path.isfile(args.camera_matrix_file):
print "loading window and camera properties"
window_prop = np.loadtxt(args.window_prop_file)
camera_matrix = np.loadtxt(args.camera_matrix_file)
try:
viewer.SetWindowProp(*window_prop)
viewer.SetCameraManipulatorMatrix(camera_matrix)
except:
print "SetWindowProp and SetCameraManipulatorMatrix are not defined. Pull and recompile Trajopt."
else:
print "move viewer to viewpoint that isn't stupid"
print "then hit 'p' to continue"
viewer.Idle()
print "saving window and camera properties"
try:
window_prop = viewer.GetWindowProp()
camera_matrix = viewer.GetCameraManipulatorMatrix()
np.savetxt(args.window_prop_file, window_prop, fmt='%d')
np.savetxt(args.camera_matrix_file, camera_matrix)
except:
print "GetWindowProp and GetCameraManipulatorMatrix are not defined. Pull and recompile Trajopt."
viewer.Step()
if args.eval.dof_limits_factor != 1.0:
assert 0 < args.eval.dof_limits_factor and args.eval.dof_limits_factor <= 1.0
active_dof_indices = sim.robot.GetActiveDOFIndices()
active_dof_limits = sim.robot.GetActiveDOFLimits()
for lr in 'lr':
manip_name = {"l":"leftarm", "r":"rightarm"}[lr]
dof_inds = sim.robot.GetManipulator(manip_name).GetArmIndices()
limits = np.asarray(sim.robot.GetDOFLimits(dof_inds))
limits_mean = limits.mean(axis=0)
limits_width = np.diff(limits, axis=0)
new_limits = limits_mean + args.eval.dof_limits_factor * np.r_[-limits_width/2.0, limits_width/2.0]
for i, ind in enumerate(dof_inds):
active_dof_limits[0][active_dof_indices.tolist().index(ind)] = new_limits[0,i]
active_dof_limits[1][active_dof_indices.tolist().index(ind)] = new_limits[1,i]
sim.robot.SetDOFLimits(active_dof_limits[0], active_dof_limits[1])
return lfd_env, sim
def setup_registration_and_trajectory_transferer(args, sim):
if args.eval.batch:
if args.eval.reg_type == 'rpm':
reg_factory = BatchGpuTpsRpmRegistrationFactory(GlobalVars.demos, args.eval.actionfile)
elif args.eval.reg_type == 'bij':
reg_factory = BatchGpuTpsRpmBijRegistrationFactory(GlobalVars.demos, args.eval.actionfile)
else:
raise RuntimeError("Invalid reg_type option %s"%args.eval.reg_type)
else:
if args.eval.reg_type == 'segment':
reg_factory = TpsSegmentRegistrationFactory(GlobalVars.demos)
elif args.eval.reg_type == 'rpm':
reg_factory = TpsRpmRegistrationFactory(GlobalVars.demos)
elif args.eval.reg_type == 'bij':
reg_factory = TpsRpmBijRegistrationFactory(GlobalVars.demos, actionfile=args.eval.actionfile)
else:
raise RuntimeError("Invalid reg_type option %s"%args.eval.reg_type)
if args.eval.transferopt == 'pose' or args.eval.transferopt == 'finger':
traj_transferer = PoseTrajectoryTransferer(sim, args.eval.beta_pos, args.eval.beta_rot, args.eval.gamma, args.eval.use_collision_cost)
if args.eval.transferopt == 'finger':
traj_transferer = FingerTrajectoryTransferer(sim, args.eval.beta_pos, args.eval.gamma, args.eval.use_collision_cost, init_trajectory_transferer=traj_transferer)
else:
raise RuntimeError("Invalid transferopt option %s"%args.eval.transferopt)
if args.eval.unified:
reg_and_traj_transferer = UnifiedRegistrationAndTrajectoryTransferer(reg_factory, traj_transferer)
else:
reg_and_traj_transferer = TwoStepRegistrationAndTrajectoryTransferer(reg_factory, traj_transferer)
return reg_and_traj_transferer
def get_features(args):
feat_type = args.eval.feature_type
if feat_type== 'base':
from lfd.mmqe.features import BatchRCFeats as feat
elif feat_type == 'mul':
from lfd.mmqe.features import MulFeats as feat
elif feat_type == 'mul_quad':
from lfd.mmqe.features import QuadSimpleMulFeats as feat
elif feat_type == 'mul_quad_ind':
from lfd.mmqe.features import QuadSimpleMulIndFeats as feat
elif feat_type == 'mul_quad_mapind':
from lfd.mmqe.features import QuadSimpleMulMapIndFeats as feat
elif feat_type == 'mul_quad_bendind':
from lfd.mmqe.features import QuadSimpleMulBendIndFeats as feat
elif feat_type == 'mul_s':
from lfd.mmqe.features import SimpleMulFeats as feat
elif feat_type == 'mul_grip':
from lfd.mmqe.features import SimpleMulGripperFeats as feat
elif feat_type == 'mul_s_map':
from lfd.mmqe.features import SimpleMulMapIndFeats as feat
elif feat_type == 'landmark':
from lfd.mmqe.features import LandmarkFeats as feat
elif feat_type == 'timestep':
from lfd.mmqe.features import TimestepActionMulFeats as feat
else:
raise ValueError('Incorrect Feature Type')
feats = feat(args.eval.actionfile)
try:
feats.set_landmark_file(args.landmarkfile)
except AttributeError:
pass
if args.eval.weightfile:
feats.load_weights(args.eval.weightfile)
GlobalVars.features = feats
return feats
def main():
args = parse_input_args()
if args.subparser_name == "eval":
eval_util.save_results_args(args.resultfile, args)
elif args.subparser_name == "replay":
loaded_args = eval_util.load_results_args(args.replay.loadresultfile)
assert 'eval' not in vars(args)
args.eval = loaded_args.eval
else:
raise RuntimeError("Invalid subparser name")
setup_log_file(args)
set_global_vars(args)
trajoptpy.SetInteractive(args.interactive)
lfd_env, sim = setup_lfd_environment_sim(args)
reg_and_traj_transferer = setup_registration_and_trajectory_transferer(args, sim)
if args.eval.action_selection == 'feature':
get_features(args)
if args.eval.action_selection == 'greedy':
action_selection = GreedyActionSelection(reg_and_traj_transferer.registration_factory)
else:
action_selection = FeatureActionSelection(reg_and_traj_transferer.registration_factory, GlobalVars.features, GlobalVars.actions, GlobalVars.demos, simulator=reg_and_traj_transferer, lfd_env=lfd_env, width=args.eval.width, depth=args.eval.depth)
if args.subparser_name == "eval":
start = time.time()
if args.eval.parallel:
eval_on_holdout_parallel(args, action_selection, reg_and_traj_transferer, lfd_env, sim)
else:
eval_on_holdout(args, action_selection, reg_and_traj_transferer, lfd_env, sim)
print "eval time is:\t{}".format(time.time() - start)
elif args.subparser_name == "replay":
replay_on_holdout(args, action_selection, reg_and_traj_transferer, lfd_env, sim)
else:
raise RuntimeError("Invalid subparser name")
if __name__ == "__main__":
main()
| bsd-2-clause | -2,636,299,079,971,652,600 | 49.940594 | 252 | 0.637868 | false | 3.38784 | false | false | false |
CityGrid/arsenal | server/arsenalweb/views/api/enc.py | 1 | 5630 | '''Arsenal API ENC for puppet.'''
# Copyright 2015 CityGrid Media, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from pyramid.view import view_config
from sqlalchemy.orm.exc import NoResultFound
from arsenalweb.models.common import (
DBSession,
)
from arsenalweb.models.nodes import (
Node,
)
from arsenalweb.views.api.common import (
api_200,
api_400,
api_500,
api_501,
)
from arsenalweb.views.api.data_centers import (
find_data_center_by_id,
)
LOG = logging.getLogger(__name__)
def find_node_by_name_and_status(settings, node_name):
'''Find a node by name, filtered by statuses'''
try:
status_ids = [s for s in settings['arsenal.enc.status_ids'].splitlines() if s]
except KeyError as ex:
msg = 'You must define arsenal.enc.status_ids in the main settings file to ' \
'enable the enc.'
LOG.error(msg)
raise type(ex)(ex.message + ' {0}'.format(msg))
node = DBSession.query(Node)
node = node.filter(Node.status_id.in_(status_ids))
node = node.filter(Node.name == node_name)
return node.one()
def process_tags(tags, tag_type):
    '''Processes tags into a dict. Values of 'True' and 'False' are
    converted to booleans, values that parse as integers are converted
    to ints, and everything else is kept as the raw string.'''
    results = {}
    for tag in tags:
        LOG.debug('{0} tag: {1}={2}'.format(tag_type, tag.name, tag.value))
        if tag.value == 'True':
            results[tag.name] = True
        elif tag.value == 'False':
            results[tag.name] = False
else:
try:
my_value = tag.value
my_value = int(my_value)
except ValueError:
pass
results[tag.name] = my_value
return results
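# A minimal sketch of how process_tags behaves, assuming tag objects expose
# .name and .value string attributes (which is all the loop above needs):
#
#     from collections import namedtuple
#     Tag = namedtuple('Tag', ['name', 'value'])
#     process_tags([Tag('ct_active', 'True'), Tag('port', '8080'),
#                   Tag('env', 'prod')], 'node')
#     # -> {'ct_active': True, 'port': 8080, 'env': 'prod'}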
def process_node_enc(settings, node_name, param_sources=False):
'''Process enc for node. Merges tags from the following three
objects in order from least to most specific:
node_group
data_center
node
    When a node belongs to multiple node groups, node groups later in the
    iteration order overwrite the tags of earlier ones.'''
results = {}
results['classes'] = []
results['parameters'] = {}
results['status'] = {
'name': None,
}
if param_sources:
results['param_sources'] = {}
try:
node = find_node_by_name_and_status(settings, node_name)
results['name'] = node.name
results['id'] = node.id
results['status'] = node.status
LOG.debug('node name is: {0}'.format(node.name))
LOG.debug('node datacenter is: {0}'.format(node.data_center_id))
# What happens when there's more than one node group? What tags
# win, alphabetic?
for node_group in node.node_groups:
LOG.debug('node_group: {0}'.format(node_group.name))
results['classes'].append(node_group.name)
my_tags = process_tags(node_group.tags, 'node_group')
results['parameters'].update(my_tags)
if param_sources:
for tag in my_tags:
results['param_sources'][tag] = 'node_group'
data_center = find_data_center_by_id(node.data_center_id)
my_tags = process_tags(data_center.tags, 'data_center')
results['parameters'].update(my_tags)
if param_sources:
for tag in my_tags:
results['param_sources'][tag] = 'data_center'
my_tags = process_tags(node.tags, 'node')
results['parameters'].update(my_tags)
if param_sources:
for tag in my_tags:
results['param_sources'][tag] = 'node'
except NoResultFound:
LOG.debug('node not found: {0}'.format(node_name))
except (AttributeError, KeyError):
raise
return results
@view_config(route_name='api_enc', request_method='GET', renderer='json')
def api_enc(request):
'''External node classifier for puppet. Takes a required request parameter
    'name', finds all node_groups associated with the node, and all tags merged
based on the following hierarchy:
node_group
data_center
node
Optional request parameter 'param_sources' will add an additional key that
identifies what level of the hierarchy each tag comes from. Returns a
dict.'''
settings = request.registry.settings
try:
try:
name = request.params['name']
except KeyError as ex:
msg = "Bad Request. Parameter 'name' is required."
LOG.error(msg)
return api_400(msg=msg)
try:
param_sources = request.params['param_sources']
except KeyError:
param_sources = False
LOG.debug('Starting enc for node: {0}'.format(name))
try:
results = process_node_enc(settings, name, param_sources=param_sources)
except (AttributeError, KeyError) as ex:
return api_501(msg=repr(ex))
except Exception as ex:
msg = 'Error calling enc! Exception: {0}'.format(repr(ex))
LOG.error(msg)
return api_500(msg=msg)
return api_200(results=results)
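# Example request against this endpoint (a sketch; the URL path and host are
# assumptions based on the 'api_enc' route name, not taken from the routing
# config):
#
#     GET /api/enc?name=node1.example.com&param_sources=true
#
# On success the response wraps a dict with 'classes' (the node group names),
# 'parameters' (the merged tags), 'status', and optionally 'param_sources'.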
| apache-2.0 | 3,331,748,297,912,140,300 | 32.117647 | 86 | 0.615098 | false | 3.840382 | false | false | false |
markgw/jazzparser | src/jazzparser/utils/chords.py | 1 | 8399 | """Chord processing utilities.
A library of utility functions used throughout the Jazz Parser relating
to chord processing in the input.
"""
"""
============================== License ========================================
Copyright (C) 2008, 2010-12 University of Edinburgh, Mark Granroth-Wilding
This file is part of The Jazz Parser.
The Jazz Parser is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The Jazz Parser is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with The Jazz Parser. If not, see <http://www.gnu.org/licenses/>.
============================ End license ======================================
"""
__author__ = "Mark Granroth-Wilding <mark.granroth-wilding@ed.ac.uk>"
import xml.dom.minidom
import re, copy
import logging
# Get the logger from the logging system
logger = logging.getLogger("main_logger")
# Conversions between Lilypond notes and their numeric representation
ly_note_to_int = {"c" : 0, "C" : 0,\
"d" : 2, "D" : 2,\
"e" : 4, "E" : 4,\
"f" : 5, "F" : 5,\
"g" : 7, "G" : 7,\
"a" : 9, "A" : 9,\
"b" : 11, "B" : 11,\
"r" : None }
ly_note_to_base_int = {"c" : 0, "C" : 0,\
"d" : 1, "D" : 1,\
"e" : 2, "E" : 2,\
"f" : 3, "F" : 3,\
"g" : 4, "G" : 4,\
"a" : 5, "A" : 5,\
"b" : 6, "B" : 6,\
"r" : None }
int_to_ly_note = { 0 : "c",\
1 : "cis",\
2 : "d",\
3 : "dis",\
4 : "e",\
5 : "f",\
6 : "fis",\
7 : "g",\
8 : "gis",\
9 : "a",\
10: "ais",\
11: "b",\
None: "r"}
int_to_note_name = { 0 : "C", \
1 : "Db", \
2 : "D", \
3 : "Eb", \
4 : "E", \
5 : "F", \
6 : "Gb", \
7 : "G", \
8 : "Ab", \
9 : "A", \
10: "Bb", \
11: "B" }
ROMAN_NUMERALS = { 0 : "I",
1 : "bII",
2 : "II",
3 : "bIII",
4 : "III",
5 : "IV",
6 : "#IV",
7 : "V",
8 : "bVI",
9 : "VI",
10 : "bVII",
11 : "VII" }
def chord_numeral_to_int(chord_numeral, strict=False):
"""
Given a chord numeral (e.g. "I" or "bVII"), returns the integer
that corresponds to this chord root.
Returns None if input is either a chord variable ("X", "Y") or
itself None.
If strict is set, doesn't allow variable names.
"""
if strict:
numerals = { "I" : 0,
"II" : 2,
"III" : 4,
"IV" : 5,
"V" : 7,
"VI" : 9,
"VII" : 11, }
        root_pattern = re.compile(r'^([b\#]?)(I{1,3}|I?V|VI{0,2})$')
else:
# Map roman numerals to numbers
numerals = { "I" : 0,
"II" : 2,
"III" : 4,
"IV" : 5,
"V" : 7,
"VI" : 9,
"VII" : 11,
"X" : None,
"Y" : None,
"Z" : None,
None : None }
    # Use a regular expression to split the chord root into
    # its accidental and numeral.
    root_pattern = re.compile(r'^([b\#]?)(I{1,3}|I?V|VI{0,2}|X|Y|Z)$')
# Map accidentals to a numeric adjustment
accidentals = { "#" : 1, "" : 0, "b" : -1 }
result = root_pattern.search(chord_numeral)
if result is None:
raise ChordError, "The string '%s' cannot be parsed as a chord" % chord_numeral
result = result.groups()
accidental = result[0]
numeral = result[1]
# Map the root name to a number
if numeral not in numerals:
raise ChordError, "Chord numeral \"%s\" was not recognised." % numeral
chord_num = numerals[numeral]
# Adjust this number according to the accidental
if chord_num is not None:
if accidental not in accidentals:
raise ChordError, "Accidental \"%s\" was not recognised." \
% accidental
chord_num += accidentals[accidental]
return chord_num
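# Example: chord_numeral_to_int("bVII") returns 10, since "VII" maps to 11 and
# the flat subtracts 1; chord_numeral_to_int("X") returns None (non-strict).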
def pitch_class_to_int(chord_numeral):
""" Like L{chord_numeral_to_int}, but for pitch class labels. """
pcs = { "C" : 0,
"D" : 2,
"E" : 4,
"F" : 5,
"G" : 7,
"A" : 9,
"B" : 11, }
root_pattern = re.compile(r'^([A-G])(b*|\#*)$')
result = root_pattern.search(chord_numeral)
if result is None:
raise ChordError, "The string '%s' cannot be parsed as a chord" % \
chord_numeral
pc_str,accidental_str = result.groups()
pc = pcs[pc_str]
# Adjust this number according to the accidentals
if accidental_str:
if accidental_str[0] == "#":
pc += len(accidental_str)
elif accidental_str[0] == "b":
pc -= len(accidental_str)
return pc % 12
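# Example: pitch_class_to_int("Eb") returns 3 (E is 4, flattened by one), and
# pitch_class_to_int("C##") returns 2.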
def int_to_chord_numeral(chord_int):
"""
Given an internal integer representation of a chord root (i.e. a
note of the scale), returns the roman numeral as a string. This
will always use the same convention for #s and bs, so may not be
the same as the numeral that generated the note number.
    The input numbers 0-11 correspond to I-VII in the scale. The input
    need not be in this range: numbers outside it will be mapped into
    this range by "% 12".
Returns "X" if input is None.
"""
if chord_int is None:
return "X"
# Take number mod 12, in case it's not in correct range
return ROMAN_NUMERALS[chord_int % 12]
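# Example: int_to_chord_numeral(10) returns "bVII"; inputs outside 0-11 are
# wrapped, so int_to_chord_numeral(-2) also returns "bVII".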
def int_to_pitch_class(chord_int):
"""
Like L{int_to_chord_numeral}, but outputs a pitch class name instead of
roman numeral. Returns "X" if input is None.
"""
if chord_int is None:
return "X"
else:
# Take number mod 12, in case it's not in correct range
return int_to_note_name[chord_int % 12]
def generalise_chord_name(chord_name):
"""
The grammar generalises over chord names, using X to mean "any
roman numeral chord root". When a chord name comes as input to
the parser, say "IIm", we look up not "IIm", but "Xm".
Given any chord name, this function returns the generalised
chord name to look up in the grammar.
"""
from jazzparser.data import Chord
# Try building a chord from the chord name
chord = Chord.from_name(chord_name)
# Only interested in the tetrad type
return "X%s" % chord.tetrad_type
def interval_observation_from_chord_string_pair(chord1, chord2, type_mapping=None):
"""
Given two strings representing chords, produces a string representing
a chord observation of the form x-t, where x is the interval between
the chords (numeric) and t is the type of the first chord.
"""
from jazzparser.data import Chord
chord1 = Chord.from_name(chord1)
if chord2 is None:
interval = ""
else:
chord2 = Chord.from_name(chord2)
interval = "%d" % Chord.interval(chord1,chord2)
# Apply a mapping to the chord type if one was given
if type_mapping is not None:
ctype = type_mapping[chord1.type]
else:
ctype = chord1.type
return "%s-%s" % (interval, ctype)
class ChordError(Exception):
"""
Raised when there's a problem recognising or processing a chord.
"""
pass
| gpl-3.0 | 8,030,450,507,503,783,000 | 32.329365 | 87 | 0.496011 | false | 3.526029 | false | false | false |
mwickert/scikit-dsp-comm | sk_dsp_comm/synchronization.py | 1 | 20890 | """
A Digital Communications Synchronization
and PLLs Function Module
A collection of useful functions when studying PLLs
and synchronization and digital comm
Copyright (c) March 2017, Mark Wickert
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
"""
import numpy as np
from logging import getLogger
log = getLogger(__name__)
import warnings
def NDA_symb_sync(z,Ns,L,BnTs,zeta=0.707,I_ord=3):
"""
zz,e_tau = NDA_symb_sync(z,Ns,L,BnTs,zeta=0.707,I_ord=3)
z = complex baseband input signal at nominally Ns samples
per symbol
          Ns = Nominal number of samples per symbol (Ts/T) in the symbol
               tracking loop, often 4
           L = TED output smoothing parameter; the timing error detector
               output is averaged over 2*L+1 samples
        BnTs = time bandwidth product of loop bandwidth and the symbol period,
               thus the loop bandwidth as a fraction of the symbol rate.
        zeta = loop damping factor
       I_ord = interpolator order, 1, 2, or 3
       e_tau = the timing error e(k) input to the loop filter
          Kp = The phase detector gain in the symbol tracking loop; for the
               NDA algorithm used here always 1
Mark Wickert July 2014
Motivated by code found in M. Rice, Digital Communications A Discrete-Time
Approach, Prentice Hall, New Jersey, 2009. (ISBN 978-0-13-030497-1).
"""
# Loop filter parameters
    K0 = -1.0      # The modulo 1 counter counts down, so a sign change is needed in the loop gain
Kp = 1.0
K1 = 4*zeta/(zeta + 1/(4*zeta))*BnTs/Ns/Kp/K0
K2 = 4/(zeta + 1/(4*zeta))**2*(BnTs/Ns)**2/Kp/K0
zz = np.zeros(len(z),dtype=np.complex128)
#zz = np.zeros(int(np.floor(len(z)/float(Ns))),dtype=np.complex128)
e_tau = np.zeros(len(z))
#e_tau = np.zeros(int(np.floor(len(z)/float(Ns))))
#z_TED_buff = np.zeros(Ns)
c1_buff = np.zeros(2*L+1)
vi = 0
CNT_next = 0
mu_next = 0
underflow = 0
epsilon = 0
mm = 1
z = np.hstack(([0], z))
for nn in range(1,Ns*int(np.floor(len(z)/float(Ns)-(Ns-1)))):
# Define variables used in linear interpolator control
CNT = CNT_next
mu = mu_next
if underflow == 1:
if I_ord == 1:
# Decimated interpolator output (piecewise linear)
z_interp = mu*z[nn] + (1 - mu)*z[nn-1]
elif I_ord == 2:
# Decimated interpolator output (piecewise parabolic)
# in Farrow form with alpha = 1/2
v2 = 1/2.*np.sum(z[nn+2:nn-1-1:-1]*[1, -1, -1, 1])
v1 = 1/2.*np.sum(z[nn+2:nn-1-1:-1]*[-1, 3, -1, -1])
v0 = z[nn]
z_interp = (mu*v2 + v1)*mu + v0
elif I_ord == 3:
# Decimated interpolator output (piecewise cubic)
# in Farrow form
v3 = np.sum(z[nn+2:nn-1-1:-1]*[1/6., -1/2., 1/2., -1/6.])
v2 = np.sum(z[nn+2:nn-1-1:-1]*[0, 1/2., -1, 1/2.])
v1 = np.sum(z[nn+2:nn-1-1:-1]*[-1/6., 1, -1/2., -1/3.])
v0 = z[nn]
z_interp = ((mu*v3 + v2)*mu + v1)*mu + v0
else:
                log.error('I_ord must be 1, 2, or 3')
# Form TED output that is smoothed using 2*L+1 samples
# We need Ns interpolants for this TED: 0:Ns-1
c1 = 0
for kk in range(Ns):
if I_ord == 1:
# piecewise linear interp over Ns samples for TED
z_TED_interp = mu*z[nn+kk] + (1 - mu)*z[nn-1+kk]
elif I_ord == 2:
# piecewise parabolic in Farrow form with alpha = 1/2
v2 = 1/2.*np.sum(z[nn+kk+2:nn+kk-1-1:-1]*[1, -1, -1, 1])
v1 = 1/2.*np.sum(z[nn+kk+2:nn+kk-1-1:-1]*[-1, 3, -1, -1])
v0 = z[nn+kk]
z_TED_interp = (mu*v2 + v1)*mu + v0
elif I_ord == 3:
# piecewise cubic in Farrow form
v3 = np.sum(z[nn+kk+2:nn+kk-1-1:-1]*[1/6., -1/2., 1/2., -1/6.])
v2 = np.sum(z[nn+kk+2:nn+kk-1-1:-1]*[0, 1/2., -1, 1/2.])
v1 = np.sum(z[nn+kk+2:nn+kk-1-1:-1]*[-1/6., 1, -1/2., -1/3.])
v0 = z[nn+kk]
z_TED_interp = ((mu*v3 + v2)*mu + v1)*mu + v0
else:
                    log.error('I_ord must be 1, 2, or 3')
c1 = c1 + np.abs(z_TED_interp)**2 * np.exp(-1j*2*np.pi/Ns*kk)
c1 = c1/Ns
# Update 2*L+1 length buffer for TED output smoothing
c1_buff = np.hstack(([c1], c1_buff[:-1]))
# Form the smoothed TED output
epsilon = -1/(2*np.pi)*np.angle(np.sum(c1_buff)/(2*L+1))
# Save symbol spaced (decimated to symbol rate) interpolants in zz
zz[mm] = z_interp
e_tau[mm] = epsilon # log the error to the output vector e
mm += 1
else:
            # Simple zero-order hold interpolation between symbol samples
# we just coast using the old value
#epsilon = 0
pass
vp = K1*epsilon # proportional component of loop filter
vi = vi + K2*epsilon # integrator component of loop filter
v = vp + vi # loop filter output
W = 1/float(Ns) + v # counter control word
# update registers
CNT_next = CNT - W # Update counter value for next cycle
        if CNT_next < 0:                  # Test to see if underflow has occurred
CNT_next = 1 + CNT_next # Reduce counter value modulo-1 if underflow
underflow = 1 # Set the underflow flag
mu_next = CNT/W # update mu
else:
underflow = 0
mu_next = mu
# Remove zero samples at end
zz = zz[:-(len(zz)-mm+1)]
# Normalize so symbol values have a unity magnitude
zz /=np.std(zz)
e_tau = e_tau[:-(len(e_tau)-mm+1)]
return zz, e_tau
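# A usage sketch, assuming z is a matched-filtered complex baseband waveform
# at Ns = 4 samples per symbol (generated elsewhere):
#
#     zz, e_tau = NDA_symb_sync(z, Ns=4, L=32, BnTs=0.01, zeta=0.707, I_ord=3)
#
# zz holds the symbol-rate interpolants and e_tau the smoothed timing error.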
def DD_carrier_sync(z, M, BnTs, zeta=0.707, mod_type = 'MPSK', type = 0, open_loop = False):
"""
z_prime,a_hat,e_phi = DD_carrier_sync(z,M,BnTs,zeta=0.707,type=0)
Decision directed carrier phase tracking
z = complex baseband PSK signal at one sample per symbol
           M = The PSK modulation order, i.e., 2, 4, 8, etc.
BnTs = time bandwidth product of loop bandwidth and the symbol period,
thus the loop bandwidth as a fraction of the symbol rate.
zeta = loop damping factor
type = Phase error detector type: 0 <> ML, 1 <> heuristic
z_prime = phase rotation output (like soft symbol values)
a_hat = the hard decision symbol values landing at the constellation
values
e_phi = the phase error e(k) into the loop filter
Ns = Nominal number of samples per symbol (Ts/T) in the carrier
phase tracking loop, almost always 1
Kp = The phase detector gain in the carrier phase tracking loop;
This value depends upon the algorithm type. For the ML scheme
               described at the end of notes Chapter 9, A = 1, K = 1/sqrt(2),
so Kp = sqrt(2).
Mark Wickert July 2014
Updated for improved MPSK performance April 2020
Added experimental MQAM capability April 2020
Motivated by code found in M. Rice, Digital Communications A Discrete-Time
Approach, Prentice Hall, New Jersey, 2009. (ISBN 978-0-13-030497-1).
"""
Ns = 1
z_prime = np.zeros_like(z)
a_hat = np.zeros_like(z)
e_phi = np.zeros(len(z))
theta_h = np.zeros(len(z))
theta_hat = 0
# Tracking loop constants
Kp = 1 # What is it for the different schemes and modes?
K0 = 1
K1 = 4*zeta/(zeta + 1/(4*zeta))*BnTs/Ns/Kp/K0;
K2 = 4/(zeta + 1/(4*zeta))**2*(BnTs/Ns)**2/Kp/K0;
# Initial condition
vi = 0
# Scaling for MQAM using signal power
# and known relationship for QAM.
if mod_type == 'MQAM':
z_scale = np.std(z) * np.sqrt(3/(2*(M-1)))
z = z/z_scale
for nn in range(len(z)):
# Multiply by the phase estimate exp(-j*theta_hat[n])
z_prime[nn] = z[nn]*np.exp(-1j*theta_hat)
if mod_type == 'MPSK':
if M == 2:
a_hat[nn] = np.sign(z_prime[nn].real) + 1j*0
elif M == 4:
a_hat[nn] = (np.sign(z_prime[nn].real) + \
                             1j*np.sign(z_prime[nn].imag))/np.sqrt(2)
elif M > 4:
# round to the nearest integer and fold to nonnegative
# integers; detection into M-levels with thresholds at mid points.
a_hat[nn] = np.mod((np.rint(np.angle(z_prime[nn])*M/2/np.pi)).astype(np.int),M)
a_hat[nn] = np.exp(1j*2*np.pi*a_hat[nn]/M)
else:
print('M must be 2, 4, 8, etc.')
elif mod_type == 'MQAM':
            # Scale adaptively, assuming var(x_hat) is proportional to the QAM constellation power
if M ==2 or M == 4 or M == 16 or M == 64 or M == 256:
x_m = np.sqrt(M)-1
if M == 2: x_m = 1
# Shift to quadrant one for hard decisions
a_hat_shift = (z_prime[nn] + x_m*(1+1j))/2
# Soft IQ symbol values are converted to hard symbol decisions
a_hat_shiftI = np.int16(np.clip(np.rint(a_hat_shift.real),0,x_m))
a_hat_shiftQ = np.int16(np.clip(np.rint(a_hat_shift.imag),0,x_m))
# Shift back to antipodal QAM
a_hat[nn] = 2*(a_hat_shiftI + 1j*a_hat_shiftQ) - x_m*(1+1j)
else:
print('M must be 2, 4, 16, 64, or 256');
if type == 0:
# Maximum likelihood (ML) Rice
e_phi[nn] = z_prime[nn].imag * a_hat[nn].real - \
z_prime[nn].real * a_hat[nn].imag
elif type == 1:
# Heuristic Rice
e_phi[nn] = np.angle(z_prime[nn]) - np.angle(a_hat[nn])
# Wrap the phase to [-pi,pi]
e_phi[nn] = np.angle(np.exp(1j*e_phi[nn]))
elif type == 2:
# Ouyang and Wang 2002 MQAM paper
            e_phi[nn] = np.imag(z_prime[nn]/a_hat[nn])
else:
            print('Type must be 0, 1, or 2')
vp = K1*e_phi[nn] # proportional component of loop filter
vi = vi + K2*e_phi[nn] # integrator component of loop filter
v = vp + vi # loop filter output
theta_hat = np.mod(theta_hat + v,2*np.pi)
theta_h[nn] = theta_hat # phase track output array
if open_loop:
theta_hat = 0 # for open-loop testing
# Normalize MQAM outputs
if mod_type == 'MQAM':
z_prime *= z_scale
return z_prime, a_hat, e_phi, theta_h
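# A usage sketch for QPSK at one sample per symbol, assuming zz comes from a
# symbol synchronizer such as NDA_symb_sync above:
#
#     z_prime, a_hat, e_phi, theta_h = DD_carrier_sync(zz, M=4, BnTs=0.02)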
def time_step(z, ns, t_step, n_step):
"""
Create a one sample per symbol signal containing a phase rotation
step Nsymb into the waveform.
:param z: complex baseband signal after matched filter
:param ns: number of sample per symbol
:param t_step: in samples relative to Ns
:param n_step: symbol sample location where the step turns on
:return: the one sample per symbol signal containing the phase step
Mark Wickert July 2014
"""
z_step = np.hstack((z[:ns * n_step], z[(ns * n_step + t_step):], np.zeros(t_step)))
return z_step
def phase_step(z, ns, p_step, n_step):
"""
Create a one sample per symbol signal containing a phase rotation
step Nsymb into the waveform.
:param z: complex baseband signal after matched filter
:param ns: number of sample per symbol
:param p_step: size in radians of the phase step
:param n_step: symbol sample location where the step turns on
:return: the one sample symbol signal containing the phase step
Mark Wickert July 2014
"""
nn = np.arange(0, len(z[::ns]))
theta = np.zeros(len(nn))
idx = np.where(nn >= n_step)
theta[idx] = p_step*np.ones(len(idx))
z_rot = z[::ns] * np.exp(1j * theta)
return z_rot
def PLL1(theta,fs,loop_type,Kv,fn,zeta,non_lin):
"""
Baseband Analog PLL Simulation Model
:param theta: input phase deviation in radians
:param fs: sampling rate in sample per second or Hz
:param loop_type: 1, first-order loop filter F(s)=K_LF; 2, integrator
with lead compensation F(s) = (1 + s tau2)/(s tau1),
i.e., a type II, or 3, lowpass with lead compensation
F(s) = (1 + s tau2)/(1 + s tau1)
:param Kv: VCO gain in Hz/v; note presently assume Kp = 1v/rad
and K_LF = 1; the user can easily change this
:param fn: Loop natural frequency (loops 2 & 3) or cutoff
                frequency (loop 1)
:param zeta: Damping factor for loops 2 & 3
:param non_lin: 0, linear phase detector; 1, sinusoidal phase detector
:return: theta_hat = Output phase estimate of the input theta in radians,
ev = VCO control voltage,
phi = phase error = theta - theta_hat
Notes
-----
Alternate input in place of natural frequency, fn, in Hz is
the noise equivalent bandwidth Bn in Hz.
Mark Wickert, April 2007 for ECE 5625/4625
Modified February 2008 and July 2014 for ECE 5675/4675
Python version August 2014
"""
T = 1/float(fs)
Kv = 2*np.pi*Kv # convert Kv in Hz/v to rad/s/v
if loop_type == 1:
# First-order loop parameters
# Note Bn = K/4 Hz but K has units of rad/s
#fn = 4*Bn/(2*pi);
K = 2*np.pi*fn # loop natural frequency in rad/s
elif loop_type == 2:
# Second-order loop parameters
#fn = 1/(2*pi) * 2*Bn/(zeta + 1/(4*zeta));
K = 4 *np.pi*zeta*fn # loop natural frequency in rad/s
tau2 = zeta/(np.pi*fn)
elif loop_type == 3:
# Second-order loop parameters for one-pole lowpass with
# phase lead correction.
#fn = 1/(2*pi) * 2*Bn/(zeta + 1/(4*zeta));
K = Kv # Essentially the VCO gain sets the single-sided
# hold-in range in Hz, as it is assumed that Kp = 1
# and KLF = 1.
tau1 = K/((2*np.pi*fn)**2)
tau2 = 2*zeta/(2*np.pi*fn)*(1 - 2*np.pi*fn/K*1/(2*zeta))
else:
warnings.warn('Loop type must be 1, 2, or 3')
# Initialize integration approximation filters
filt_in_last = 0; filt_out_last = 0;
vco_in_last = 0; vco_out = 0; vco_out_last = 0;
# Initialize working and final output vectors
n = np.arange(len(theta))
theta_hat = np.zeros_like(theta)
ev = np.zeros_like(theta)
phi = np.zeros_like(theta)
# Begin the simulation loop
for k in range(len(n)):
phi[k] = theta[k] - vco_out
if non_lin == 1:
# sinusoidal phase detector
pd_out = np.sin(phi[k])
else:
# Linear phase detector
pd_out = phi[k]
# Loop gain
gain_out = K/Kv*pd_out # apply VCO gain at VCO
# Loop filter
if loop_type == 2:
filt_in = (1/tau2)*gain_out
filt_out = filt_out_last + T/2*(filt_in + filt_in_last)
filt_in_last = filt_in
filt_out_last = filt_out
filt_out = filt_out + gain_out
elif loop_type == 3:
filt_in = (tau2/tau1)*gain_out - (1/tau1)*filt_out_last
u3 = filt_in + (1/tau2)*filt_out_last
filt_out = filt_out_last + T/2*(filt_in + filt_in_last)
filt_in_last = filt_in
filt_out_last = filt_out
else:
filt_out = gain_out;
# VCO
vco_in = filt_out
if loop_type == 3:
vco_in = u3
vco_out = vco_out_last + T/2*(vco_in + vco_in_last)
vco_in_last = vco_in
vco_out_last = vco_out
vco_out = Kv*vco_out # apply Kv
# Measured loop signals
ev[k] = vco_in
theta_hat[k] = vco_out
return theta_hat, ev, phi
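# A usage sketch: a type-II loop (loop_type=2) tracking an assumed 100 Hz
# frequency offset applied as a phase ramp:
#
#     fs = 100e3
#     t = np.arange(0, 0.1, 1/fs)
#     theta = 2*np.pi*100*t
#     theta_hat, ev, phi = PLL1(theta, fs, 2, Kv=1.0, fn=100, zeta=0.707,
#                               non_lin=0)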
def PLL_cbb(x,fs,loop_type,Kv,fn,zeta):
"""
Baseband Analog PLL Simulation Model
:param x: input phase deviation in radians
:param fs: sampling rate in sample per second or Hz
:param loop_type: 1, first-order loop filter F(s)=K_LF; 2, integrator
with lead compensation F(s) = (1 + s tau2)/(s tau1),
i.e., a type II, or 3, lowpass with lead compensation
F(s) = (1 + s tau2)/(1 + s tau1)
:param Kv: VCO gain in Hz/v; note presently assume Kp = 1v/rad
and K_LF = 1; the user can easily change this
:param fn: Loop natural frequency (loops 2 & 3) or cutoff
frequency (loop 1)
:param zeta: Damping factor for loops 2 & 3
:return: theta_hat = Output phase estimate of the input theta in radians,
ev = VCO control voltage,
phi = phase error = theta - theta_hat
Mark Wickert, April 2007 for ECE 5625/4625
Modified February 2008 and July 2014 for ECE 5675/4675
Python version August 2014
"""
T = 1/float(fs)
Kv = 2*np.pi*Kv # convert Kv in Hz/v to rad/s/v
if loop_type == 1:
# First-order loop parameters
# Note Bn = K/4 Hz but K has units of rad/s
#fn = 4*Bn/(2*pi);
K = 2*np.pi*fn # loop natural frequency in rad/s
elif loop_type == 2:
# Second-order loop parameters
#fn = 1/(2*pi) * 2*Bn/(zeta + 1/(4*zeta));
K = 4 *np.pi*zeta*fn # loop natural frequency in rad/s
tau2 = zeta/(np.pi*fn)
elif loop_type == 3:
# Second-order loop parameters for one-pole lowpass with
# phase lead correction.
#fn = 1/(2*pi) * 2*Bn/(zeta + 1/(4*zeta));
K = Kv # Essentially the VCO gain sets the single-sided
# hold-in range in Hz, as it is assumed that Kp = 1
# and KLF = 1.
        tau1 = K/((2*np.pi*fn)**2)
tau2 = 2*zeta/(2*np.pi*fn)*(1 - 2*np.pi*fn/K*1/(2*zeta))
else:
warnings.warn('Loop type must be 1, 2, or 3')
# Initialize integration approximation filters
filt_in_last = 0; filt_out_last = 0;
vco_in_last = 0; vco_out = 0; vco_out_last = 0;
vco_out_cbb = 0
# Initialize working and final output vectors
n = np.arange(len(x))
theta_hat = np.zeros(len(x))
ev = np.zeros(len(x))
phi = np.zeros(len(x))
# Begin the simulation loop
for k in range(len(n)):
#phi[k] = theta[k] - vco_out
phi[k] = np.imag(x[k] * np.conj(vco_out_cbb))
pd_out = phi[k]
# Loop gain
gain_out = K/Kv*pd_out # apply VCO gain at VCO
# Loop filter
if loop_type == 2:
filt_in = (1/tau2)*gain_out
filt_out = filt_out_last + T/2*(filt_in + filt_in_last)
filt_in_last = filt_in
filt_out_last = filt_out
filt_out = filt_out + gain_out
elif loop_type == 3:
filt_in = (tau2/tau1)*gain_out - (1/tau1)*filt_out_last
u3 = filt_in + (1/tau2)*filt_out_last
filt_out = filt_out_last + T/2*(filt_in + filt_in_last)
filt_in_last = filt_in
filt_out_last = filt_out
else:
filt_out = gain_out;
# VCO
vco_in = filt_out
if loop_type == 3:
vco_in = u3
vco_out = vco_out_last + T/2*(vco_in + vco_in_last)
vco_in_last = vco_in
vco_out_last = vco_out
vco_out = Kv*vco_out # apply Kv
vco_out_cbb = np.exp(1j*vco_out)
# Measured loop signals
ev[k] = vco_in
theta_hat[k] = vco_out
return theta_hat, ev, phi
| bsd-2-clause | 5,287,978,719,403,717,000 | 39.642023 | 95 | 0.560843 | false | 3.210882 | false | false | false |
cphyc/MHD_simulation | python/simul.py | 1 | 9256 | #!/usr/env python3
try:
import numpypy as np
except:
import numpy as np
try:
import cPickle as pickle
except ImportError:
import pickle
## Tri Diagonal Matrix Algorithm(a.k.a Thomas algorithm) solver
def TDMAsolver(a, b, c, d):
'''
TDMA solver, a b c d can be NumPy array type or Python list type.
refer to http://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm
'''
nf = len(a) # number of equations
ac, bc, cc, dc = map(np.array, (a, b, c, d)) # copy the array
    for it in range(1, nf):
mc = ac[it]/bc[it-1]
bc[it] = bc[it] - mc*cc[it-1]
dc[it] = dc[it] - mc*dc[it-1]
xc = ac
xc[-1] = dc[-1]/bc[-1]
    for il in range(nf-2, -1, -1):
xc[il] = (dc[il]-cc[il]*xc[il+1])/bc[il]
del bc, cc, dc # delete variables from memory
return xc
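# A small usage sketch: solve a 4x4 tridiagonal system A x = d, where a is the
# sub-diagonal (a[0] unused), b the diagonal, and c the super-diagonal
# (c[-1] unused):
#
#     a = [0.0, -1.0, -1.0, -1.0]
#     b = [4.0, 4.0, 4.0, 4.0]
#     c = [-1.0, -1.0, -1.0, 0.0]
#     d = [5.0, 5.0, 5.0, 5.0]
#     x = TDMAsolver(a, b, c, d)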
class Vector(object):
def __init__(self, parent):
# save the pointer to the parent (dynamical)
self.p = parent
# initial G = 0, G[k,n]
self.G = np.zeros((self.p.Nz, self.p.NFourier), dtype="float64")
# access via G[k][n]
def step(self):
# save the old G
self.G_old = self.G.copy()
# compute the new one
self.compute_G()
# new += dt/2*(3G-G_old)
self.field[1:-1] = (self.field[1:-1]
+ self.p.dt/2*(3*self.G[1:-1] - self.G_old[1:-1])
)
# conditions at top and bottom : null
self.field[0 ,:] = 0
self.field[-1,:] = 0
def compute_G(self):
raise Exception("Vector class is a base class, not supposed to be "+
"used like that")
def initial(self, init_cond):
if init_cond == 'null':
self.field = np.zeros((self.p.Nz, self.p.NFourier))
elif init_cond == "T":
self.field = np.array([[T_0(n,k,self.p) for n in range(self.p.NFourier)]
for k in range(self.p.Nz)])
else:
raise Exception("init_cond must be either `null` or `T`")
class Temp(Vector):
name = "T"
def compute_G(self):
# compute G except for k = 0, Nz-1 and n = 0
for n in range(1, self.p.NFourier):
self.G[1:-1,n] = ((self.field[:-2,n]-2*self.field[1:-1,n]+self.field[2:,n])
* self.p.oodz2
- (n*self.p.pi/self.p.a)**2
* self.field[1:-1,n] )
class Vort(Vector):
name = "ω"
def __init__(self, parent):
super().__init__(parent)
self.compute_wk()
def compute_wk(self):
# init. the arrays:
self.wk1 = np.zeros((self.p.Nz, self.p.NFourier))
self.wk2 = np.zeros((self.p.Nz, self.p.NFourier))
self.sub = np.zeros((self.p.Nz, self.p.NFourier))
for n in range(1,self.p.NFourier):
            # save some useful functions
sub_f = lambda k : -self.p.oodz2 if k<self.p.Nz-1 else 1
dia = lambda k : (n*self.p.pi/self.p.a)**2 + 2*self.p.oodz2 if 0<k<self.p.Nz-1 else 1
sup = lambda k : -self.p.oodz2 if k>0 else 1
# tridiag. solver
self.wk1[0,n] = 1/dia(0)
self.wk2[0,n] = sup(0) * self.wk1[0,n]
for k in range(1, self.p.Nz-1):
self.wk1[k,n] = 1 /(dia(k)-sub_f(k)*self.wk2[k-1,n])
self.wk2[k,n] = sup(k)*self.wk1[k,n]
self.wk1[-1,n] = 1/(dia(self.p.Nz-1)-sub_f(self.p.Nz-1)*self.wk2[-2,n])
self.sub[:,n] = [sub_f(k) for k in range(self.p.Nz)]
def step(self):
rhs = self.p.psi.field.copy()
# boundary conditions k=0, Nz-1 : psi = 0
rhs[0, :] = 0
rhs[-1,:] = 0
for n in range(1,self.p.NFourier):
# tridiag. solver
self.field[0,n] = rhs[0,n]*self.wk1[0,n]
for k in range(1, self.p.Nz):
                self.field[k,n] = (rhs[k,n] - self.sub[k,n]*self.field[k-1,n])*self.wk1[k,n]
for k in range(self.p.Nz-2, 0, -1):
self.field[k,n] = self.field[k,n]-self.wk2[k,n]*self.field[k+1,n]
class Stream(Vector):
name = "ψ"
def compute_G(self):
# compute G except for k=0, Nz-1 and n=0
for n in range(1, self.p.NFourier):
a = self.p.Ra*n*self.p.pi/self.p.a*self.p.T.field[1:-1,n]
b = (self.field[:-2,n] - 2*self.field[1:-1,n] + self.field[2:,n])*self.p.oodz2
c = (n*self.p.pi/self.p.a)**2*self.field[1:-1,n]
self.G[1:-1,n] = self.p.Pr*( a + b - c)
class Simulation(object):
param_list = {'Re': 1, 'Pr': 1, 'Ra': 1, 'a' : 1, 'Nz': 100,
'NFourier': 50, 'dt_security': 0.9,
'maxiter': 100, 'freq_output': 10,
'freq_critical_Ra':50, 'verbose': False}
def __init__(self, *args, **kargs):
# save the default parameters
for param, value in self.param_list.items():
setattr(self, param, value)
# override if necessary
for param, value in kargs.items():
if param not in self.param_list:
raise Exception("`%s' not recognized" % param)
else:
setattr(self, param, value)
# set the initial values
self.t = 0
self.niter = 0
self.dz = 1/(self.Nz-1)
# some usefull quantities
self.oodz2 = 1/self.dz**2
self.pi = np.pi
# create the inner fields
self.T = Temp(self)
self.omega = Vort(self)
self.psi = Stream(self)
# previous fields for critical Ra number
self.T_old = np.zeros((self.NFourier,))
self.omega_old = np.zeros((self.NFourier,))
self.psi_old = np.zeros((self.NFourier,))
def __del__(self):
pass
def growth(self):
''' Calculate the log-growth rate and return a string containing
        all the growth rates'''
amp = lambda v: np.log(abs(v)) if v != 0 else 0
gr = lambda new,old,n: str(amp(new.field[self.Nz//3,n])
- amp(abs(old[n])))
out = "".join([ gr(self.T, self.T_old,n) + "\t" +
gr(self.omega, self.omega_old,n) + "\t" +
gr(self.psi, self.psi_old,n) + "\t"
for n in range(self.NFourier) ])
# save the arrays for next output
self.T_old = self.T.field[self.Nz//3,:].copy()
self.omega_old = self.omega.field[self.Nz//3,:].copy()
self.psi_old = self.psi.field[self.Nz//3,:].copy()
return out+"\n"
def step(self):
        # output the fields every freq_output iterations
if self.verbose and self.niter % self.freq_output == 0:
self.dump()
        # every freq_critical_Ra iterations, output the log-growth term for the critical Ra
if self.verbose and self.niter % self.freq_critical_Ra == 0 :
output = "# growth : \t"
output+= "".join([
"{T.name}_{n}\t{w.name}_{n}\t{psi.name}_{n}\t".format(T=self.T,
w=self.omega,
psi=self.psi,
n=n)
for n in range(self.NFourier)])
output+= "\n"
output+= "# growth : \t"
output+= self.growth()
print(output)
# get the max timestep
self.CFL()
# increase the time, the iteration
self.t += self.dt
self.niter += 1
# check that the end is not reached
if self.niter > self.maxiter:
return False
else:
return True
def dump(self):
output = "#k\t"
for n in range(self.NFourier):
o = "{T}_{n}\t{w}_{n}\t{psi}_{n}\t".format(T=self.T.name,
w=self.omega.name,
psi=self.psi.name,
n=n)
output += o
output += "\n"
for k in range(self.Nz):
output += str(k) + "\t"
for n in range(self.NFourier):
l = "{T}\t{w}\t{psi}\t".format(T=self.T.field[k,n],
w=self.omega.field[k,n],
psi=self.psi.field[k,n])
output += l
output += "\n"
print(output)
def CFL(self):
# dt < (dz)^2/4 or (dz)^2/(4Pr) if Pr > 1
self.dt = self.dt_security * self.dz**2/(4*max(1,self.Pr))
def T_0 (n,k,s):
if n > 0:
return np.sin(s.pi*k*s.dz)
else:
return 1-k*s.dz
if __name__ == '__main__':
# create a new simulation
s = Simulation(Re=5)
# initial conditions psi(0) = 0, Omega(0) = 0
s.psi.initial("null")
s.omega.initial("null")
# T_n(t=0) = sin(pi*k*dz) & T_0(t=0) = 1-k*dz
    s.T.initial("T")
# main loop over time
while s.step():
s.T.step()
s.psi.step()
s.omega.step()
del s
| apache-2.0 | -7,288,239,873,645,479,000 | 32.773723 | 97 | 0.469635 | false | 3.173525 | false | false | false |
CosmicFish/CosmicFish | python/cosmicfish_pylib/fisher_operations.py | 1 | 13671 | #----------------------------------------------------------------------------------------
#
# This file is part of CosmicFish.
#
# Copyright (C) 2015-2017 by the CosmicFish authors
#
# The CosmicFish code is free software;
# You can use it, redistribute it, and/or modify it under the terms
# of the GNU General Public License as published by the Free Software Foundation;
# either version 3 of the License, or (at your option) any later version.
# The full text of the license can be found in the file LICENSE at
# the top level of the CosmicFish distribution.
#
#----------------------------------------------------------------------------------------
"""
.. module:: fisher_operations
:platform: Unix
:synopsis: Module that contains operations that can be performed on Fisher matrices.
All of them are safeguarded against non-Fisher input.
.. moduleauthor:: Marco Raveri <mraveri@uchicago.edu> for the CosmicFish code.
"""
# ***************************************************************************************
import numpy as np
from . import fisher_matrix as fm
import math
# ***************************************************************************************
def eliminate_columns_rows( fisher_matrix, indexes ):
"""
    This function eliminates the rows and columns corresponding to the given indexes
    from the Fisher matrix. It also removes the corresponding entries from the other
    information, like the names of the parameters. Notice that the index corresponding
    to the first parameter is zero.
:param fisher_matrix: input Fisher matrix
:type fisher_matrix: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
:param indexes: list of integers with the indexes to delete from the Fisher matrix
:type indexes: :class:`list` of :class:`int`
:returns: A Fisher matrix with the columns and rows deleted
:rtype: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
"""
# check validity of the input:
if ( not isinstance(fisher_matrix, fm.fisher_matrix) ):
raise ValueError('Error, input fisher_matrix is not a fisher_matrix')
# write the param names:
new_param_names = []
new_param_names_latex = []
new_param_fiducial = []
for i in range( fisher_matrix.num_params ):
if i not in indexes:
new_param_names.append( fisher_matrix.param_names[i] )
new_param_names_latex.append( fisher_matrix.param_names_latex[i] )
new_param_fiducial.append( fisher_matrix.param_fiducial[i] )
# write the Fisher matrix:
fisher_temp = np.delete ( np.delete( fisher_matrix.fisher_matrix, indexes , 0 ), indexes , 1 )
# initialize the new Fisher matrix:
fisher_new = fm.fisher_matrix(fisher_matrix=fisher_temp, param_names=new_param_names, param_names_latex=new_param_names_latex, fiducial=new_param_fiducial )
fisher_new.name = fisher_matrix.name + '_reduced'
fisher_new.path = fisher_matrix.path
fisher_new.indir = fisher_matrix.indir
return fisher_new
# ***************************************************************************************
def eliminate_parameters( fisher_matrix, names ):
"""
    This function eliminates the rows and columns corresponding to the given parameter
    names from the Fisher matrix. It also removes the corresponding entries from the
    other information, like the names of the parameters.
:param fisher_matrix: input Fisher matrix
:type fisher_matrix: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
:param names: list of names of the parameters to delete from the Fisher matrix
:type names: :class:`list` of :class:`string`
:returns: A Fisher matrix with the parameters deleted
:rtype: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
"""
# check validity of the input:
if ( not isinstance(fisher_matrix, fm.fisher_matrix) ):
raise ValueError('Error, input fisher_matrix is not a fisher_matrix')
# get the indexes of the parameters:
index_list = []
for i in names:
if i not in fisher_matrix.param_names_dict:
            raise ValueError('Error, parameter '+str(i)+' is not a parameter of fisher_matrix')
index_list.append(fisher_matrix.param_names_dict[i]-1)
    # eliminate them from the list and return:
return eliminate_columns_rows( fisher_matrix, index_list )
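# A usage sketch, assuming `fisher` is a fisher_matrix whose parameters
# include two nuisance parameters named 'bias_1' and 'bias_2':
#
#     fisher_reduced = eliminate_parameters( fisher, ['bias_1', 'bias_2'] )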
# ***************************************************************************************
def reshuffle( fisher_matrix, names ):
"""
This function reshuffles a Fisher matrix. The new Fisher matrix will have the
parameters specified in names, in the order specified by names.
Can be used to delete parameters, change their order or extract the Fisher
for some parameters without marginalizing over the others.
:param fisher_matrix: input Fisher matrix
:type fisher_matrix: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
:param names: list of names of the parameters that are desired in the output Fisher
matrix, in the desired order.
:type names: :class:`list` of :class:`string`
:returns: A Fisher matrix with the new parameters
:rtype: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
"""
# check validity of the input:
if ( not isinstance(fisher_matrix, fm.fisher_matrix) ):
raise ValueError('Error, input fisher_matrix is not a fisher_matrix')
    # check whether the names required are inside the Fisher matrix:
    for i in names:
        if i not in fisher_matrix.param_names_dict:
            raise ValueError('Error, parameter '+str(i)+' is not a parameter of fisher_matrix')
# get the new latex names and fiducial:
new_param_names_latex = []
new_param_fiducial = []
for i in names:
ind = fisher_matrix.param_names_dict[i] -1
new_param_names_latex.append(fisher_matrix.param_names_latex[ind])
new_param_fiducial.append(fisher_matrix.param_fiducial[ind])
# initialize an empty matrix:
num_param_new = len(names)
new_matrix = np.zeros([num_param_new,num_param_new])
# fill the new matrix:
for i in range(num_param_new):
for j in range(num_param_new):
# get the name:
x = names[i]
y = names[j]
# get the parameter name:
x1 = fisher_matrix.param_names_dict[x]-1
y1 = fisher_matrix.param_names_dict[y]-1
            # get the entry of the new matrix:
new_matrix[i,j] = fisher_matrix.fisher_matrix[x1,y1]
# create the new Fisher matrix:
fisher_new = fm.fisher_matrix(fisher_matrix=new_matrix, param_names=names, param_names_latex=new_param_names_latex, fiducial=new_param_fiducial)
fisher_new.name = fisher_matrix.name + '_reshuffled'
fisher_new.path = fisher_matrix.path
fisher_new.indir = fisher_matrix.indir
return fisher_new
# ***************************************************************************************
def marginalise( fisher_matrix, names ):
"""
This function marginalises a Fisher matrix over all parameters but the ones in names.
The new Fisher matrix will have the parameters specified in names, in the order specified by names.
The calculation is performed in the numerically stable way.
:param fisher_matrix: input Fisher matrix
:type fisher_matrix: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
:param names: list of names of the parameters of the output Fisher matrix,
in the order that will appear in the output Fisher matrix. All other parameters
will be marginalized over.
:type names: :class:`list` of :class:`string`
:returns: A Fisher matrix with the marginalized parameters
:rtype: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
"""
# check validity of the input:
if ( not isinstance(fisher_matrix, fm.fisher_matrix) ):
raise ValueError('Error, input fisher_matrix is not a fisher_matrix')
    # check whether the names required are inside the Fisher matrix:
    for i in names:
        if i not in fisher_matrix.param_names_dict:
            raise ValueError('Error, parameter '+str(i)+' is not a parameter of fisher_matrix')
# get the new latex names and fiducial:
new_param_names_latex = []
new_param_fiducial = []
for i in names:
ind = fisher_matrix.param_names_dict[i] -1
new_param_names_latex.append(fisher_matrix.param_names_latex[ind])
new_param_fiducial.append(fisher_matrix.param_fiducial[ind])
# initialize an empty matrix:
num_param_new = len(names)
new_matrix = np.zeros([num_param_new,num_param_new])
# fill the new inverse matrix:
for i in range(num_param_new):
for j in range(num_param_new):
# get the name:
x = names[i]
y = names[j]
# get the parameter name:
x1 = fisher_matrix.param_names_dict[x]-1
y1 = fisher_matrix.param_names_dict[y]-1
            # get the entry of the new matrix:
new_matrix[i,j] = fisher_matrix.get_fisher_inverse()[x1,y1]
fisher_temp = np.linalg.inv( new_matrix )
# create the new Fisher matrix:
fisher_new = fm.fisher_matrix(fisher_matrix=fisher_temp, param_names=names, param_names_latex=new_param_names_latex, fiducial=new_param_fiducial)
fisher_new.name = fisher_matrix.name + '_marginal'
fisher_new.path = fisher_matrix.path
fisher_new.indir = fisher_matrix.indir
return fisher_new
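# A usage sketch, assuming `fisher` was built with parameters
# ['omegam', 'omegab', 'h']: keep omegam and h, marginalising over omegab:
#
#     fisher_marg = marginalise( fisher, ['omegam', 'h'] )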
# ***************************************************************************************
def marginalise_over( fisher_matrix, names ):
"""
This function marginalises a Fisher matrix over the parameters in names.
The new Fisher matrix will not have the parameters specified in names.
The calculation is performed in the numerically stable way.
:param fisher_matrix: input Fisher matrix
:type fisher_matrix: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
:param names: list of names of the parameters over which the Fisher will be marginalised.
:type names: :class:`list` of :class:`string`
:returns: A Fisher matrix with the names parameters marginalized.
:rtype: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
"""
# check validity of the input:
if ( not isinstance(fisher_matrix, fm.fisher_matrix) ):
raise ValueError('Error, input fisher_matrix is not a fisher_matrix')
    # check whether the names required are inside the Fisher matrix:
    for i in names:
        if i not in fisher_matrix.param_names_dict:
            raise ValueError('Error, parameter '+str(i)+' is not a parameter of fisher_matrix')
# get the indexes:
new_names = [ i for i in fisher_matrix.param_names if i not in names ]
return marginalise( fisher_matrix, new_names )
# ***************************************************************************************
def information_gain( fisher_1, fisher_2, fisher_prior, units=math.log(2.0), stat=True ):
"""
This function computes the Fisher approximation of Kullback-Leibler information gain.
For the details of the formula we refer to the CosmicFish notes.
:param fisher_1: first input Fisher matrix
:type fisher_1: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
:param fisher_2: second input Fisher matrix
:type fisher_2: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
:param fisher_prior: input Fisher matrix with the prior information.
:type fisher_prior: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
:param units: Units of information gain. Optional by default in Bits.
:type units: :class:`float`
    :param stat: whether to output the expected value and variance
:type stat: :class:`logical`
:returns: a :class:`float` with the information gain.
:rtype: :class:`float`
"""
info_gain = 0.0
# first computations:
F1p = fisher_1 + fisher_prior
F2p = fisher_2 + fisher_prior
# get common parameter names:
param_names = [ name for name in F1p.get_param_names() if name in F2p.get_param_names() ]
# reshuffle the second matrix:
F1p = reshuffle( F1p, param_names )
F2p = reshuffle( F2p, param_names )
    # define a dummy Fisher matrix with zero entries and with the same parameters as the others:
fisher_temp = fm.fisher_matrix( fisher_matrix=0.0*F2p.get_fisher_matrix(),
param_names=F2p.get_param_names(),
param_names_latex=F2p.get_param_names_latex(),
fiducial=F2p.get_param_fiducial() )
fisher_temp = fisher_2 + fisher_temp
# the first term:
info_gain = info_gain -math.log( F1p.determinant()/F2p.determinant() )
info_gain = info_gain -F1p.get_fisher_matrix().shape[0]
# the second trace term:
info_gain = info_gain + np.trace( np.dot( F2p.get_fisher_inverse() , F1p.get_fisher_matrix() ) )
# add additional term if statistical average over data is wanted
if stat:
# we break down the third term into two pieces:
temp = np.dot( np.dot( np.dot( fisher_temp.get_fisher_matrix(), F2p.get_fisher_inverse() ),F1p.get_fisher_matrix() ), F2p.get_fisher_inverse() )
temp = temp + np.dot( np.dot( temp,fisher_temp.get_fisher_matrix() ), F1p.get_fisher_inverse() )
info_gain = info_gain + np.trace( temp )
# compute variance:
temp = np.dot( temp, temp )
info_variance = np.trace( temp )
# output
info_gain = info_gain/2.0/units
return info_gain
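# Illustrative usage sketch (added commentary, not part of the original module);
# the three Fisher matrices are hypothetical and assumed to share parameters:
#
#   gain_bits = information_gain(fisher_old, fisher_new, fisher_prior)
#   gain_nats = information_gain(fisher_old, fisher_new, fisher_prior,
#                                units=1.0, stat=False)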
# ***************************************************************************************
| gpl-3.0 | 1,292,096,842,457,407,700 | 46.141379 | 160 | 0.633823 | false | 3.520731 | false | false | false |
Craig-Macomber/Panda3D-Terrain-System | renderer/geoClipMapper.py | 1 | 13967 | import math
from renderer import RenderNode
from terrain.bakery.gpuBakery import tileMapSize
from terrain.bakery.bakery import loadTex
from panda3d.core import *
#from terrain.textureRenderer import *
class GeoClipMapper(RenderNode):
def __init__(self,path,tileSource,minScale,focus):
RenderNode.__init__(self,path,NodePath(path+"_terrainNode"),heightScale=300.0)
heightMapName=self.specialMaps['height']
self.heightMapRez=0
for s in tileSource.shaders:
if s.name==heightMapName:
self.heightMapRez=s.getRez(tileMapSize)
break
        if self.heightMapRez==0: print 'Failed to determine height map resolution'
self.setShaderInput("heightMapRez",self.heightMapRez,0,0,0)
self.focus=focus
self.minScale=minScale
self.tileSource=tileSource
self.heightStage=TextureStage("height")
rezFactor=50
n=rezFactor*4-1
if n+4>=self.heightMapRez:
print 'Error: Can not have geoClipMap rez higher than height map rez'
self.rez=n
m=(n+1)/4
self.baseTileScale=minScale/n*self.heightMapRez
scale=minScale/(n-1)
self.terrainNode.setScale(scale,scale,scale)
self.shaderHeightScale=self.heightScale/scale
self.terrainNode.setShaderInput("heightScale",self.shaderHeightScale,0,0)
self.terrainNode.setShader(loader.loadShader("terrain/geoClip.sha"))
def makeGrid(xSize,ySize):
""" Size is in verts, not squares """
format=GeomVertexFormat.getV3()
vdata=GeomVertexData('grid', format, Geom.UHStatic)
vertex=GeomVertexWriter(vdata, 'vertex')
grid=Geom(vdata)
#snode=GeomNode('grid')
for x in xrange(xSize):
for y in xrange(ySize):
vertex.addData3f(x,y,0)
tri=GeomTristrips(Geom.UHStatic)
def index(lx,ly):
return ly+lx*(ySize)
for x in xrange(xSize-1):
for y in xrange(ySize):
tri.addVertex(index(x,y))
tri.addVertex(index(x+1,y))
tri.closePrimitive()
grid.addPrimitive(tri)
grid.setBoundsType(BoundingVolume.BTBox)
grid.setBounds(BoundingBox(Point3(0,0,0),Point3(xSize-1,ySize-1,self.shaderHeightScale)))
#snode.addGeom(grid)
#snode.setBoundsType(BoundingVolume.BTBox)
#snode.setBounds(BoundingBox(Point3(0,0,0),Point3(xSize-1,ySize-1,self.shaderHeightScale)))
#snode.setFinal(True)
return grid
nxn=makeGrid(n,n)
mxm=makeGrid(m,m)
mx3=makeGrid(m,3)
x3xm=makeGrid(3,m)
m2x2=makeGrid(2*m+1,2)
cNode=GeomNode('center')
cGeom=nxn.makeCopy()
cGeom.transformVertices(Mat4.translateMat(-n/2,-n/2,0))
cNode.addGeom(cGeom)
cGeom.setBoundsType(BoundingVolume.BTBox)
cGeom.setBounds(BoundingBox(Point3(-n/2,-n/2,0),Point3(n/2-1,n/2-1,self.shaderHeightScale)))
cNode.setBoundsType(BoundingVolume.BTBox)
center=_GeoClipLevel(0,self,cNode)
#NodePath(nxn).instanceTo(center).setPos(-n/2,-n/2,0)
center.reparentTo(self.terrainNode)
halfOffset=n/2
#ring=NodePath("Ring")
ring=GeomNode('ring')
def doCorner(x,y):
xd=x*n/2-(x+1)*m/2
yd=y*n/2-(y+1)*m/2
def doGeom(g,x,y):
cGeom=(g).makeCopy()
cGeom.transformVertices(Mat4.translateMat(x,y,0))
cGeom.setBoundsType(BoundingVolume.BTBox)
b=g.getBounds()
p=b.getPoint(7)
cGeom.setBounds(BoundingBox(Point3(x,y,0),Point3(p.getX()+x,p.getY()+y,self.shaderHeightScale)))
ring.addGeom(cGeom)
doGeom(mxm,xd,yd)
doGeom(mxm,xd,yd-y*(m-1))
doGeom(mxm,xd-x*(m-1),yd)
#NodePath(mxm).copyTo(ring).setPos(xd,yd,0)
#NodePath(mxm).copyTo(ring).setPos(xd,yd-y*(m-1),0)
#NodePath(mxm).copyTo(ring).setPos(xd-x*(m-1),yd,0)
if x==-1:
if y==1:
doGeom(mx3,xd,yd-y*(m+1))
#NodePath(mx3).copyTo(ring).setPos(xd,yd-y*(m+1),0)
else:
xd2=n/2-m
doGeom(mx3,xd2,yd+2*m-2)
#NodePath(mx3).copyTo(ring).setPos(xd2,yd+2*m-2,0)
else:
doGeom(x3xm,xd-x*(m+1),yd)
#NodePath(x3xm).copyTo(ring).setPos(xd-x*(m+1),yd,0)
doCorner(-1,-1)
doCorner(1,-1)
doCorner(-1,1)
doCorner(1,1)
ring.setBoundsType(BoundingVolume.BTBox)
ringCount=4
self.levels=[center]
for i in xrange(ringCount):
cNode=GeomNode('ring'+str(i))
cNode.addGeomsFrom(ring)
'''for c in ring.getChildren():
x=c.copyTo(r)
#v1=Point3()
#v2=Point3()
#x.calcTightBounds(v1,v2)
#v2.setZ(1)
node=x.node()
node.setBoundsType(BoundingVolume.BTBox)
node.setBounds(c.node().getBounds())#(BoundingBox(v1,v2))
node.setFinal(1)
x.showBounds()'''
#r.showBounds()
r=_GeoClipLevel(i+1,self,cNode)
r.reparentTo(self.terrainNode)
r.node().setBoundsType(BoundingVolume.BTBox)
#r.showBounds()
self.levels.append(r)
self.terrainNode.setShaderInput("n",n,0,0,0)
# Add a task to keep updating the terrain
taskMgr.add(self.update, "update")
self.grass=self.setUpGrass(center,n)
grassTex = loadTex("terrain/grassSheet",True)
self.grass.setShaderInput("grassSheet",grassTex)
grassTex.setWrapU(Texture.WMClamp)
grassTex.setWrapV(Texture.WMClamp)
self.terrainNode.setShaderInput("offset",0,0,0,0)
#for r in self.levels:
# for node in r.getChildren():
# node.setShaderInput("offset",node.getX()+halfOffset,node.getY()+halfOffset,0,0)
self.centerTile=None
def setUpGrass(self,node,rez):
        # create a mesh that's a bunch of disconnected rectangles, 1 tall, 0.5 wide, at every grid point
format=GeomVertexFormat.getV3()
snode=GeomNode('grass')
grass=NodePath(snode)
grass.reparentTo(node)
grass.setAttrib(CullFaceAttrib.make(CullFaceAttrib.MCullNone))
grass.setShader(loader.loadShader("terrain/geoClipGrass.sha"))
cullmargin=3
def makeGrid(ofx,ofy,xStart,yStart,xEnd,yEnd):
# does not include end values, but does include start ones
vdata=GeomVertexData('grid', format, Geom.UHStatic)
vertex=GeomVertexWriter(vdata, 'vertex')
grid=Geom(vdata)
snode.setBoundsType(BoundingVolume.BTBox)
for x in xrange(xStart,xEnd):
for y in xrange(yStart,yEnd):
xp=x-ofx-.25-1
yp=y-ofy-1
vertex.addData3f(xp,yp,0)
vertex.addData3f(xp+.5,yp,0)
vertex.addData3f(xp,yp,1)
vertex.addData3f(xp+.5,yp,1)
tri=GeomTristrips(Geom.UHStatic)
def index(lx,ly):
return ((ly-yStart)+(lx-xStart)*(yEnd-yStart))*4
for x in xrange(xStart,xEnd):
for y in xrange(yStart,yEnd):
i=index(x,y)
tri.addVertex(i)
tri.addVertex(i+1)
tri.addVertex(i+2)
tri.addVertex(i+3)
tri.closePrimitive()
grid.addPrimitive(tri)
snode.addGeom(grid)
#block=NodePath(snode)
#block.reparentTo(grass)
grid.setBoundsType(BoundingVolume.BTBox)
grid.setBounds(BoundingBox(Point3(xStart-cullmargin-ofx,yStart-cullmargin-ofy,0),Point3(xEnd-1+cullmargin-ofx,yEnd-1+cullmargin-ofy,self.shaderHeightScale+cullmargin)))
#block.node().setFinal(True)
#
#grass.showBounds()
#makeGrid(rez/2,rez/2,0,0,rez,rez)
c=5
for x in xrange(c):
for y in xrange(c):
makeGrid(rez/2,rez/2,x*rez//c,y*rez//c,(x+1)*rez//c,(y+1)*rez//c)
grass.node().setBoundsType(BoundingVolume.BTBox)
#grass.showBounds()
return grass
def height(self,x,y):
if self.centerTile is None: return 0
#print 'y'
tile=self.centerTile
peeker=self.heightPeeker
tx=(x-tile.x)/tile.scale
ty=(y-tile.y)/tile.scale
c=Vec4()
sx=peeker.getXSize()
sy=peeker.getYSize()
px=(sx*tx)
py=(sy*ty)
#u=math.floor(px)/sx
#v=math.floor(py)/sy
fu=px-math.floor(px)
fv=py-math.floor(py)
#u2=math.floor(px+1)/sx
#v2=math.floor(py)/sy
px=math.floor(px)
py=math.floor(py)
#peeker.lookup(c,u,v)
def getH(x,y):
peeker.lookup(c,x/sx,y/sy)
return c.getX()+c.getY()/256+c.getZ()/(256*256)
h=(getH(px+1,py+1)*fu+getH(px,py+1)*(1-fu))*fv+(getH(px+1,py)*fu+getH(px,py)*(1-fu))*(1-fv)
#peeker.filterRect(c,px/sx,py/sy,px/sx,py/sy)
#h=c.getX()+c.getY()/256+c.getZ()/(256*256)
return h*self.heightScale
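    # Note (added commentary, not in the original file): the lookup above is a
    # manual bilinear interpolation. With getH sampled at the four integer
    # corners around (px, py) and fractional offsets fu, fv in [0, 1):
    #   h = (getH(px+1, py+1)*fu + getH(px, py+1)*(1-fu))*fv
    #     + (getH(px+1, py  )*fu + getH(px, py  )*(1-fu))*(1-fv)
    # which reduces to the corner value when fu = fv = 0.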
def update(self,task):
center=self.levels[0]
if center.lastTile:
maps=center.lastTile.renderMaps
t=maps[self.specialMaps['height']].tex
if self.centerTile is not center.lastTile: # new height tex!
self.heightPeeker=t.peek()
self.centerTile=center.lastTile
for i in xrange(len(self.levels),0,-1):
self.levels[i-1].update(self.levels[i] if i<len(self.levels) else None)
return task.cont
#def height(self,x,y): return 0
class _GeoClipLevel(NodePath):
def __init__(self,level,geoClipMapper,node=None):
"""
level starts at 0 in center
scale is 2**level
"""
if node:
NodePath.__init__(self,node)
else:
NodePath.__init__(self,"GeoClipLevel_"+str(level))
self.level=level
self.geoClipMapper=geoClipMapper
self.heightTex=Texture()#loadTex("renderData/textures/grass") # some texture as place holder before map is made.
self.setShaderInput("height",self.heightTex)
scale=2**(level)
self.setScale(scale,scale,1)
self.lastTile=None
self.tileScale=geoClipMapper.baseTileScale*scale
self.makingTile=False
self.setShaderInput("tileOffset",0,0,0,0)
self.setShaderInput("tilePos",0,0,0,0)
def update(self,bigger):
""" bigger is next larger _GeoClipLevel, or None is self is biggest """
# Place me!
s=int(self.getScale().getX())*2
fx=self.geoClipMapper.focus.getX(self.geoClipMapper.terrainNode)
fy=self.geoClipMapper.focus.getY(self.geoClipMapper.terrainNode)
x=int(fx)/s+1
y=int(fy)/s+1
self.setPos(x*s,y*s,0)
# Tex Offset
#node.setShaderInput("texOffset",node.getX()+halfOffset,node.getY()+halfOffset,0,0)
if self.lastTile is not None:
            # get dist from center of self.lastTile to focus
tx=(self.lastTile.x+self.tileScale/2.0)/self.geoClipMapper.terrainNode.getSx()
ty=(self.lastTile.y+self.tileScale/2.0)/self.geoClipMapper.terrainNode.getSy()
dx=self.getX()-tx
dy=self.getY()-ty
# convert dx and dy to current level scale
dx/=self.getSx()
dy/=self.getSy()
# get margin in px between current tile edge and level edge
s=self.geoClipMapper.heightMapRez
mx=s/2-abs(dx)-self.geoClipMapper.rez/2
my=s/2-abs(dy)-self.geoClipMapper.rez/2
ox=dx+s/2
oy=dy+s/2
self.setShaderInput("tileOffset",ox,oy,0,0)
self.setShaderInput("tilePos",self.lastTile.x,self.lastTile.y,self.lastTile.scale,0)
self.setShaderInput("grassData",self.lastTile.renderMaps[self.geoClipMapper.specialMaps['grassData']].tex)
self.setShaderInput("grassData2",self.lastTile.renderMaps[self.geoClipMapper.specialMaps['grassData2']].tex)
m=min(mx,my)
if (not self.makingTile) and (self.lastTile is None or m<2):
self.makingTile=True
x=self.geoClipMapper.focus.getX(self.geoClipMapper)-self.tileScale/2
y=self.geoClipMapper.focus.getY(self.geoClipMapper)-self.tileScale/2
self.geoClipMapper.tileSource.asyncGetTile(x,y,self.tileScale,self.asyncTileDone)
def asyncTileDone(self,tile):
self.lastTile=tile
print "Tile Level: "+str(self.level)
self.makingTile=False
tex=self.lastTile.renderMaps[self.geoClipMapper.specialMaps['height']].tex
tex.setMinfilter(Texture.FTNearest)
tex.setMagfilter(Texture.FTNearest)
self.setShaderInput("height",tex)
| bsd-2-clause | 5,505,236,265,132,271,000 | 35.565445 | 180 | 0.549223 | false | 3.342987 | false | false | false |
mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/config/system/systemcmdpolicy.py | 1 | 9456 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class systemcmdpolicy(base_resource) :
""" Configuration for command policy resource. """
def __init__(self) :
self._policyname = ""
self._action = ""
self._cmdspec = ""
self._builtin = []
self.___count = 0
@property
def policyname(self) :
"""Name for a command policy. Must begin with a letter, number, or the underscore (_) character, and must contain only alphanumeric, hyphen (-), period (.), hash (#), space ( ), at (@), equal (=), colon (:), and underscore characters. Cannot be changed after the policy is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my policy" or 'my policy').<br/>Minimum length = 1.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
"""Name for a command policy. Must begin with a letter, number, or the underscore (_) character, and must contain only alphanumeric, hyphen (-), period (.), hash (#), space ( ), at (@), equal (=), colon (:), and underscore characters. Cannot be changed after the policy is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my policy" or 'my policy').<br/>Minimum length = 1
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def action(self) :
"""Action to perform when a request matches the policy.<br/>Possible values = ALLOW, DENY.
"""
try :
return self._action
except Exception as e:
raise e
@action.setter
def action(self, action) :
"""Action to perform when a request matches the policy.<br/>Possible values = ALLOW, DENY
"""
try :
self._action = action
except Exception as e:
raise e
@property
def cmdspec(self) :
"""Regular expression specifying the data that matches the policy.<br/>Minimum length = 1.
"""
try :
return self._cmdspec
except Exception as e:
raise e
@cmdspec.setter
def cmdspec(self, cmdspec) :
"""Regular expression specifying the data that matches the policy.<br/>Minimum length = 1
"""
try :
self._cmdspec = cmdspec
except Exception as e:
raise e
@property
def builtin(self) :
""".<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE, PARTITION_ALL.
"""
try :
return self._builtin
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(systemcmdpolicy_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.systemcmdpolicy
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.policyname) :
return str(self.policyname)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
""" Use this API to add systemcmdpolicy.
"""
try :
if type(resource) is not list :
addresource = systemcmdpolicy()
addresource.policyname = resource.policyname
addresource.action = resource.action
addresource.cmdspec = resource.cmdspec
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ systemcmdpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].policyname = resource[i].policyname
addresources[i].action = resource[i].action
addresources[i].cmdspec = resource[i].cmdspec
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
""" Use this API to delete systemcmdpolicy.
"""
try :
if type(resource) is not list :
deleteresource = systemcmdpolicy()
if type(resource) != type(deleteresource):
deleteresource.policyname = resource
else :
deleteresource.policyname = resource.policyname
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ systemcmdpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].policyname = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ systemcmdpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].policyname = resource[i].policyname
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
""" Use this API to update systemcmdpolicy.
"""
try :
if type(resource) is not list :
updateresource = systemcmdpolicy()
updateresource.policyname = resource.policyname
updateresource.action = resource.action
updateresource.cmdspec = resource.cmdspec
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ systemcmdpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].policyname = resource[i].policyname
updateresources[i].action = resource[i].action
updateresources[i].cmdspec = resource[i].cmdspec
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the systemcmdpolicy resources that are configured on netscaler.
"""
try :
if not name :
obj = systemcmdpolicy()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = systemcmdpolicy()
obj.policyname = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [systemcmdpolicy() for _ in range(len(name))]
obj = [systemcmdpolicy() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = systemcmdpolicy()
obj[i].policyname = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of systemcmdpolicy resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = systemcmdpolicy()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
""" Use this API to count the systemcmdpolicy resources configured on NetScaler.
"""
try :
obj = systemcmdpolicy()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of systemcmdpolicy resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = systemcmdpolicy()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Builtin:
MODIFIABLE = "MODIFIABLE"
DELETABLE = "DELETABLE"
IMMUTABLE = "IMMUTABLE"
PARTITION_ALL = "PARTITION_ALL"
class Action:
ALLOW = "ALLOW"
DENY = "DENY"
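# Illustrative usage sketch (added commentary, not part of the generated SDK
# file); the client object and policy values below are hypothetical:
#
#   policy = systemcmdpolicy()
#   policy.policyname = "read-only-policy"
#   policy.action = systemcmdpolicy.Action.ALLOW
#   policy.cmdspec = "(^show\\s+.*)"
#   systemcmdpolicy.add(client, policy)   # client: an authenticated NITRO session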
class systemcmdpolicy_response(base_response) :
def __init__(self, length=1) :
self.systemcmdpolicy = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.systemcmdpolicy = [systemcmdpolicy() for _ in range(length)]
| apache-2.0 | -3,354,399,022,787,214,300 | 31.163265 | 283 | 0.684433 | false | 3.473916 | false | false | false |
ramunasd/python-ant | src/ant/core/driver.py | 1 | 7358 | # -*- coding: utf-8 -*-
# pylint: disable=missing-docstring, invalid-name
##############################################################################
#
# Copyright (c) 2011, Martín Raúl Villalba
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
##############################################################################
from __future__ import division, absolute_import, print_function, unicode_literals
from threading import Lock
# USB1 driver uses a USB<->Serial bridge
from serial import Serial, SerialException, SerialTimeoutException
# USB2 driver uses direct USB connection. Requires PyUSB
from usb.control import get_interface
from usb.core import USBError, find as findDeviceUSB
from usb.util import (find_descriptor, claim_interface, release_interface,
endpoint_direction, ENDPOINT_OUT, ENDPOINT_IN)
from ant.core.exceptions import DriverError
class Driver(object):
def __init__(self, device, log=None, debug=False):
self.device = device
self.debug = debug
self.log = log
self._lock = Lock()
def open(self):
with self._lock:
if self._opened:
raise DriverError("Could not open device (already open).")
self._open()
if self.log:
self.log.logOpen()
@property
def opened(self):
with self._lock:
return self._opened
def close(self):
with self._lock:
if not self._opened:
raise DriverError("Could not close device (not open).")
self._close()
if self.log:
self.log.logClose()
def read(self, count):
if count <= 0:
raise DriverError("Could not read from device (zero request).")
if not self.opened:
raise DriverError("Could not read from device (not open).")
data = self._read(count)
with self._lock:
if self.log:
self.log.logRead(data)
if self.debug:
self._dump(data, 'READ')
return data
def write(self, data):
if len(data) <= 0:
raise DriverError("Could not write to device (no data).")
if not self.opened:
raise DriverError("Could not write to device (not open).")
ret = self._write(data.encode())
with self._lock:
if self.debug:
self._dump(data, 'WRITE')
if self.log:
self.log.logWrite(data[0:ret])
return ret
@staticmethod
def _dump(data, title):
if len(data) == 0:
return
print("========== [{0}] ==========".format(title))
length = 8
line = 0
        while data:
            row = data[:length]
            data = data[length:]
            hex_data = ['%02X' % ord(byte) for byte in row]
            print('%04X' % line, ' '.join(hex_data))
            line += length  # advance the printed offset (missing in the original)
print()
@property
def _opened(self):
raise NotImplementedError()
def _open(self):
raise NotImplementedError()
def _close(self):
raise NotImplementedError()
def _read(self, count):
raise NotImplementedError()
def _write(self, data):
raise NotImplementedError()
class USB1Driver(Driver):
def __init__(self, device, baud_rate=115200, log=None, debug=False):
        super(USB1Driver, self).__init__(device, log, debug)
self.baud = baud_rate
self._serial = None
def _open(self):
try:
dev = Serial(self.device, self.baud)
except SerialException as e:
raise DriverError(str(e))
if not dev.isOpen():
raise DriverError("Could not open device")
self._serial = dev
self._serial.timeout = 0.01
@property
def _opened(self):
return self._serial is not None
def _close(self):
self._serial.close()
def _read(self, count):
return self._serial.read(count)
def _write(self, data):
try:
count = self._serial.write(data)
self._serial.flush()
except SerialTimeoutException as e:
raise DriverError(str(e))
return count
class USB2Driver(Driver):
def __init__(self, log=None, debug=False):
        super(USB2Driver, self).__init__(None, log, debug)  # USB device handle is acquired in _open()
self._ep_out = None
self._ep_in = None
self._dev = None
self._int = None
def _open(self):
# Most of this is straight from the PyUSB example documentation
dev = findDeviceUSB(idVendor=0x0fcf, idProduct=0x1008)
if dev is None:
raise DriverError("Could not open device (not found)")
# make sure the kernel driver is not active
if dev.is_kernel_driver_active(0):
try:
dev.detach_kernel_driver(0)
except USBError as e:
exit("could not detach kernel driver: {}".format(e))
dev.set_configuration()
cfg = dev.get_active_configuration()
interface_number = cfg[(0, 0)].bInterfaceNumber
intf = find_descriptor(cfg,
bInterfaceNumber=interface_number,
bAlternateSetting=get_interface(dev, interface_number)
)
claim_interface(dev, interface_number)
ep_out = find_descriptor(intf, custom_match= \
lambda e: endpoint_direction(e.bEndpointAddress) == ENDPOINT_OUT
)
assert ep_out is not None
ep_in = find_descriptor(intf, custom_match= \
lambda e: endpoint_direction(e.bEndpointAddress) == ENDPOINT_IN
)
assert ep_in is not None
self._ep_out = ep_out
self._ep_in = ep_in
self._dev = dev
self._int = interface_number
@property
def _opened(self):
return self._dev is not None
def _close(self):
release_interface(self._dev, self._int)
self._dev = None
def _read(self, count):
return self._ep_in.read(count).tostring()
def _write(self, data):
return self._ep_out.write(data)
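# Illustrative usage sketch (added commentary, not part of the original
# module); the serial device path is a hypothetical example:
#
#   driver = USB2Driver(debug=True)    # or USB1Driver('/dev/ttyUSB0')
#   driver.open()
#   driver.write(packet)               # raw ANT packet bytes
#   data = driver.read(64)
#   driver.close()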
| mit | 2,149,521,618,508,776,000 | 30.435897 | 82 | 0.56838 | false | 4.373365 | false | false | false |
gdestuynder/MozDef | alerts/write_audit.py | 2 | 2157 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
#
# This alert fires on filesystem writes to audited (auditd) paths, excluding a configured list of processes
from lib.alerttask import AlertTask
from mozdef_util.query_models import SearchQuery, TermMatch, PhraseMatch
class WriteAudit(AlertTask):
def main(self):
self.parse_config('write_audit.conf', ['skipprocess', 'expectedusers'])
search_query = SearchQuery(minutes=15)
search_query.add_must([
TermMatch('category', 'write'),
TermMatch('details.auditkey', 'audit'),
])
for processname in self.config.skipprocess.split():
search_query.add_must_not(PhraseMatch('details.processname', processname))
self.filtersManual(search_query)
self.searchEventsAggregated('details.originaluser', samplesLimit=10)
self.walkAggregations(threshold=2)
def onAggregation(self, aggreg):
category = 'write'
severity = 'WARNING'
tags = ['audit']
users = set()
paths = set()
for event in aggreg['events']:
users.add(event['_source']['details']['user'])
paths.add(event['_source']['summary'].split(' ')[1])
summary = '{0} Filesystem write(s) to an auditd path ({1}) by {2} ({3})'.format(
aggreg['count'],
', '.join(paths),
', '.join(users),
aggreg['value']
)
if aggreg['value'] in self.config.expectedusers.split(' '):
severity = 'NOTICE'
hostnames = self.mostCommon(aggreg['allevents'], '_source.hostname')
# did they modify more than one host?
# or just modify an existing configuration more than once?
if len(hostnames) > 1:
for i in hostnames[:5]:
summary += ' on {0} ({1} hosts)'.format(i[0], i[1])
return self.createAlertDict(summary, category, tags, aggreg['events'], severity)
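    # Example of a rendered summary (added commentary, illustrative values only):
    #   "3 Filesystem write(s) to an auditd path (/etc/passwd) by root (jdoe) on host1 (2 hosts)"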
| mpl-2.0 | 2,070,204,838,203,926,800 | 35.559322 | 92 | 0.613815 | false | 3.965074 | false | false | false |
schoeke/django-openresources | openresources/urls.py | 2 | 4748 | # -*- coding: utf-8 -*-
# Copyright 2011 Florian Ledermann <ledermann@ims.tuwien.ac.at>
#
# This file is part of OpenResources
# https://bitbucket.org/floledermann/openresources/
#
# OpenResources is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenResources is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenResources. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls.defaults import *
from django.conf import settings
from django.views.generic.simple import direct_to_template
from openresources import views
from openresources.models import Tag
# commenting out autocomplete stuff for now, probably needs custom implementation
#from autocomplete.views import autocomplete
#autocomplete.register(
# id = 'keys',
# queryset = Tag.objects.values('key').distinct(),
# fields = ('key',),
# limit = 20,
# key = 'key',
# label = 'key',
#)
view_patterns = patterns('',
url(r'^$', views.view, name='openresources_view'),
url(r'^in/(?P<area>[0-9A-Za-z-_]+)/$', views.view, name='openresources_view'),
url(r'^json/$', views.view_json, name='geojson'),
url(r'^(?P<mode>[0-9A-Za-z-_]+)/$', views.view, name='openresources_view'),
url(r'^in/(?P<area>[0-9A-Za-z-_]+)/(?P<mode>[0-9A-Za-z-_]+)/$', views.view, name='openresources_view'),
)
urlpatterns = patterns('',
url(r'^views/$', views.views, name='openresources_views'),
url(r'^views/new/$', views.edit_view, name='openresources_new_view'),
url(r'^views/edit/(?P<name>[0-9A-Za-z-_]+)/$', views.edit_view, name='openresources_edit_view'),
url(r'^templates/$', views.templates, name='openresources_templates'),
url(r'^templates/new/$', views.edit_template, name='openresources_template_edit'),
url(r'^template/(?P<name>[0-9A-Za-z-_]+)/$', views.edit_template, name='openresources_template_edit'),
# temporary, until resource view support assigned template
url(r'^template-resource/(?P<template>[0-9A-Za-z-_]+)/(?P<resource>[0-9A-Za-z-_]+)/$', views.edit_with_template, name='openresources_edit_with_template'),
url(r'^template-resource/(?P<template>[0-9A-Za-z-_]+)/$', views.edit_with_template, name='openresources_edit_with_template'),
#url(r'^all/$', views.all_resources, name='openresources_all'),
url(r'^tags/$', views.tags, name='openresources_tags'),
# *? matches key non-greedy, matching only as few as possible characters if value has = sign in it
url(r'^tag/(?P<key>.*?)=(?P<value>.*)/$', views.tag, name='openresources_tag'),
url(r'^tag/(?P<key>.*)/$', views.tag, name='openresources_tag_key'),
url(r'^tools/rename_tag/$', views.rename_tag, name='openresources_rename_tag'),
url(r'^icons/$', views.icons, name='openresources_icons'),
url(r'^icons/add/$', views.add_icon, name='openresources_new_icon'),
url(r'^choices.json$', views.resource_choices),
url(r'^tag/(?P<key>.*)/choices.json$', views.tag_choices),
# *? matches key non-greedy, matching only as few as possible characters if value has '=' sign in it
url(r'^with/tag/(?P<key>.*?)=(?P<value>.*)/$', views.resources_by_tag, name='openresources_with_tag'),
url(r'^with/tag/(?P<key>.*)/$', views.resources_by_tag, name='openresources_with_key'),
url(r'^resource/(?P<key>.*)/$', views.resource, name='openresources_resource'),
url(r'^new/$', views.edit_resource, name='openresources_new'),
url(r'^edit/(?P<key>.*)/$', views.edit_resource, name='openresources_edit'),
#url('^autocomplete/(\w+)/$', autocomplete, name='autocomplete'),
url(r'^context/set/$', views.set_context, name='openresources_set_context'),
url(r'^search/$', views.search, name='openresources_search'),
url(r'^credits/$', direct_to_template, {'template': 'openresources/credits.html'}, name='openresources_credits'),
url(r'^json/all/$', views.all_json, name='geojson_all'),
url(r'^', include(view_patterns)),
url(r'^view/(?P<name>[0-9A-Za-z-_]+)/', include(view_patterns)),
# this cannot be reached, as we show the default view as an index page
# however this is used for reversing the index page url in templates
url(r'^$', views.index, name='openresources_index'),
)
| agpl-3.0 | 3,711,478,665,950,113,000 | 45.48 | 158 | 0.657119 | false | 3.348378 | false | false | false |
innovationgarage/tfprism | tfprism/trainingserver.py | 1 | 4061 | #! /usr/bin/env python
import argparse
import sys
import tensorflow as tf
import netifaces
import dns.resolver
import pieshell
import multiprocessing
import click
FLAGS = None
def run_server(spec, job_name, task_index):
print "Starting server /job:%s/task:%s as %s..." % (job_name, task_index, spec[job_name][task_index])
tf.train.Server(
tf.train.ClusterSpec(spec),
job_name=job_name,
task_index=task_index
).join()
def generate_tasks(servers, base_port):
"""Input: {"server1": ncpus, "server2":ncpus...}
Output: (("server1", port1), ("server1", port2)...("serverN", "portM"))
"""
for server, ncpus in servers:
for cpuidx in xrange(0, ncpus):
yield (server, base_port + cpuidx)
def generate_cluster(servers, base_port, n_ps_tasks):
tasks = ["%s:%s" % (server, port) for server, port in generate_tasks(servers, base_port)]
ps_tasks = tasks[:n_ps_tasks]
worker_tasks = tasks[n_ps_tasks:]
return {'ps': ps_tasks, 'worker': worker_tasks}
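# Worked example (added commentary; the host names are hypothetical):
#   generate_cluster([("node-a", 2), ("node-b", 1)], 5600, n_ps_tasks=1)
# yields
#   {'ps': ['node-a:5600'], 'worker': ['node-a:5601', 'node-b:5600']}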
def find_local_server_idx(servers):
local_ips = set([netifaces.ifaddresses(iface)[netifaces.AF_INET][0]['addr']
for iface in netifaces.interfaces()
if netifaces.AF_INET in netifaces.ifaddresses(iface)])
local_ips.add("127.0.1.1") # Hack for debian
task_ips = [server[0] for server in servers]
task_ips = [record.address
for ip in task_ips
for record in dns.resolver.query(ip, 'A')]
local_task_ip = iter(local_ips.intersection(set(task_ips))).next()
return task_ips.index(local_task_ip)
def generate_task_indexes(servers, server_idx, n_ps_tasks):
base_task_idx = sum(s[1] for s in servers[:server_idx])
server = servers[server_idx]
for n in xrange(0, server[1]):
task_idx = base_task_idx + n
if task_idx >= n_ps_tasks:
yield "worker", task_idx - n_ps_tasks
else:
yield "ps", task_idx
def servers_to_str(servers):
return ",".join("%s:%s" % s for s in servers)
def str_to_servers(str):
return [(name, int(ncpus)) for name, ncpus in (s.split(":") for s in str.split(","))]
def introspect_cluster(servernames):
return ",".join(pieshell.env.parallel("--no-notice", "--nonall", "--line-buffer", "-S", servernames,
'echo -n "$(hostname):"; cat /proc/cpuinfo | grep "processor" | wc -l'))
def start_cluster(servernames, base_port, n_ps_tasks):
servers = introspect_cluster(servernames)
print pieshell.env.parallel(
'--no-notice', '--nonall', '--line-buffer', '--tag',
'-S', servernames,
'nohup tfprism node run --base_port %s --ps_tasks %s %s < /dev/null > tfprism.log 2>&1 & echo "$!" > /var/run/tfprism.pid; sleep 2' % (
base_port, n_ps_tasks, servers))
def stop_cluster(servernames):
print pieshell.env.parallel(
'--no-notice', '--nonall', '--line-buffer', '--tag',
'-S', servernames,
"kill -KILL $(cat /var/run/tfprism.pid)" % servers)
def run_node(servers, base_port, n_ps_tasks):
servers = str_to_servers(servers)
cluster_spec = generate_cluster(servers, base_port, n_ps_tasks)
procs = [multiprocessing.Process(target=run_server, args=(cluster_spec, job_name, task_index))
for job_name, task_index in generate_task_indexes(servers, find_local_server_idx(servers), n_ps_tasks)]
for proc in procs:
proc.daemon = True
proc.start()
for proc in procs:
proc.join()
@click.group()
def main():
pass
@main.group()
def node():
pass
@node.command()
@click.argument("servers")
@click.option('--base_port', default=5600)
@click.option('--ps_tasks', default=1)
def run(servers, base_port, ps_tasks):
run_node(servers, base_port, ps_tasks)
@main.group()
def cluster():
pass
@cluster.command()
@click.argument("servers")
@click.option('--base_port', default=5600)
@click.option('--ps_tasks', default=1)
def start(servers, base_port, ps_tasks):
start_cluster(servers, base_port, ps_tasks)
@cluster.command()
@click.argument("servers")
def stop(servers):
stop_cluster(servers)
if __name__ == "__main__":
main()
| apache-2.0 | 577,285,489,258,239,600 | 29.30597 | 139 | 0.644669 | false | 3.062594 | false | false | false |
bingweichen/GOKU | backend/server/route/virtual_card_route.py | 1 | 7586 | # -*- coding: UTF-8 -*-
"""
@author: larry.shuoren@outlook.com
@time: 8/10/17
@desc: virtual card route
"""
import json
from flask import Blueprint
from flask import jsonify
from flask import request
from peewee import DoesNotExist
from flask_jwt_extended import jwt_required, get_jwt_identity
from playhouse.shortcuts import model_to_dict
from server.service import virtual_card_service
from server.utility.json_utility import models_to_json, custom_models_to_json
from server.utility.exception import *
from server.service import wx_payment_service
from server.database.model import User
from server.database.model import VirtualCard
from server.utility.constant.basic_constant import \
WxPaymentBody, WxPaymentAttach
PREFIX = '/virtual_card'
virtual_card_app = Blueprint("virtual_card", __name__, url_prefix=PREFIX)
# ***************************** Virtual card ***************************** #
# Get the virtual card
@virtual_card_app.route('', methods=['GET'])
@jwt_required
def get_virtual_card():
username = get_jwt_identity()
# username = request.args.get("username")
try:
virtual_card = virtual_card_service.get_virtual_card(
card_no=username
)
virtual_card = model_to_dict(virtual_card, recurse=False)
return jsonify({'response': virtual_card}), 200
except DoesNotExist as e:
return jsonify({
'response': {
'error': e.args,
                'message': 'Virtual consumption card not activated'
}
}), 400
# ***************************** Deposit ***************************** #
# # Get the deposit amount
# @virtual_card_app.route('/deposit', methods=['GET'])
# def get_deposit():
# """
# check if the card deposited
# :param card_no: card number
# :return: True of False
# """
# username = request.args.get("username")
# try:
# deposit = virtual_card_service.get_deposit(
# card_no=username
# )
# return jsonify({'response': deposit}), 200
#
# except DoesNotExist as e:
# return jsonify({
# 'response': {
# 'error': e.args,
#             'message': 'Virtual consumption card not activated'
# }
# })
# Pay the deposit
@virtual_card_app.route('/deposit', methods=['POST'])
@jwt_required
def pay_deposit():
"""
pay deposit
eg = {
# "card_no": "bingwei",
# "deposit_fee": 199
}
:return:
"""
username = get_jwt_identity()
data = request.get_json()
openid = data.get("openid")
    # if no openid was passed in, get it from the user record
if not openid:
user = User.get(username=username)
openid = user.we_chat_id
try:
deposit_fee = virtual_card_service.pre_pay_deposit(
card_no=username,
)
        # create the prepay order
result = wx_payment_service.get_prepay_id_json(
openid=openid,
body=WxPaymentBody.DEPOSIT,
total_fee=deposit_fee * 100,
attach={
"code": WxPaymentAttach.DEPOSIT
}
)
return jsonify({
'response': result
}), 200
except Error as e:
return jsonify({
'response': {
'error': e.args,
'message': '%s' % e.args
}
}), 400
# Return the deposit
@virtual_card_app.route('/deposit/return_deposit', methods=['POST'])
@jwt_required
def return_deposit():
"""
eg = {
# "comment": "test",
}
return deposit
:return:
"""
username = get_jwt_identity()
try:
result, record, refund_record = \
virtual_card_service.return_deposit(
card_no=username
)
return jsonify({
'response': {
"result": result,
"record": model_to_dict(record, recurse=False),
"refund_record": model_to_dict(refund_record, recurse=False)
}}), 200
except Error as e:
return jsonify({
'response': {
'error': e.args,
'message': '%s' % e.args
}
}), 400
# ***************************** Balance ***************************** #
# # Get the balance
# @virtual_card_app.route('/balance', methods=['GET'])
# def get_card_balance():
# """
# get card balance
#
# :return: balance
# """
# username = request.args.get("username")
# try:
# balance = virtual_card_service.get_card_balance(
# card_no=username
# )
# return jsonify({
# 'response': {
# 'balance': balance,
# }
# })
# except DoesNotExist as e:
# return jsonify({
# 'response': {
# 'error': e.args,
#             'message': 'Virtual consumption card not activated'
# }
# })
# Consume balance
# @virtual_card.route('/balance/consume', methods=['POST'])
# def consume_virtual_card():
# """
# consume virtual card
#
# eg = {
# "username": "bingwei",
# "amount": 120
# }
# :return:
# """
# data = request.get_json()
# result, record = virtual_card_service.consume_virtual_card(
# card_no=data["username"],
# amount=data["amount"],
# )
# return jsonify({'response': {
# "result": result,
# "record": model_to_dict(record)
# }}), 200
# Top up the balance
@virtual_card_app.route('/balance/top_up', methods=['POST'])
@jwt_required
def pre_top_up():
"""
generate top up prepay
top up virtual card
eg = {
"top_up_fee": 120,
"openid": "",
}
:return:
:rtype:
"""
username = get_jwt_identity()
data = request.get_json()
openid = data.get("openid")
    # if no openid was passed in, get it from the user record
try:
top_up_fee = float(data["top_up_fee"])
except ValueError as e:
return jsonify({
'response': {
'error': e.args,
'message': "金额不是数字"
}
}), 400
if not openid:
user = User.get(username=username)
openid = user.we_chat_id
try:
# check
virtual_card_service.pre_top_up(
card_no=username,
)
        # create the prepay order
result = wx_payment_service.get_prepay_id_json(
openid=openid,
body=WxPaymentBody.BALANCE,
total_fee=top_up_fee * 100,
attach={
"code": WxPaymentAttach.BALANCE
}
)
return jsonify({
'response': result
}), 200
except Error as e:
return jsonify({
'response': {
'error': e.args,
'message': '%s' % e.args
}
}), 400
# ***************************** Consumption records ***************************** #
# Get consumption records
@virtual_card_app.route('/consume_record', methods=['GET'])
@jwt_required
def get_consume_record():
"""
get consume records
:param card_no: card number
:return: consume records
"""
username = get_jwt_identity()
# username = request.args.get("username")
record = virtual_card_service.get_consume_record(
card_no=username
)
new_records = custom_models_to_json(record, [
"consume_date_time",
"consume_event",
"consume_fee",
"id"
])
if record:
return jsonify({'response': new_records}), 200
else:
return jsonify({'response': 'No record found'}), 404
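    # Illustrative request/response sketch (added commentary, sample values):
    #   GET /virtual_card/consume_record  (with a JWT access token)
    #   -> 200 {"response": [{"consume_date_time": "...", "consume_event": "...",
    #                         "consume_fee": 1.5, "id": 1}]}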
| apache-2.0 | -77,828,308,756,038,660 | 23.164474 | 77 | 0.513477 | false | 3.424709 | false | false | false |
wylwang/vnpy | vnpy/trader/app/ctaStrategy/ctaEngine.py | 2 | 26400 | # encoding: UTF-8
'''
This module implements the CTA strategy engine, which abstracts and simplifies
parts of the low-level gateway interface for CTA-style strategies.
Rules for closing today's vs. yesterday's positions:
1. A plain close (OFFSET_CLOSE) is equivalent to close-yesterday (OFFSET_CLOSEYESTERDAY)
2. Only SHFE (Shanghai Futures Exchange) products need to distinguish close-today
   from close-yesterday
3. For SHFE futures with a position opened today, Sell and Cover will use
   OFFSET_CLOSETODAY, otherwise OFFSET_CLOSE
4. This design means an error occurs when the Sell/Cover volume exceeds today's
   position (i.e. the user tries to close both today's and yesterday's positions
   with a single command)
5. The rationale is that vn.trader mainly targets users who find platforms such as
   TB, MC and Pyramid lacking (i.e. users who want higher-frequency trading), and a
   trading strategy should not run into the situation described in 4
6. Users who do want the behaviour described in 4 need a customised architecture
   that separates the strategy signal engine from the order execution engine
   (that's right, you have to write it yourself)
'''
from __future__ import division
import json
import os
import traceback
from collections import OrderedDict
from datetime import datetime, timedelta
from vnpy.event import Event
from vnpy.trader.vtEvent import *
from vnpy.trader.vtConstant import *
from vnpy.trader.vtObject import VtTickData, VtBarData
from vnpy.trader.vtGateway import VtSubscribeReq, VtOrderReq, VtCancelOrderReq, VtLogData
from vnpy.trader.vtFunction import todayDate
from vnpy.trader.app.ctaStrategy.ctaBase import *
from vnpy.trader.app.ctaStrategy.strategy import STRATEGY_CLASS
########################################################################
class CtaEngine(object):
"""CTA策略引擎"""
settingFileName = 'CTA_setting.json'
path = os.path.abspath(os.path.dirname(__file__))
settingFileName = os.path.join(path, settingFileName)
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine):
"""Constructor"""
self.mainEngine = mainEngine
self.eventEngine = eventEngine
        # current date
self.today = todayDate()
        # dict holding strategy instances
        # key: strategy name, value: strategy instance; strategy names must be unique
self.strategyDict = {}
        # dict mapping vtSymbol to strategy instances (used to push tick data)
        # since multiple strategies may trade the same vtSymbol, the key is vtSymbol
        # and the value is a list of all related strategy objects
self.tickStrategyDict = {}
        # dict mapping vtOrderID to strategy object (used to push order and trade data)
        # key: vtOrderID, value: strategy object
self.orderStrategyDict = {}
        # local stop-order id counter
self.stopOrderCount = 0
# stopOrderID = STOPORDERPREFIX + str(stopOrderCount)
        # local stop-order dict
        # key: stopOrderID, value: StopOrder object
        self.stopOrderDict = {}                   # cancelled stop orders are NOT removed from this dict
        self.workingStopOrderDict = {}            # cancelled stop orders ARE removed from this dict
        # position buffer dict
        # key: vtSymbol, value: PositionBuffer object
self.posBufferDict = {}
        # set of trade ids, used to filter out trade pushes already received
self.tradeSet = set()
        # engine type: live trading
self.engineType = ENGINETYPE_TRADING
        # register event listeners
self.registerEvent()
#----------------------------------------------------------------------
def sendOrder(self, vtSymbol, orderType, price, volume, strategy):
"""发单"""
contract = self.mainEngine.getContract(vtSymbol)
req = VtOrderReq()
req.symbol = contract.symbol
req.exchange = contract.exchange
req.price = self.roundToPriceTick(contract.priceTick, price)
req.volume = volume
req.productClass = strategy.productClass
req.currency = strategy.currency
        # by design, orders sent by the CTA engine may only be limit orders
req.priceType = PRICETYPE_LIMITPRICE
        # CTA order type mapping
if orderType == CTAORDER_BUY:
req.direction = DIRECTION_LONG
req.offset = OFFSET_OPEN
elif orderType == CTAORDER_SELL:
req.direction = DIRECTION_SHORT
            # only SHFE distinguishes close-today from close-yesterday
if contract.exchange != EXCHANGE_SHFE:
req.offset = OFFSET_CLOSE
else:
                # get cached position data
posBuffer = self.posBufferDict.get(vtSymbol, None)
                # if no position cache is available, default to close-yesterday
if not posBuffer:
req.offset = OFFSET_CLOSE
                # otherwise, if there is a long position opened today, close today
elif posBuffer.longToday:
req.offset= OFFSET_CLOSETODAY
                # in all other cases, close yesterday
else:
req.offset = OFFSET_CLOSE
elif orderType == CTAORDER_SHORT:
req.direction = DIRECTION_SHORT
req.offset = OFFSET_OPEN
elif orderType == CTAORDER_COVER:
req.direction = DIRECTION_LONG
            # only SHFE distinguishes close-today from close-yesterday
if contract.exchange != EXCHANGE_SHFE:
req.offset = OFFSET_CLOSE
else:
                # get cached position data
posBuffer = self.posBufferDict.get(vtSymbol, None)
                # if no position cache is available, default to close-yesterday
if not posBuffer:
req.offset = OFFSET_CLOSE
                # otherwise, if there is a short position opened today, close today
elif posBuffer.shortToday:
req.offset= OFFSET_CLOSETODAY
                # in all other cases, close yesterday
else:
req.offset = OFFSET_CLOSE
        vtOrderID = self.mainEngine.sendOrder(req, contract.gatewayName)    # send the order
        self.orderStrategyDict[vtOrderID] = strategy        # save the vtOrderID -> strategy mapping
        self.writeCtaLog(u'Strategy %s sent order: %s, %s, %s@%s'
                         %(strategy.name, vtSymbol, req.direction, volume, price))
return vtOrderID
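    # Summary of the mapping above (added commentary):
    #   CTAORDER_BUY   -> open long;   CTAORDER_SHORT -> open short
    #   CTAORDER_SELL  -> close long;  CTAORDER_COVER -> close short
    #   On SHFE only: close orders use OFFSET_CLOSETODAY when today's position
    #   exists, otherwise OFFSET_CLOSE (i.e. close yesterday).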
#----------------------------------------------------------------------
def cancelOrder(self, vtOrderID):
"""撤单"""
# 查询报单对象
order = self.mainEngine.getOrder(vtOrderID)
        # if the lookup succeeded
if order:
            # check the order is still active; only then send the cancel request
orderFinished = (order.status==STATUS_ALLTRADED or order.status==STATUS_CANCELLED)
if not orderFinished:
req = VtCancelOrderReq()
req.symbol = order.symbol
req.exchange = order.exchange
req.frontID = order.frontID
req.sessionID = order.sessionID
req.orderID = order.orderID
self.mainEngine.cancelOrder(req, order.gatewayName)
#----------------------------------------------------------------------
def sendStopOrder(self, vtSymbol, orderType, price, volume, strategy):
"""发停止单(本地实现)"""
self.stopOrderCount += 1
stopOrderID = STOPORDERPREFIX + str(self.stopOrderCount)
so = StopOrder()
so.vtSymbol = vtSymbol
so.orderType = orderType
so.price = price
so.volume = volume
so.strategy = strategy
so.stopOrderID = stopOrderID
so.status = STOPORDER_WAITING
if orderType == CTAORDER_BUY:
so.direction = DIRECTION_LONG
so.offset = OFFSET_OPEN
elif orderType == CTAORDER_SELL:
so.direction = DIRECTION_SHORT
so.offset = OFFSET_CLOSE
elif orderType == CTAORDER_SHORT:
so.direction = DIRECTION_SHORT
so.offset = OFFSET_OPEN
elif orderType == CTAORDER_COVER:
so.direction = DIRECTION_LONG
so.offset = OFFSET_CLOSE
        # save the StopOrder object into the dicts
self.stopOrderDict[stopOrderID] = so
self.workingStopOrderDict[stopOrderID] = so
return stopOrderID
#----------------------------------------------------------------------
def cancelStopOrder(self, stopOrderID):
"""撤销停止单"""
# 检查停止单是否存在
if stopOrderID in self.workingStopOrderDict:
so = self.workingStopOrderDict[stopOrderID]
so.status = STOPORDER_CANCELLED
del self.workingStopOrderDict[stopOrderID]
#----------------------------------------------------------------------
def processStopOrder(self, tick):
"""收到行情后处理本地停止单(检查是否要立即发出)"""
vtSymbol = tick.vtSymbol
        # first check whether any strategy trades this contract
if vtSymbol in self.tickStrategyDict:
            # iterate over pending stop orders and check whether they are triggered
for so in self.workingStopOrderDict.values():
if so.vtSymbol == vtSymbol:
                    longTriggered = so.direction==DIRECTION_LONG and tick.lastPrice>=so.price     # long stop order triggered
                    shortTriggered = so.direction==DIRECTION_SHORT and tick.lastPrice<=so.price   # short stop order triggered
if longTriggered or shortTriggered:
                        # buys/sells are sent at the limit-up/limit-down price (simulated market order)
if so.direction==DIRECTION_LONG:
price = tick.upperLimit
else:
price = tick.lowerLimit
so.status = STOPORDER_TRIGGERED
self.sendOrder(so.vtSymbol, so.orderType, price, so.volume, so.strategy)
del self.workingStopOrderDict[so.stopOrderID]
#----------------------------------------------------------------------
def processTickEvent(self, event):
"""处理行情推送"""
tick = event.dict_['data']
        # on a new tick, first process local stop orders (check whether to fire them)
self.processStopOrder(tick)
        # push the tick to the relevant strategy instances
if tick.vtSymbol in self.tickStrategyDict:
            # add the datetime field
if not tick.datetime:
tick.datetime = datetime.strptime(' '.join([tick.date, tick.time]), '%Y%m%d %H:%M:%S.%f')
            # push to each strategy instance in turn
l = self.tickStrategyDict[tick.vtSymbol]
for strategy in l:
self.callStrategyFunc(strategy, strategy.onTick, tick)
#----------------------------------------------------------------------
def processOrderEvent(self, event):
"""处理委托推送"""
order = event.dict_['data']
if order.vtOrderID in self.orderStrategyDict:
strategy = self.orderStrategyDict[order.vtOrderID]
self.callStrategyFunc(strategy, strategy.onOrder, order)
#----------------------------------------------------------------------
def processTradeEvent(self, event):
"""处理成交推送"""
trade = event.dict_['data']
        # filter out trade pushes already received
if trade.vtTradeID in self.tradeSet:
return
self.tradeSet.add(trade.vtTradeID)
        # push the trade to the strategy object
if trade.vtOrderID in self.orderStrategyDict:
strategy = self.orderStrategyDict[trade.vtOrderID]
            # update the strategy's position
if trade.direction == DIRECTION_LONG:
strategy.pos += trade.volume
else:
strategy.pos -= trade.volume
self.callStrategyFunc(strategy, strategy.onTrade, trade)
        # update the cached position data
if trade.vtSymbol in self.tickStrategyDict:
posBuffer = self.posBufferDict.get(trade.vtSymbol, None)
if not posBuffer:
posBuffer = PositionBuffer()
posBuffer.vtSymbol = trade.vtSymbol
self.posBufferDict[trade.vtSymbol] = posBuffer
posBuffer.updateTradeData(trade)
#----------------------------------------------------------------------
def processPositionEvent(self, event):
"""处理持仓推送"""
pos = event.dict_['data']
        # update the cached position data
if pos.vtSymbol in self.tickStrategyDict:
posBuffer = self.posBufferDict.get(pos.vtSymbol, None)
if not posBuffer:
posBuffer = PositionBuffer()
posBuffer.vtSymbol = pos.vtSymbol
self.posBufferDict[pos.vtSymbol] = posBuffer
posBuffer.updatePositionData(pos)
#----------------------------------------------------------------------
def registerEvent(self):
"""注册事件监听"""
self.eventEngine.register(EVENT_TICK, self.processTickEvent)
self.eventEngine.register(EVENT_ORDER, self.processOrderEvent)
self.eventEngine.register(EVENT_TRADE, self.processTradeEvent)
self.eventEngine.register(EVENT_POSITION, self.processPositionEvent)
#----------------------------------------------------------------------
def insertData(self, dbName, collectionName, data):
"""插入数据到数据库(这里的data可以是VtTickData或者VtBarData)"""
self.mainEngine.dbInsert(dbName, collectionName, data.__dict__)
#----------------------------------------------------------------------
def loadBar(self, dbName, collectionName, days):
"""从数据库中读取Bar数据,startDate是datetime对象"""
startDate = self.today - timedelta(days)
d = {'datetime':{'$gte':startDate}}
barData = self.mainEngine.dbQuery(dbName, collectionName, d)
l = []
for d in barData:
bar = VtBarData()
bar.__dict__ = d
l.append(bar)
return l
#----------------------------------------------------------------------
def loadTick(self, dbName, collectionName, days):
"""从数据库中读取Tick数据,startDate是datetime对象"""
startDate = self.today - timedelta(days)
d = {'datetime':{'$gte':startDate}}
tickData = self.mainEngine.dbQuery(dbName, collectionName, d)
l = []
for d in tickData:
tick = VtTickData()
tick.__dict__ = d
l.append(tick)
return l
#----------------------------------------------------------------------
def writeCtaLog(self, content):
"""快速发出CTA模块日志事件"""
log = VtLogData()
log.logContent = content
event = Event(type_=EVENT_CTA_LOG)
event.dict_['data'] = log
self.eventEngine.put(event)
#----------------------------------------------------------------------
def loadStrategy(self, setting):
"""载入策略"""
try:
name = setting['name']
className = setting['className']
except Exception, e:
            self.writeCtaLog(u'Error loading strategy: %s' %e)
return
        # get the strategy class
strategyClass = STRATEGY_CLASS.get(className, None)
if not strategyClass:
            self.writeCtaLog(u'Strategy class not found: %s' %className)
return
        # prevent duplicate strategy names
if name in self.strategyDict:
            self.writeCtaLog(u'Duplicate strategy instance name: %s' %name)
else:
            # create the strategy instance
strategy = strategyClass(self, setting)
self.strategyDict[name] = strategy
            # save the tick mapping
if strategy.vtSymbol in self.tickStrategyDict:
l = self.tickStrategyDict[strategy.vtSymbol]
else:
l = []
self.tickStrategyDict[strategy.vtSymbol] = l
l.append(strategy)
            # subscribe to the contract
contract = self.mainEngine.getContract(strategy.vtSymbol)
if contract:
req = VtSubscribeReq()
req.symbol = contract.symbol
req.exchange = contract.exchange
                # currency and product class needed when subscribing via the IB gateway are taken from strategy attributes
req.currency = strategy.currency
req.productClass = strategy.productClass
self.mainEngine.subscribe(req, contract.gatewayName)
else:
                self.writeCtaLog(u'Strategy %s: trading contract %s cannot be found' %(name, strategy.vtSymbol))
#----------------------------------------------------------------------
def initStrategy(self, name):
"""初始化策略"""
if name in self.strategyDict:
strategy = self.strategyDict[name]
if not strategy.inited:
strategy.inited = True
self.callStrategyFunc(strategy, strategy.onInit)
else:
                self.writeCtaLog(u'Do not re-initialise strategy instance: %s' %name)
else:
            self.writeCtaLog(u'Strategy instance does not exist: %s' %name)
#---------------------------------------------------------------------
def startStrategy(self, name):
"""启动策略"""
if name in self.strategyDict:
strategy = self.strategyDict[name]
if strategy.inited and not strategy.trading:
strategy.trading = True
self.callStrategyFunc(strategy, strategy.onStart)
else:
            self.writeCtaLog(u'Strategy instance does not exist: %s' %name)
#----------------------------------------------------------------------
def stopStrategy(self, name):
"""停止策略"""
if name in self.strategyDict:
strategy = self.strategyDict[name]
if strategy.trading:
strategy.trading = False
self.callStrategyFunc(strategy, strategy.onStop)
                # cancel all limit orders sent by this strategy
for vtOrderID, s in self.orderStrategyDict.items():
if s is strategy:
self.cancelOrder(vtOrderID)
                # cancel all local stop orders sent by this strategy
for stopOrderID, so in self.workingStopOrderDict.items():
if so.strategy is strategy:
self.cancelStopOrder(stopOrderID)
else:
            self.writeCtaLog(u'Strategy instance does not exist: %s' %name)
#----------------------------------------------------------------------
def saveSetting(self):
"""保存策略配置"""
with open(self.settingFileName, 'w') as f:
l = []
for strategy in self.strategyDict.values():
setting = {}
for param in strategy.paramList:
setting[param] = strategy.__getattribute__(param)
l.append(setting)
jsonL = json.dumps(l, indent=4)
f.write(jsonL)
#----------------------------------------------------------------------
def loadSetting(self):
"""读取策略配置"""
with open(self.settingFileName) as f:
l = json.load(f)
for setting in l:
self.loadStrategy(setting)
self.loadPosition()
#----------------------------------------------------------------------
def getStrategyVar(self, name):
"""获取策略当前的变量字典"""
if name in self.strategyDict:
strategy = self.strategyDict[name]
varDict = OrderedDict()
for key in strategy.varList:
varDict[key] = strategy.__getattribute__(key)
return varDict
else:
            self.writeCtaLog(u'Strategy instance does not exist: ' + name)
return None
#----------------------------------------------------------------------
def getStrategyParam(self, name):
"""获取策略的参数字典"""
if name in self.strategyDict:
strategy = self.strategyDict[name]
paramDict = OrderedDict()
for key in strategy.paramList:
paramDict[key] = strategy.__getattribute__(key)
return paramDict
else:
            self.writeCtaLog(u'Strategy instance does not exist: ' + name)
return None
#----------------------------------------------------------------------
def putStrategyEvent(self, name):
"""触发策略状态变化事件(通常用于通知GUI更新)"""
event = Event(EVENT_CTA_STRATEGY+name)
self.eventEngine.put(event)
#----------------------------------------------------------------------
def callStrategyFunc(self, strategy, func, params=None):
"""调用策略的函数,若触发异常则捕捉"""
try:
if params:
func(params)
else:
func()
except Exception:
            # Stop the strategy and mark it as uninitialized
strategy.trading = False
strategy.inited = False
            # Write a log entry
            content = '\n'.join([u'Strategy %s raised an exception and has been stopped' %strategy.name,
traceback.format_exc()])
self.writeCtaLog(content)
#----------------------------------------------------------------------
def savePosition(self):
"""保存所有策略的持仓情况到数据库"""
for strategy in self.strategyDict.values():
flt = {'name': strategy.name,
'vtSymbol': strategy.vtSymbol}
d = {'name': strategy.name,
'vtSymbol': strategy.vtSymbol,
'pos': strategy.pos}
self.mainEngine.dbUpdate(POSITION_DB_NAME, strategy.className,
d, flt, True)
            content = u'Strategy %s position saved successfully' %strategy.name
self.writeCtaLog(content)
#----------------------------------------------------------------------
def loadPosition(self):
"""从数据库载入策略的持仓情况"""
for strategy in self.strategyDict.values():
flt = {'name': strategy.name,
'vtSymbol': strategy.vtSymbol}
posData = self.mainEngine.dbQuery(POSITION_DB_NAME, strategy.className, flt)
for d in posData:
strategy.pos = d['pos']
#----------------------------------------------------------------------
def roundToPriceTick(self, priceTick, price):
"""取整价格到合约最小价格变动"""
if not priceTick:
return price
newPrice = round(price/priceTick, 0) * priceTick
return newPrice
#----------------------------------------------------------------------
def stop(self):
"""停止"""
pass
########################################################################
class PositionBuffer(object):
"""持仓缓存信息(本地维护的持仓数据)"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.vtSymbol = EMPTY_STRING
        # Long side
self.longPosition = EMPTY_INT
self.longToday = EMPTY_INT
self.longYd = EMPTY_INT
        # Short side
self.shortPosition = EMPTY_INT
self.shortToday = EMPTY_INT
self.shortYd = EMPTY_INT
#----------------------------------------------------------------------
def updatePositionData(self, pos):
"""更新持仓数据"""
if pos.direction == DIRECTION_LONG:
self.longPosition = pos.position
self.longYd = pos.ydPosition
self.longToday = self.longPosition - self.longYd
else:
self.shortPosition = pos.position
self.shortYd = pos.ydPosition
self.shortToday = self.shortPosition - self.shortYd
#----------------------------------------------------------------------
def updateTradeData(self, trade):
"""更新成交数据"""
if trade.direction == DIRECTION_LONG:
            # A long-side open increases the long position and today's long position
if trade.offset == OFFSET_OPEN:
self.longPosition += trade.volume
self.longToday += trade.volume
            # A long-side close-today reduces the short position and today's short position
elif trade.offset == OFFSET_CLOSETODAY:
self.shortPosition -= trade.volume
self.shortToday -= trade.volume
            # A long-side close-yesterday reduces the short position and yesterday's short position
else:
self.shortPosition -= trade.volume
self.shortYd -= trade.volume
else:
            # The short side mirrors the long side
if trade.offset == OFFSET_OPEN:
self.shortPosition += trade.volume
self.shortToday += trade.volume
elif trade.offset == OFFSET_CLOSETODAY:
self.longPosition -= trade.volume
self.longToday -= trade.volume
else:
self.longPosition -= trade.volume
self.longYd -= trade.volume
| mit | -4,068,506,922,399,666,000 | 35.088685 | 111 | 0.485383 | false | 3.557197 | false | false | false |
Stanford-Legal-Tech-Design/legaltech-rapidpro | temba/tests.py | 1 | 15348 | from __future__ import unicode_literals
import json
import os
import redis
import string
import time
from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.core.urlresolvers import reverse
from django.db import connection
from django.test import LiveServerTestCase
from django.utils import timezone
from djorm_hstore.models import register_hstore_handler
from smartmin.tests import SmartminTest
from temba.contacts.models import Contact, ContactGroup, TEL_SCHEME, TWITTER_SCHEME
from temba.orgs.models import Org
from temba.channels.models import Channel
from temba.locations.models import AdminBoundary
from temba.flows.models import Flow
from temba.msgs.models import Msg, INCOMING
from temba.utils import dict_to_struct
def unix_time(dt):
    epoch = datetime.utcfromtimestamp(0)
delta = dt - epoch
return delta.total_seconds()
def unix_time_millis(dt):
return unix_time(dt) * 1000.0
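# Example (illustrative): one day after the epoch,
# unix_time(datetime(1970, 1, 2)) == 86400.0 and
# unix_time_millis(datetime(1970, 1, 2)) == 86400000.0.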
def add_testing_flag_to_context(*args):
return dict(testing=settings.TESTING)
def uuid(id):
return '00000000-00000000-00000000-%08d' % id
class TembaTest(SmartminTest):
def setUp(self):
self.clear_cache()
self.superuser = User.objects.create_superuser(username="super", email="super@user.com", password="super")
# some users not tied to our org
self.non_org_user = self.create_user("NonOrg")
self.non_org_manager = self.create_user("NonOrgManager")
# our three user types inside our org
self.user = self.create_user("User")
self.root = self.create_user("Root")
self.root.groups.add(Group.objects.get(name="Alpha"))
self.admin = self.create_user("Administrator")
# setup admin boundaries for Rwanda
self.country = AdminBoundary.objects.create(osm_id='171496', name='Rwanda', level=0)
state1 = AdminBoundary.objects.create(osm_id='1708283', name='Kigali City', level=1, parent=self.country)
state2 = AdminBoundary.objects.create(osm_id='171591', name='Eastern Province', level=1, parent=self.country)
AdminBoundary.objects.create(osm_id='1711131', name='Gatsibo', level=2, parent=state2)
AdminBoundary.objects.create(osm_id='1711163', name='Kayonza', level=2, parent=state2)
AdminBoundary.objects.create(osm_id='60485579', name='Kigali', level=2, parent=state1)
AdminBoundary.objects.create(osm_id='1711142', name='Rwamagana', level=2, parent=state2)
self.org = Org.objects.create(name="Temba", timezone="Africa/Kigali", country=self.country,
created_by=self.user, modified_by=self.user)
# add users to the org
self.org.administrators.add(self.admin)
self.admin.set_org(self.org)
self.org.administrators.add(self.root)
self.root.set_org(self.org)
self.user.set_org(self.org)
self.superuser.set_org(self.org)
# welcome topup with 1000 credits
self.welcome_topup = self.org.create_welcome_topup(self.admin)
# a single Android channel
self.channel = Channel.objects.create(org=self.org, name="Test Channel",
address="+250785551212", country='RW', channel_type='A',
secret="12345", gcm_id="123",
created_by=self.user, modified_by=self.user)
# reset our simulation to False
Contact.set_simulation(False)
def clear_cache(self):
# we are extra paranoid here and actually hardcode redis to 'localhost' and '10'
# Redis 10 is our testing redis db
r = redis.StrictRedis(host='localhost', db=10)
r.flushdb()
def import_file(self, file, site='http://rapidpro.io'):
handle = open('%s/test_imports/%s.json' % (settings.MEDIA_ROOT, file), 'r+')
data = handle.read()
handle.close()
# import all our bits
self.org.import_app(json.loads(data), self.admin, site=site)
def create_secondary_org(self):
self.admin2 = self.create_user("Administrator2")
self.org2 = Org.objects.create(name="Trileet Inc.", timezone="Africa/Kigali", created_by=self.admin2, modified_by=self.admin2)
self.org2.administrators.add(self.admin2)
        self.admin2.set_org(self.org2)
def create_contact(self, name=None, number=None, twitter=None):
"""
Create a contact in the master test org
"""
urns = []
if number:
urns.append((TEL_SCHEME, number))
if twitter:
urns.append((TWITTER_SCHEME, twitter))
if not name and not urns:
raise ValueError("Need a name or URN to create a contact")
return Contact.get_or_create(self.org, self.user, name, urns=urns)
def create_group(self, name, contacts):
group = ContactGroup.create(self.org, self.user, name)
group.contacts.add(*contacts)
return group
def create_msg(self, **kwargs):
if not 'org' in kwargs:
kwargs['org'] = self.org
if not 'channel' in kwargs:
kwargs['channel'] = self.channel
if not 'contact_urn' in kwargs:
kwargs['contact_urn'] = kwargs['contact'].get_urn(TEL_SCHEME)
if not 'created_on' in kwargs:
kwargs['created_on'] = timezone.now()
if not kwargs['contact'].is_test:
kwargs['topup_id'] = kwargs['org'].decrement_credit()
return Msg.objects.create(**kwargs)
def create_flow(self):
start = int(time.time() * 1000) % 1000000
definition = dict(action_sets=[dict(uuid=uuid(start + 1), x=1, y=1, destination=uuid(start + 5),
actions=[dict(type='reply', msg='What is your favorite color?')]),
dict(uuid=uuid(start + 2), x=2, y=2, destination=None,
actions=[dict(type='reply', msg='I love orange too!')]),
dict(uuid=uuid(start + 3), x=3, y=3, destination=None,
actions=[dict(type='reply', msg='Blue is sad. :(')]),
dict(uuid=uuid(start + 4), x=4, y=4, destination=None,
actions=[dict(type='reply', msg='That is a funny color.')])
],
rule_sets=[dict(uuid=uuid(start + 5), x=5, y=5,
label='color',
response_type='C',
rules=[
dict(uuid=uuid(start + 12), destination=uuid(start + 2), test=dict(type='contains', test='orange'), category="Orange"),
dict(uuid=uuid(start + 13), destination=uuid(start + 3), test=dict(type='contains', test='blue'), category="Blue"),
dict(uuid=uuid(start + 14), destination=uuid(start + 4), test=dict(type='true'), category="Other"),
dict(uuid=uuid(start + 15), test=dict(type='true'), category="Nothing")]) # test case with no destination
],
entry=uuid(start + 1))
flow = Flow.create(self.org, self.admin, "Color Flow")
flow.update(definition)
return flow
class FlowFileTest(TembaTest):
def setUp(self):
super(FlowFileTest, self).setUp()
self.contact = self.create_contact('Ben Haggerty', '+12065552020')
register_hstore_handler(connection)
def assertLastResponse(self, message):
response = Msg.objects.filter(contact=self.contact).order_by('-created_on', '-pk').first()
self.assertTrue("Missing response from contact.", response)
self.assertEquals(message, response.text)
def send_message(self, flow, message, restart_participants=False, contact=None, initiate_flow=False, assert_reply=True):
"""
Starts the flow, sends the message, returns the reply
"""
if not contact:
contact = self.contact
try:
if contact.is_test:
Contact.set_simulation(True)
incoming = self.create_msg(direction=INCOMING, contact=contact, text=message)
# start the flow
if initiate_flow:
flow.start(groups=[], contacts=[contact], restart_participants=restart_participants, start_msg=incoming)
else:
flow.start(groups=[], contacts=[contact], restart_participants=restart_participants)
self.assertTrue(flow.find_and_handle(incoming))
# our message should have gotten a reply
if assert_reply:
reply = Msg.objects.get(response_to=incoming)
self.assertEquals(contact, reply.contact)
return reply.text
return None
finally:
Contact.set_simulation(False)
def get_flow(self, filename, substitutions=None):
flow = Flow.create(self.org, self.admin, name=filename)
self.update_flow(flow, filename, substitutions)
return flow
def update_flow(self, flow, filename, substitutions=None):
from django.conf import settings
handle = open('%s/test_flows/%s.json' % (settings.MEDIA_ROOT, filename), 'r+')
contents = handle.read()
handle.close()
if substitutions:
for key in substitutions.keys():
contents = contents.replace(key, str(substitutions[key]))
flow.update(json.loads(contents))
return flow
from selenium.webdriver.firefox.webdriver import WebDriver
from HTMLParser import HTMLParser
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
class BrowserTest(LiveServerTestCase):
@classmethod
def setUpClass(cls):
cls.driver = WebDriver()
try:
import os
os.mkdir('screenshots')
except:
pass
super(BrowserTest, cls).setUpClass()
@classmethod
def tearDownClass(cls):
pass
#cls.driver.quit()
#super(BrowserTest, cls).tearDownClass()
def strip_tags(self, html):
s = MLStripper()
s.feed(html)
return s.get_data()
def save_screenshot(self):
time.sleep(1)
valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
filename = ''.join(c for c in self.driver.current_url if c in valid_chars)
self.driver.get_screenshot_as_file("screenshots/%s.png" % filename)
def fetch_page(self, url=None):
if not url:
url = ''
if 'http://' not in url:
url = self.live_server_url + url
self.driver.get(url)
self.save_screenshot()
def get_elements(self, selector):
return self.driver.find_elements_by_css_selector(selector)
def get_element(self, selector):
if selector[0] == '#' or selector[0] == '.':
return self.driver.find_element_by_css_selector(selector)
else:
return self.driver.find_element_by_name(selector)
def keys(self, selector, value):
self.get_element(selector).send_keys(value)
def click(self, selector):
time.sleep(1)
self.get_element(selector).click()
self.save_screenshot()
def link(self, link_text):
self.driver.find_element_by_link_text(link_text).click()
time.sleep(2)
self.save_screenshot()
def submit(self, selector):
time.sleep(1)
self.get_element(selector).submit()
self.save_screenshot()
time.sleep(1)
def assertInElements(self, selector, text, strip_html=True):
for element in self.get_elements(selector):
if text in (self.strip_tags(element.text) if strip_html else element.text):
return
self.fail("Couldn't find '%s' in any element '%s'" % (text, selector))
def assertInElement(self, selector, text, strip_html=True):
element = self.get_element(selector)
if text not in (self.strip_tags(element.text) if strip_html else element.text):
self.fail("Couldn't find '%s' in '%s'" % (text, element.text))
#def flow_basics(self):
def browser(self):
self.driver.set_window_size(1024, 2000)
# view the homepage
self.fetch_page()
# go directly to our signup
self.fetch_page(reverse('orgs.org_signup'))
# create account
self.keys('email', 'code@temba.com')
self.keys('password', 'SuperSafe1')
self.keys('first_name', 'Joe')
self.keys('last_name', 'Blow')
self.click('#form-one-submit')
self.keys('name', 'Temba')
self.click('#form-two-submit')
# set up our channel for claiming
anon = User.objects.get(pk=settings.ANONYMOUS_USER_ID)
channel = Channel.objects.create(name="Test Channel", address="0785551212", country='RW',
created_by=anon, modified_by=anon, claim_code='AAABBBCCC',
secret="12345", gcm_id="123")
# and claim it
self.fetch_page(reverse('channels.channel_claim_android'))
self.keys('#id_claim_code', 'AAABBBCCC')
self.keys('#id_phone_number', '0785551212')
self.submit('.claim-form')
# get our freshly claimed channel
channel = Channel.objects.get(pk=channel.pk)
# now go to the contacts page
self.click('#menu-right .icon-contact')
self.click('#id_import_contacts')
# upload some contacts
directory = os.path.dirname(os.path.realpath(__file__))
self.keys('#csv_file', '%s/../media/test_imports/sample_contacts.xls' % directory)
self.submit('.smartmin-form')
# make sure they are there
self.click('#menu-right .icon-contact')
self.assertInElements('.value-phone', '+250788382382')
self.assertInElements('.value-text', 'Eric Newcomer')
self.assertInElements('.value-text', 'Sample Contacts')
class MockResponse(object):
def __init__(self, status_code, text, method='GET', url='http://foo.com/'):
self.text = text
self.status_code = status_code
# mock up a request object on our response as well
self.request = dict_to_struct('MockRequest', dict(method=method, url=url))
def json(self):
return json.loads(self.text)
def raise_for_status(self):
if self.status_code != 200:
raise Exception("Got HTTP error: %d" % self.status_code)
class AnonymousOrg(object):
"""
Makes the given org temporarily anonymous
"""
def __init__(self, org):
self.org = org
def __enter__(self):
self.org.is_anon = True
self.org.save()
def __exit__(self, exc_type, exc_val, exc_tb):
self.org.is_anon = False
self.org.save()
| agpl-3.0 | 2,440,041,185,708,644,000 | 35.198113 | 165 | 0.59252 | false | 3.89741 | true | false | false |
jskDr/jamespy | jmimo.py | 3 | 26574 | """
Author
--------
Best regards,
Sungjin (James) Kim, PhD
Postdoc, CCB in Harvard
sungjinkim@fas.harvard.edu
[Web] http://aspuru.chem.harvard.edu/james-sungjin-kim/
[Linkedin] https://www.linkedin.com/in/jamessungjinkim
[Facebook] https://www.facebook.com/jamessungjin.kim
[alternative email] jamessungjin.kim@gmail.com
Licence
---------
MIT License
"""
from __future__ import print_function
# I started to use __future__ so as to be compatible with Python3
import numpy as np
from sklearn import linear_model
from sklearn import cross_validation
from sklearn import metrics
import pandas as pd
from collections import OrderedDict
# To improve the speed, I using pyx.
import jpyx
import jutil
from jsklearn import codes
def mld( r_l, mod_l = [-0.70710678, 0.70710678]):
"""
maximum likelihood detection
r_l: received signals after reception processing
mod_l: list of all modulation signals
BPSK: [-0.70710678, 0.70710678]
return the demodulated signals (0, 1, ...)
"""
sd_l = list() # store demodulated signal
for r in r_l:
dist = list() #Store distance
for m in mod_l:
d = np.power( np.abs( r - m), 2)
dist.append( d)
sd = np.argmin( dist)
sd_l.append( sd)
return np.array( sd_l)
def calc_BER( r_l, x_l):
"""
calculate bit error rate (BER)
r_l: demodulated signals (ndarray, 1D)
x_l: transmitted signals (ndarray, 1D)
"""
err_l = r_l - x_l
errs = np.where( err_l != 0)[0]
# print 'err_l =', err_l
# print 'errs =', errs
Nerr = len(np.where( err_l != 0)[0])
return float( Nerr) / len( err_l), Nerr
def db2var( SNRdB):
return np.power( 10.0, SNRdB / 10.0)
def gen_BPSK(Nx, Nt):
"""
Generate BPSK modulated signals
"""
BPSK = np.array( [1, -1]) / np.sqrt( 2.0)
s_a = np.random.randint( 0, 2, Nx * Nt)
x_flat_a = BPSK[ s_a]
x_a = np.reshape( x_flat_a, (Nx, Nt))
return BPSK, s_a, x_flat_a, x_a
def gen_H( Nr, Nt):
return np.random.randn( Nr, Nt)
def gen_Rx( Nr, Nx, SNR, H_a, x_a):
"""
The received signals are modeled.
"""
n_a = np.random.randn( Nr, Nx) / np.sqrt( SNR)
y_a = np.dot( H_a, x_a.T) + n_a
return y_a
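# Minimal end-to-end usage sketch (assumes only the helpers defined above):
# send BPSK over a random MIMO channel, zero-force at the receiver and
# measure the BER.
def _demo_zf_link(Nt=2, Nr=4, Nx=1000, SNRdB=10):
    BPSK, s_a, x_flat_a, x_a = gen_BPSK(Nx, Nt)
    H_a = gen_H(Nr, Nt)
    y_a = gen_Rx(Nr, Nx, db2var(SNRdB), H_a, x_a)
    W_a = np.linalg.pinv(H_a)                # zero-forcing receive filter
    r_flat_a = np.dot(W_a, y_a).T.flatten()  # back to per-symbol order
    sd_a = mld(r_flat_a, BPSK)               # nearest-symbol detection
    return calc_BER(s_a, sd_a)               # (BER, number of errors)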
def normalize( W_a):
"Weight is normalized."
nW_a = np.linalg.norm( W_a, axis = 1)
for a0 in range( W_a.shape[0]):
W_a[a0,:] = np.divide( W_a[a0,:], nW_a[a0])
return W_a
class MIMO(object):
"""
Modeling for a MIMO wireless communication system.
"""
def __init__(self, Nt = 2, Nr = 4, Nx = 10, SNRdB = 10, model = "Ridge", Npilot = 10, Nloop = 10):
"""
The parameter of 'model' determines the regression method.
"""
self.set_param( (Nt, Nr, Nx, SNRdB))
self.model = model
self.Npilot = Npilot
self.Nloop = Nloop
# The function of test_ridge_all() uses 3 cases for testing.
# self.N_test_ridge_all = 3
def set_param( self, param_NtNrNxSNRdB):
Nt, Nr, Nx, SNRdB = param_NtNrNxSNRdB
# The antenna configuration is conducted.
self.Nt = Nt
self.Nr = Nr
# No of streams is fixed.
self.Nx = Nx
# Initial SNR is defined
self.SNRdB = SNRdB
self.SNR = db2var(SNRdB)
def _gen_BPSK_r0(self):
"""
Generate BPSK modulated signals
"""
self.BPSK = np.array( [1, -1]) / np.sqrt( 2.0)
self.s_a = np.random.randint( 0, 2, self.Nx * self.Nt)
self.x_flat_a = self.BPSK[ self.s_a]
self.x_a = np.reshape( self.x_flat_a, (self.Nx, self.Nt))
def gen_BPSK( self):
"""
Generate BPSK signals using global function gen_BPSK().
This function will be used to generate pilot signal as well.
"""
self.BPSK, self.s_a, self.x_flat_a, self.x_a = gen_BPSK( self.Nx, self.Nt)
def gen_H(self):
"""
The MIMO channel is generated.
"""
self.H_a = gen_H( self.Nr, self.Nt)
def _gen_Rx_r0(self):
"""
The received signals are modeled.
"""
self.n_a = np.random.randn( self.Nr, self.Nx) / np.sqrt( self.SNR)
self.y_a = np.dot( self.H_a, self.x_a.T) + self.n_a
def gen_Rx(self):
"""
The received signals are modeled.
"""
self.y_a = gen_Rx( self.Nr, self.Nx, self.SNR, self.H_a, self.x_a)
def gen_WR_ideal(self):
"""
The reception process with ideal channel estimation
is conducted.
each reception vector of W_a should be noramlized to one.
"""
self.W_a = np.linalg.pinv( self.H_a)
# The reception signal vector is transposed.
self.gen_Decoding()
def gen_WR_pilot(self, pilot_SNRdB):
"""
The reception process with pilot channel estimation
is conducted.
        The pilot is transmitted through a random information channel.
"""
pilot_SNR = db2var(pilot_SNRdB)
N_a = np.random.randn( *self.H_a.shape) / np.sqrt( pilot_SNR)
Hp_a = self.H_a + N_a
self.W_a = np.linalg.pinv( Hp_a)
self.gen_Decoding()
def gen_WR_pilot_channel(self, pilot_SNRdB):
"""
The reception process with pilot channel estimation
is conducted.
"""
Npilot = self.Npilot
SNRpilot = db2var( pilot_SNRdB)
BPSK, s_a, x_flat_a, x_a = gen_BPSK( Npilot, self.Nt)
# H_a = gen_H( self.Nr, self.Nt)
# H_a = self.H_a
y_a = gen_Rx( self.Nr, Npilot, SNRpilot, self.H_a, x_a)
yT_a = y_a.T
# print( x_a.shape, yT_a.shape)
lm = linear_model.LinearRegression()
lm.fit( yT_a, x_a)
"""
        Power normalization should be considered
        unless it is applied to both signal and noise.
        In this case an MMSE weight is calculated, whereas
        pinv() yields a ZF filter.
"""
self.W_a = lm.coef_
# print( "np.dot( W_a, H_a) =", np.dot( self.W_a, self.H_a))
self.gen_Decoding()
def gs_pilot_reg_only(self, alpha_l):
"""
Grid search is applied for alpha_l.
Later, the best alpha will be selected and decode data using it.
"""
pdo = pd.DataFrame()
for alpha in alpha_l:
pdi = self.cv_pilot_reg_only( alpha)
pdo = pdo.append( pdi, ignore_index = True)
return pdo
def gs_pilot_reg_full(self, alpha_l):
"""
Full means data and pilot are both generated and processed including data decoding
"""
self.gen_BPSK()
self.gen_H()
self.gen_Rx()
self.rx_pilot()
return self.gs_pilot_reg_only( alpha_l)
def gs_pilot_reg_best(self, alpha_l):
"""
Find the best alpha using Ridge regression.
Return
--------
The best alpha is returned.
"""
pdi = self.gs_pilot_reg_only( alpha_l)
# print( 'pdi["E[scores]"]', pdi["E[scores]"])
i_max = np.argmin( pdi["E[scores]"])
alpha_best = pdi["alpha"][i_max]
return alpha_best
def gs_pilot_reg_best_full(self, alpha_l):
"""
Full means data and pilot are both generated and processed including data decoding
"""
self.gen_BPSK()
self.gen_H()
self.gen_Rx()
self.rx_pilot()
return self.gs_pilot_reg_best( alpha_l)
def rx_pilot(self):
Npilot = self.Npilot
SNRpilot = self.SNR
BPSK, s_a, x_flat_a, x_a = gen_BPSK( Npilot, self.Nt)
# H_a = gen_H( self.Nr, self.Nt)
# H_a = self.H_a
y_a = gen_Rx( self.Nr, Npilot, SNRpilot, self.H_a, x_a)
yT_a = y_a.T
self.rx_p = dict()
self.rx_p["yT_a"] = yT_a
self.rx_p["x_a"] = x_a
def cv_pilot_only(self):
"""
        Cross-validation scores are evaluated using LOO.
SNRpilot is equal to SNR, which is SNRdata.
"""
yT_a = self.rx_p["yT_a"]
x_a = self.rx_p["x_a"]
lm = linear_model.LinearRegression()
scores = codes.cross_val_score_loo( lm, yT_a, x_a)
        # Output is stored with environmental variables.
pdi = pd.DataFrame()
pdi["model"] = ["LinearRegression"]
pdi["alpha"] = [0]
pdi["metric"] = ["mean_squared_error"]
pdi["E[scores]"] = [np.mean(scores)]
pdi["std[scores]"] = [np.std(scores)]
pdi["scores"] = [scores]
return pdi
def cv_pilot( self):
self.rx_pilot()
return self.cv_pilot_only()
def _cv_pilot_reg_only_r0(self, alpha = 0):
model = self.model
yT_a = self.rx_p["yT_a"]
x_a = self.rx_p["x_a"]
# kf = KFold()
# loo = cross_validation.LeaveOneOut( x_a.shape[0])
if alpha == 0:
lm = linear_model.LinearRegression()
else:
lm = getattr( linear_model, model)(alpha)
scores = codes.cross_val_score_loo( lm, yT_a, x_a)
return scores
def cv_pilot_reg_only(self, alpha = 0):
model = self.model
yT_a = self.rx_p["yT_a"]
x_a = self.rx_p["x_a"]
# kf = KFold()
# loo = cross_validation.LeaveOneOut( x_a.shape[0])
if alpha == 0:
lm = linear_model.LinearRegression()
else:
lm = getattr( linear_model, model)(alpha)
scores = codes.cross_val_score_loo( lm, yT_a, x_a)
        # Output is stored with environmental variables.
pdi = pd.DataFrame()
pdi["model"] = [model]
pdi["alpha"] = [alpha]
pdi["metric"] = ["mean_squared_error"]
pdi["E[scores]"] = [np.mean(np.power(scores,2))] # MSE
pdi["std[scores]"] = ["t.b.d."]
pdi["scores"] = [scores]
return pdi
def cv_pilot_reg( self, alpha = 0):
self.rx_pilot()
return self.cv_pilot_reg_only( alpha)
def _cv_pilot_reg_r0(self, alpha = 0):
"""
        Cross-validation scores are evaluated using LOO.
SNRpilot is equal to SNR, which is SNRdata.
"""
Npilot = self.Npilot
SNRpilot = self.SNR
model = self.model
BPSK, s_a, x_flat_a, x_a = gen_BPSK( Npilot, self.Nt)
# H_a = gen_H( self.Nr, self.Nt)
# H_a = self.H_a
y_a = gen_Rx( self.Nr, Npilot, SNRpilot, self.H_a, x_a)
yT_a = y_a.T
# kf = KFold()
# loo = cross_validation.LeaveOneOut( x_a.shape[0])
if alpha == 0:
lm = linear_model.LinearRegression()
else:
lm = getattr( linear_model, model)(alpha)
scores = codes.cross_val_score_loo( lm, yT_a, x_a)
return scores
def _gen_WR_pilot_ch_r0(self, pilot_SNRdB, alpha = 0):
"""
The reception process with pilot channel estimation
is conducted.
"""
Npilot = 10
SNRpilot = db2var( pilot_SNRdB)
BPSK, s_a, x_flat_a, x_a = gen_BPSK( Npilot, self.Nt)
# H_a = gen_H( self.Nr, self.Nt)
# H_a = self.H_a
y_a = gen_Rx( self.Nr, Npilot, SNRpilot, self.H_a, x_a)
yT_a = y_a.T
# print( x_a.shape, yT_a.shape)
lm = linear_model.Ridge( alpha)
lm.fit( yT_a, x_a)
self.W_a = lm.coef_
# print( "np.dot( W_a, H_a) =", np.dot( self.W_a, self.H_a))
self.gen_Decoding()
def _gen_WR_pilot_ch_r1(self, pilot_SNRdB, alpha = 0, model = "Ridge"):
"""
The reception process with pilot channel estimation
is conducted.
"""
Npilot = 10
SNRpilot = db2var( pilot_SNRdB)
BPSK, s_a, x_flat_a, x_a = gen_BPSK( Npilot, self.Nt)
# H_a = gen_H( self.Nr, self.Nt)
# H_a = self.H_a
y_a = gen_Rx( self.Nr, Npilot, SNRpilot, self.H_a, x_a)
yT_a = y_a.T
# print( x_a.shape, yT_a.shape)
# Now you can use either Ridge or Lasso methods.
#lm = linear_model.Ridge( alpha)
lm = getattr( linear_model, model)(alpha)
lm.fit( yT_a, x_a)
self.W_a = lm.coef_
# print( "np.dot( W_a, H_a) =", np.dot( self.W_a, self.H_a))
self.gen_Decoding()
def gen_WR_pilot_ch(self, pilot_SNRdB, alpha_l1r = 0, model = "Ridge"):
"""
The reception process with pilot channel estimation
is conducted.
"""
Npilot = self.Npilot
SNRpilot = db2var( pilot_SNRdB)
BPSK, s_a, x_flat_a, x_a = gen_BPSK( Npilot, self.Nt)
# H_a = gen_H( self.Nr, self.Nt)
# H_a = self.H_a
y_a = gen_Rx( self.Nr, Npilot, SNRpilot, self.H_a, x_a)
yT_a = y_a.T
# print( x_a.shape, yT_a.shape)
# Now you can use either Ridge or Lasso methods.
#lm = linear_model.Ridge( alpha)
if model == "ElasticNet":
lm = linear_model.ElasticNet( alpha_l1r[0], alpha_l1r[1])
else:
lm = getattr( linear_model, model)(alpha_l1r)
lm.fit( yT_a, x_a)
self.W_a = lm.coef_
# print( "np.dot( W_a, H_a) =", np.dot( self.W_a, self.H_a))
self.gen_Decoding()
def gen_WR_pilot_only(self, alpha_l1r = 0):
"""
        yT_a and x_a were prepared already.
        Now W_a is calculated using alpha, and the data
        are then decoded.
        For linear regression, alpha_l1r must be 0 (the default).
"""
yT_a = self.rx_p["yT_a"]
x_a = self.rx_p["x_a"]
# for alpha == 0, model is changed to linear regression.
if alpha_l1r == 0:
model = "LinearRegression"
else:
model = self.model
if model == "LinearRegression":
lm = linear_model.LinearRegression()
elif model == "ElasticNet":
lm = linear_model.ElasticNet( alpha_l1r[0], alpha_l1r[1])
else: # This is either Ridge or Lasso
lm = getattr( linear_model, model)(alpha_l1r)
lm.fit( yT_a, x_a)
self.W_a = lm.coef_
# print( "np.dot( W_a, H_a) =", np.dot( self.W_a, self.H_a))
self.gen_Decoding()
def gen_WR( self, pilot_SNRdB = None):
        if pilot_SNRdB:
            self.gen_WR_pilot( pilot_SNRdB)
        else:
            self.gen_WR_ideal()
def gen_Decoding(self):
"""
The reception process is conducted.
"""
self.W_a = normalize( self.W_a) # not important (useless at this moment)
self.rT_a = np.dot( self.W_a, self.y_a)
self.r_flat_a = self.rT_a.T.flatten()
#print( "type( self.r_flat_a), type( self.BPSK)")
#print( type( self.r_flat_a), type( self.BPSK))
# self.sd_a = jpyx.mld( self.r_flat_a, self.BPSK)
self.sd_a = jpyx.mld_fast( self.r_flat_a, self.BPSK)
self.BER, self.Nerr = calc_BER( self.s_a, self.sd_a)
def run_ideal( self, param_NtNrNxSNRdB = None, Nloop = 10, disp = False):
"""
A system is run from the transmitter to the receiver.
"""
return self.run_pilot( param_NtNrNxSNRdB = param_NtNrNxSNRdB, Nloop = Nloop, disp = disp)
def run_pilot( self, pilot_SNRdB = None, param_NtNrNxSNRdB = None, Nloop = 10, disp = False):
"""
A system is run from the transmitter to the receiver.
"""
if param_NtNrNxSNRdB:
self.set_param( param_NtNrNxSNRdB)
self.gen_BPSK()
BER_l = list()
Nerr_total = 0
for nloop in range( Nloop):
self.gen_H()
self.gen_Rx()
if pilot_SNRdB is not None:
self.gen_WR_pilot( pilot_SNRdB)
else:
self.gen_WR_ideal()
BER_l.append( self.BER)
Nerr_total += self.Nerr
self.BER = np.mean( BER_l)
if disp:
Ntot = self.Nt * self.Nx * Nloop
print( "BER is {} with {}/{} errors at {} SNRdB ".format( self.BER, Nerr_total, Ntot, self.SNRdB))
return self.BER
def run_pilot_channel( self, pilot_SNRdB = None, param_NtNrNxSNRdB = None, Nloop = 10, disp = False):
"""
A system is run from the transmitter to the receiver.
"""
if param_NtNrNxSNRdB:
self.set_param( param_NtNrNxSNRdB)
self.gen_BPSK()
BER_l = list()
Nerr_total = 0
for nloop in range( Nloop):
self.gen_H()
self.gen_Rx()
if pilot_SNRdB is not None:
# self.gen_WR_pilot( pilot_SNRdB)
self.gen_WR_pilot_channel( pilot_SNRdB)
# self.gen_WR_pilot_ch( pilot_SNRdB, alpha)
else:
self.gen_WR_ideal()
BER_l.append( self.BER)
Nerr_total += self.Nerr
self.BER = np.mean( BER_l)
if disp:
Ntot = self.Nt * self.Nx * Nloop
print( "BER is {} with {}/{} errors at {} SNRdB ".format( self.BER, Nerr_total, Ntot, self.SNRdB))
return self.BER
def run_pilot_ch( self, pilot_SNRdB = None, param_NtNrNxSNRdB = None, Nloop = 10, alpha = 0, disp = False):
"""
A system is run from the transmitter to the receiver.
"""
if param_NtNrNxSNRdB:
self.set_param( param_NtNrNxSNRdB)
self.gen_BPSK()
BER_l = list()
Nerr_total = 0
for nloop in range( Nloop):
self.gen_H()
self.gen_Rx()
if pilot_SNRdB:
# self.gen_WR_pilot( pilot_SNRdB)
# self.gen_WR_pilot_channel( pilot_SNRdB)
self.gen_WR_pilot_ch( pilot_SNRdB, alpha)
else:
self.gen_WR_ideal()
BER_l.append( self.BER)
Nerr_total += self.Nerr
self.BER = np.mean( BER_l)
if disp:
Ntot = self.Nt * self.Nx * Nloop
print( "BER is {} with {}/{} errors at {} SNRdB ".format( self.BER, Nerr_total, Ntot, self.SNRdB))
return self.BER
def test_ridge_iter( self, alpha_l):
# Ideal ZF(H)
ID = 0
self.method = "Ideal ZF(H)"
self.model = "ZF"
self.alpha = 0
self.gen_WR_ideal()
yield ID
# Multiple Ridge regressions with alpha_l
for alpha in alpha_l:
ID += 1
self.method = "Ridge each"
self.model = "Ridge"
self.alpha = alpha
self.gen_WR_pilot_only( self.alpha)
yield ID
# Ridge regression with the best alpha among alpha_l
ID += 1
self.method = "Ridge best"
self.model = "Ridge"
self.alpha = self.gs_pilot_reg_best( alpha_l)
self.gen_WR_pilot_only( self.alpha)
yield ID
def test_ridge_all( self, pdi_d_prev, alpha_l):
"""
1. LinearRegression
2. multiple Ridge regression with each alpha in alpha_l
3. Ridge regression with the best alpha among alpha_l
"""
# pdi_d is generated only once.
if pdi_d_prev is None:
pdi_d = dict()
else:
pdi_d = pdi_d_prev
for ID in self.test_ridge_iter(alpha_l):
"""
If pdi_l is not defined yet,
it will be generated first and initial values are stored.
Otherwise, new data are added for the corresponding space.
"""
if pdi_d_prev is None:
pdi = pd.DataFrame()
pdi["Nerr_total"] = [0]
pdi["BER_l"] = [[self.BER]]
else:
pdi = pdi_d[ ID]
pdi["Nerr_total"] = [ pdi["Nerr_total"][0] + self.Nerr]
pdi["BER_l"] = [pdi["BER_l"][0] + [self.BER]]
pdi["method"] = [self.method]
pdi["model"] = [self.model]
pdi["alpha"] = [self.alpha]
# print( 'pdi["BER_l"]', pdi["BER_l"])
pdi["BER"] = [np.mean( pdi["BER_l"][0])]
pdi_d[ ID] = pdi
return pdi_d
def run_gs_pilot_Ridge( self, alpha_l):
"""
Search the best alpha using Ridge.
I focus on Ridge for simplicity at this moment.
Other regularization modes will be used later on.
"""
Nloop = self.Nloop
pdi_d = None
for nloop in range( Nloop):
self.gen_BPSK()
self.gen_H()
self.gen_Rx()
            # For a fair comparison, the pilot is generated once and shared by all methods.
self.rx_pilot()
pdi_d = self.test_ridge_all( pdi_d, alpha_l)
pdo = pd.DataFrame()
for pdi in pdi_d.values():
pdo = pdo.append( pdi, ignore_index = True)
return pdo
def run_pilot_ch_model( self, pilot_SNRdB = None, param_NtNrNxSNRdB = None, Nloop = 10, alpha = 0, disp = False):
"""
A system is run from the transmitter to the receiver.
self.model is used to determine the regression model such as Ridge and Lasso
"""
if param_NtNrNxSNRdB:
self.set_param( param_NtNrNxSNRdB)
self.gen_BPSK()
BER_l = list()
Nerr_total = 0
for nloop in range( Nloop):
self.gen_H()
self.gen_Rx()
if pilot_SNRdB is not None: # 'is' needed for checking None
# self.gen_WR_pilot( pilot_SNRdB)
# self.gen_WR_pilot_channel( pilot_SNRdB)
self.gen_WR_pilot_ch( pilot_SNRdB, alpha, self.model)
else:
self.gen_WR_ideal()
BER_l.append( self.BER)
Nerr_total += self.Nerr
self.BER = np.mean( BER_l)
if disp:
Ntot = self.Nt * self.Nx * Nloop
print( "BER is {} with {}/{} errors at {} SNRdB ".format( self.BER, Nerr_total, Ntot, self.SNRdB))
return self.BER
def get_BER_pilot_ch_model_eqsnr(
self,
SNRdB_l = [5,6,7],
param_NtNrNx = (2,4,100),
Nloop = 1000,
pilot_ch = False,
alpha = 0,
model = "Ridge"):
"""
Ridge regression will be using to estimate channel.
If alpha is zero, linear regression will be applied.
If alpha is more than zero, Ridge regression will be applied.
The default value of alpha is zero.
"""
Nt, Nr, Nx = param_NtNrNx
BER_pilot = list()
for SNRdB in SNRdB_l:
# if pilot channel is used, SNRdB is given
# Otherwise, ideal channel estimation is assumed.
if pilot_ch:
pilot_SNRdB = SNRdB
else:
pilot_SNRdB = None
if alpha > 0:
"""
Ridge or Lasso is used.
"""
self.model = model
ber = self.run_pilot_ch_model( pilot_SNRdB = pilot_SNRdB,
param_NtNrNxSNRdB =(Nt, Nr, Nx, SNRdB), Nloop = Nloop, alpha = alpha, disp = True)
BER_pilot.append( ber)
else:
"""
LinearRegression is used.
"""
ber = self.run_pilot_channel( pilot_SNRdB = pilot_SNRdB,
param_NtNrNxSNRdB =(Nt, Nr, Nx, SNRdB), Nloop = Nloop, disp = True)
BER_pilot.append( ber)
# print( "List of average BERs =", BER_pilot)
return BER_pilot
def get_BER_pilot_ch_model( self,
SNRdB_l = [5,6,7],
param_NtNrNx = (2,4,100),
Nloop = 1000,
pilot_SNRdB = None,
alpha = 0,
model = "Ridge"):
"""
Ridge regression will be using to estimate channel.
If alpha is zero, linear regression will be applied.
If alpha is more than zero, Ridge regression will be applied.
The default value of alpha is zero.
This function becomes a member function of class MIMO.
"""
BER_pilot = list()
Nt, Nr, Nx = param_NtNrNx
if alpha > 0:
"""
Ridge or Lasso is used.
"""
for SNRdB in SNRdB_l:
self.model = model
ber = self.run_pilot_ch_model( pilot_SNRdB = pilot_SNRdB,
param_NtNrNxSNRdB =(Nt, Nr, Nx, SNRdB), Nloop = Nloop, alpha = alpha, disp = True)
BER_pilot.append( ber)
else:
"""
LinearRegression is used.
"""
for SNRdB in SNRdB_l:
ber = self.run_pilot_channel( pilot_SNRdB = pilot_SNRdB,
param_NtNrNxSNRdB =(Nt, Nr, Nx, SNRdB), Nloop = Nloop, disp = True)
BER_pilot.append( ber)
# print( "List of average BERs =", BER_pilot)
return BER_pilot
def get_BER( SNRdB_l = [5,6,7], param_NtNrNx = (2,4,100), Nloop = 1000, pilot_SNRdB = None):
BER_pilot = list()
Nt, Nr, Nx = param_NtNrNx
for SNRdB in SNRdB_l:
ber = MIMO().run_pilot( pilot_SNRdB = pilot_SNRdB,
param_NtNrNxSNRdB =(Nt, Nr, Nx, SNRdB), Nloop = Nloop, disp = True)
BER_pilot.append( ber)
# print( "List of average BERs =", BER_pilot)
return BER_pilot
def get_BER_pilot_ch( SNRdB_l = [5,6,7], param_NtNrNx = (2,4,100), Nloop = 1000, pilot_SNRdB = None, alpha = 0):
"""
Ridge regression will be using to estimate channel.
If alpha is zero, linear regression will be applied.
If alpha is more than zero, Ridge regression will be applied.
The default value of alpha is zero.
"""
BER_pilot = list()
Nt, Nr, Nx = param_NtNrNx
if alpha > 0:
"""
        Ridge regression is used.
"""
for SNRdB in SNRdB_l:
ber = MIMO().run_pilot_ch( pilot_SNRdB = pilot_SNRdB,
param_NtNrNxSNRdB =(Nt, Nr, Nx, SNRdB), Nloop = Nloop, alpha = alpha, disp = True)
BER_pilot.append( ber)
else:
"""
        LinearRegression is used.
"""
for SNRdB in SNRdB_l:
ber = MIMO().run_pilot_channel( pilot_SNRdB = pilot_SNRdB,
param_NtNrNxSNRdB =(Nt, Nr, Nx, SNRdB), Nloop = Nloop, disp = True)
BER_pilot.append( ber)
# print( "List of average BERs =", BER_pilot)
return BER_pilot
def get_BER_pilot_ch_model(
SNRdB_l = [5,6,7],
param_NtNrNx = (2,4,100),
Nloop = 1000,
pilot_SNRdB = None,
alpha = 0,
model = "Ridge"):
"""
Ridge regression will be using to estimate channel.
If alpha is zero, linear regression will be applied.
If alpha is more than zero, Ridge regression will be applied.
The default value of alpha is zero.
"""
BER_pilot = list()
Nt, Nr, Nx = param_NtNrNx
if alpha > 0:
"""
Ridge or Lasso is used.
"""
for SNRdB in SNRdB_l:
ber = MIMO( model = model).run_pilot_ch_model( pilot_SNRdB = pilot_SNRdB,
param_NtNrNxSNRdB =(Nt, Nr, Nx, SNRdB), Nloop = Nloop, alpha = alpha, disp = True)
BER_pilot.append( ber)
else:
"""
LinearRegression is used.
"""
for SNRdB in SNRdB_l:
ber = MIMO().run_pilot_channel( pilot_SNRdB = pilot_SNRdB,
param_NtNrNxSNRdB =(Nt, Nr, Nx, SNRdB), Nloop = Nloop, disp = True)
BER_pilot.append( ber)
# print( "List of average BERs =", BER_pilot)
return BER_pilot
def pd_gen_4_snr_pilot(Method, BER_l, alpha = None, Npilot = 10,
sim_task = "Fixed SNRpilot", pilot_SNRdB = 7,
param_NtNrNx = (2,10,100), SNRdB_l = range(-5, 5, 5)):
"""
This is a generalized pd_gen() which can be used for both
fixed_snr_pilot() and snr_snr_pilot().
"""
pdi = pd.DataFrame()
pdi["Simulation task"] = [ sim_task] * len( BER_l)
pdi["Method"] = [ Method] * len( BER_l)
if type(pilot_SNRdB) is list:
pdi["SNRpilot"] = pilot_SNRdB
else:
pdi["SNRpilot"] = [pilot_SNRdB] * len( BER_l)
pdi["#pilots"] = [Npilot] * len( BER_l)
pdi["Nt,Nr,Nx"] = [param_NtNrNx] * len( BER_l)
if alpha is None:
pdi["alpha"] = ["Not defined"] * len( BER_l)
else:
pdi["alpha"] = [alpha] * len( BER_l)
pdi["SNR"] = SNRdB_l
pdi["BER"] = BER_l
return pdi
def fixed_snr_pilot( SNRdB_l = range(-5, 5, 1), param_NtNrNx = (2,10,100), pilot_SNRdB = 7,
alpha_l = [0.01, 0.1, 1, 10, 100], Nloop = 5000):
"""
    Simulate BER for the fixed-SNRpilot case;
    the results are saved to a pandas DataFrame.
    The basic parameters are given by the input arguments.
"""
def pd_gen(Method, BER_l, alpha = None, Npilot = 10):
"""
This is a meta-function of pd_gen_4_snr_pilot()
"""
return pd_gen_4_snr_pilot( Method = Method, BER_l = BER_l, Npilot = Npilot, alpha = alpha,
sim_task = "Fixed SNRpilot", pilot_SNRdB = pilot_SNRdB,
param_NtNrNx = param_NtNrNx, SNRdB_l = SNRdB_l)
pdi_l = list()
BER_l = get_BER( SNRdB_l, param_NtNrNx = param_NtNrNx, Nloop = Nloop, pilot_SNRdB = None)
pdi_l.append( pd_gen( "Ideal, ZF Rx", BER_l))
BER_l = get_BER_pilot_ch( SNRdB_l, param_NtNrNx = param_NtNrNx, Nloop = Nloop, pilot_SNRdB = pilot_SNRdB)
pdi_l.append( pd_gen( r"Pilot, $\alpha$=0 (MMSE)", BER_l, alpha = 0))
for alpha in alpha_l:
BER_l = get_BER_pilot_ch( SNRdB_l, param_NtNrNx = param_NtNrNx, Nloop = Nloop,
pilot_SNRdB = pilot_SNRdB, alpha = alpha)
pdi_l.append( pd_gen( r"Pilot, $\alpha$={}".format(alpha),BER_l, alpha))
pdo = pd.concat( pdi_l, ignore_index = True)
return pdo
def snr_snr_pilot( SNRdB_l = range(-5, 5, 1), param_NtNrNx = (2,10,100),
alpha_l = [0.01, 0.1, 1, 10, 100], Npilot = 15, Nloop = 5000):
"""
    Simulate BER for the case where SNRpilot equals the data SNR;
    the results are saved to a pandas DataFrame.
    The basic parameters are given by the input arguments.
"""
def pd_gen(Method, BER_l, alpha = None):
"""
This is a meta-function of pd_gen_4_snr_pilot()
"""
return pd_gen_4_snr_pilot( Method = Method, BER_l = BER_l, alpha = alpha,
Npilot = Npilot, sim_task = "SNRpilot = SNR", pilot_SNRdB = SNRdB_l,
param_NtNrNx = param_NtNrNx, SNRdB_l = SNRdB_l)
pdi_l = list()
mlm = MIMO( Npilot = Npilot)
print( "Ideal channel estimation without considering noise: ZF decoding with perfect H")
BER_l = mlm.get_BER_pilot_ch_model_eqsnr( SNRdB_l, param_NtNrNx = param_NtNrNx,
Nloop = Nloop, pilot_ch = False)
pdi_l.append( pd_gen( "Ideal, ZF Rx", BER_l))
print( "General channel estimation: MMSE decoding with H and noise")
BER_l = mlm.get_BER_pilot_ch_model_eqsnr( SNRdB_l, param_NtNrNx = param_NtNrNx,
Nloop = Nloop, pilot_ch = True)
pdi_l.append( pd_gen( r"Pilot, $\alpha$=0 (MMSE)", BER_l, alpha = 0))
print( "Ridge channel estimation: MMSE decoding with H and noise")
for alpha in alpha_l:
print( "Ridge with alpha =", alpha)
BER_l = mlm.get_BER_pilot_ch_model_eqsnr( SNRdB_l, param_NtNrNx = param_NtNrNx,
Nloop = Nloop, pilot_ch = True, alpha = alpha, model = "Ridge")
pdi_l.append( pd_gen( r"Pilot, $\alpha$={}".format(alpha),BER_l, alpha))
pdo = pd.concat( pdi_l, ignore_index = True)
return pdo
| mit | -8,609,976,906,017,575,000 | 24.850195 | 114 | 0.629864 | false | 2.340291 | false | false | false |
gene1wood/trophy-store | trophystore/trophystore/models.py | 1 | 5171 | from django.db import models
import ast
# from utils import get_config
class ListField(models.TextField):
__metaclass__ = models.SubfieldBase
description = "Stores a python list"
def __init__(self, *args, **kwargs):
super(ListField, self).__init__(*args, **kwargs)
def to_python(self, value):
if not value:
value = []
if isinstance(value, list):
return value
return ast.literal_eval(value)
def get_prep_value(self, value):
if value is None:
return value
return unicode(value)
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return self.get_db_prep_value(value, None)
class CertDestinations(models.Model):
name = models.CharField(max_length=255,
help_text="Name of the destination")
iam_role_arn = models.CharField(max_length=255,
help_text="ARN of the IAM Role to assume when accessing the AWS ELB"
"destination.")
zlb_hostname = models.CharField(max_length=253,
help_text="DNS name of the Zeus load balancer destination")
username = models.CharField(max_length=255,
help_text="Username to access the destination")
password = models.CharField(max_length=255,
help_text="Password to access the destination")
DESTINATION_TYPE_CHOICES = (
('', 'None'),
('elb', 'AWS ELB'),
('zlb', 'Zeus Load Balancer'),
('usr', 'The user')
)
type = models.CharField(max_length=3,
choices=DESTINATION_TYPE_CHOICES,
default='',
blank=True)
def __unicode__(self):
return self.name
class Certificate(models.Model):
common_name = models.CharField(max_length=255,
help_text="Primary DNS name for the certificate")
sans = ListField(blank=True,
help_text="List of alternative DNS names for the certificate")
validity = models.PositiveSmallIntegerField(default=1,
help_text="Number of years certificate is valid for")
server_type = models.SmallIntegerField(blank=True, default="-1",
help_text="2: Apache, 45: Nginx, -1: Other")
signature_hash = models.CharField(max_length=255,
blank=True, help_text="sha1 or sha256", default="sha256")
org_unit = models.CharField(max_length=255,
blank=True)
org_name = models.CharField(max_length=255,
help_text="Mozilla Foundation or Mozilla Corporation")
org_addr1 = models.CharField(max_length=255,
default="331 E Evelyn Ave")
org_addr2 = models.CharField(max_length=255, blank=True)
org_city = models.CharField(max_length=255,
default="Mountain View")
org_state = models.CharField(max_length=255,
default="CA")
org_zip = models.CharField(max_length=255,
default="94041")
org_country = models.CharField(max_length=255,
default="US")
# telephone = models.CharField(max_length=255)
# org_contact_job_title = models.CharField(max_length=255)
# org_contact_firstname = models.CharField(max_length=255)
# org_contact_lastname = models.CharField(max_length=255)
# org_contact_email = models.EmailField()
# org_contact_telephone = models.CharField(max_length=255)
# org_contact_telephone_ext = models.CharField(max_length=255)
ev = models.BooleanField(default=False)
destinations = models.ManyToManyField(CertDestinations)
private_key = models.TextField(max_length=16384, blank=True)
certificate_request = models.TextField(max_length=16384, blank=True)
request_id = models.IntegerField(null=True)
order_id = models.IntegerField(null=True)
serial = models.TextField(max_length=32, blank=True)
certificate = models.TextField(max_length=16384, blank=True)
intermediate_cert = models.TextField(max_length=2097152, blank=True)
root_cert = models.TextField(max_length=16384, blank=True)
pkcs7 = models.TextField(max_length=2097152, blank=True)
business_unit = models.CharField(max_length=255,
blank=True)
STATE_CHOICES = (
('', 'None'),
('req', 'Requested'),
('rej', 'Rejected'),
('app', 'Approved'),
('iss', 'Issued'),
('dep', 'Deployed')
)
REQUESTED = 'req'
REJECTED = 'rej'
APPROVED = 'app'
ISSUED = 'iss'
DEPLOYED = 'dep'
state = models.CharField(max_length=3,
choices=STATE_CHOICES,
default='',
blank=True)
# RFC 5280
openssl_arg_map = {'common_name': 'commonName',
'org_city': 'localityName',
'org_country': 'countryName',
'org_unit': 'organizationalUnitName',
'org_name': 'organizationName',
'org_state': 'stateOrProvinceName'}
def __unicode__(self):
return self.common_name
| bsd-3-clause | 1,828,234,501,277,176,000 | 36.201439 | 77 | 0.594276 | false | 3.950344 | false | false | false |
nugget/python-insteonplm | tests/mockCallbacks.py | 1 | 2602 | """Mock callback module to support device and state testing."""
import logging
_LOGGER = logging.getLogger(__name__)
# pylint: disable=unused-argument
# pylint: disable=too-many-instance-attributes
class MockCallbacks():
"""Mock callback class to support device and state testing."""
def __init__(self):
"""Init the MockCallbacks Class."""
self.callbackvalue1 = None
self.callbackvalue2 = None
self.callbackvalue3 = None
self.callbackvalue4 = None
self.callbackvalue5 = None
self.callbackvalue6 = None
self.callbackvalue7 = None
self.callbackvalue8 = None
self.callbackvalue9 = None
def callbackmethod1(self, addr, state, value):
"""Receive notice of callback method 1."""
self._report_callback(1, addr, state, value)
self.callbackvalue1 = value
def callbackmethod2(self, addr, state, value):
"""Receive notice of callback method 2."""
self._report_callback(2, addr, state, value)
self.callbackvalue2 = value
def callbackmethod3(self, addr, state, value):
"""Receive notice of callback method 3."""
self._report_callback(3, addr, state, value)
self.callbackvalue3 = value
def callbackmethod4(self, addr, state, value):
"""Receive notice of callback method 5."""
self._report_callback(4, addr, state, value)
self.callbackvalue4 = value
def callbackmethod5(self, addr, state, value):
"""Receive notice of callback method 5."""
self._report_callback(5, addr, state, value)
self.callbackvalue5 = value
def callbackmethod6(self, addr, state, value):
"""Receive notice of callback method 6."""
self._report_callback(6, addr, state, value)
self.callbackvalue6 = value
def callbackmethod7(self, addr, state, value):
"""Receive notice of callback method 7."""
self._report_callback(7, addr, state, value)
self.callbackvalue7 = value
def callbackmethod8(self, addr, state, value):
"""Receive notice of callback method 8."""
self._report_callback(8, addr, state, value)
self.callbackvalue8 = value
def callbackmethod9(self, addr, state, value):
"""Receive notice of callback method 9."""
_LOGGER.debug('Called method 9 callback')
self.callbackvalue9 = value
@staticmethod
def _report_callback(callback, addr, state, value):
_LOGGER.debug('Called method %d for address %s group %s value %s',
callback, addr, state, value)
| mit | -8,229,655,377,828,727,000 | 35.138889 | 74 | 0.641045 | false | 4.258592 | false | false | false |
guillempalou/scikit-cv | skcv/multiview/two_views/fundamental_matrix.py | 1 | 7756 | import numpy as np
from numpy.linalg import svd
from math import log
from scipy.optimize import leastsq
from skcv.multiview.util import normalize_points
def fundamental_matrix_from_two_cameras(camera1, camera2):
""" Computes the fundamental matrix from two projection
matrices
Parameters
----------
camera1: numpy array
Projection matrix of first camera
camera2: numpy array
Projection matrix of second camera
Returns
-------
Fundamental matrix
"""
Pp = np.linalg.pinv(camera1)
# camera center
u, d, vh = svd(camera1)
center = vh[3, :]
# epipole on the second image
e = np.dot(camera2, center)
se = np.array(((0, -e[2], e[1]),
(e[2], 0, -e[0]),
(-e[1], e[0], 0)))
f_matrix = np.dot(se, np.dot(camera2, Pp))
return f_matrix
def eight_point_algorithm(x1, x2):
""" Computes the fundamental matrix from 8 (or more) projection
point pairs
Parameters
----------
x1: numpy array
projections of points in the first image, in homogeneous coordinates
x2: numpy array
projections of points in the second image, in homogeneous coordinates
Returns
-------
F, the fundamental matrix satisfying x2.T * F * x1 = 0
"""
n_points = x1.shape[1]
if x2.shape[1] != n_points: # pragma: no cover
raise ValueError("Shape must be the same")
# normalize points
x1n, t1 = normalize_points(x1, is_homogeneous=True)
x2n, t2 = normalize_points(x2, is_homogeneous=True)
# build the vector
a = np.vstack((x2n[0, :] * x1n,
x2n[1, :] * x1n,
x2n[2, :] * x1n))
# find F in the normalized coordinates and transform it
u, d, vh = svd(a.T, full_matrices=True)
f_matrix = np.reshape(vh[8, :], (3, 3))
# force the rank 2 constraint
u, d, vh = svd(f_matrix, full_matrices=True)
d[2] = 0
f_matrix = np.dot(u, np.dot(np.diag(d), vh))
# transform coordinates
f_matrix = np.dot(t2.T, np.dot(f_matrix, t1))
return f_matrix
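# Minimal self-check sketch (an assumed addition; noise-free correspondences):
# build a ground-truth geometry from two cameras, project random 3D points in
# front of the first camera and verify that the estimated F satisfies the
# epipolar constraint x2^T F x1 ~ 0.
def _check_eight_point(n_points=20):
    camera1 = np.hstack((np.eye(3), np.zeros((3, 1))))
    camera2 = np.random.randn(3, 4)
    pts = np.vstack((np.random.randn(2, n_points),
                     np.random.rand(1, n_points) + 2.0,  # keep depth positive
                     np.ones((1, n_points))))
    x1 = np.dot(camera1, pts)
    x2 = np.dot(camera2, pts)
    f_matrix = eight_point_algorithm(x1, x2)
    residual = np.sum(x2 * np.dot(f_matrix, x1), axis=0)
    return np.abs(residual).max()  # ~0 up to numerical precision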
def right_epipole(f_matrix):
"""
Computes the right epipole (first image) of fundamental matrix
the right epipole satisfies Fe = 0
**Parameters**
f_matrix: numpy array
Fundamental matrix
**Returns**
the right epipole
"""
u, d, vh = svd(f_matrix)
return vh[2, :]
def left_epipole(f_matrix):
"""
Computes the right epipole (first image) of fundamental matrix
the left epipole satisfies Fe = 0
**Parameters**
f_matrix: numpy array
Fundamental matrix
**Returns**
the left epipole
"""
u, d, vh = svd(f_matrix)
return u[:, 2]
def canonical_cameras_from_f(f_matrix):
"""
Retrieves the two canonical cameras given a fundamental matrix
**Parameters**
f_matrix: numpy array
Fundamental matrix
**Returns**
one pair of canonical cameras
"""
# the first camera is the identity
camera1 = np.eye(3, 4)
e = left_epipole(f_matrix)
se = np.array(((0, -e[2], e[1]),
(e[2], 0, -e[0]),
(-e[1], e[0], 0)))
camera2 = np.hstack((np.dot(se, f_matrix), e[:, np.newaxis]))
return camera1, camera2
def sampson_error(x1, x2, f_matrix):
"""
Computes the sampson error for a set of point pairs
Parameters
----------
x1: numpy array
projections of points in the first image, in homogeneous coordinates
x2: numpy array
projections of points in the second image, in homogeneous coordinates
f_matrix: numpy_array
fundamental matrix
Returns
-------
sampson error of each point pair
"""
f_x1 = np.dot(f_matrix, x1)
f_x2 = np.dot(f_matrix.T, x2)
#get the denominator
den = np.sum(f_x1[:2, :] ** 2, axis=0) +\
np.sum(f_x2[:2, :] ** 2, axis=0)
#get the numerator
num = np.sum((x2 * f_x1), axis=0)**2
return num / den
def reprojection_error(x1, x2, f_matrix):
"""
    Computes the reprojection error for a set of point pairs
Parameters
----------
x1: numpy array
projections of points in the first image, in homogeneous coordinates
x2: numpy array
projections of points in the second image, in homogeneous coordinates
f_matrix: numpy_array
fundamental matrix
Returns
-------
reprojection error of each point pair
"""
def __sampson_residual(f, x1, x2):
"""
computes the residual of the sampson error
"""
f_matrix = np.reshape(f, (3, 3))
f_x1 = np.dot(f_matrix, x1)
f_x2 = np.dot(f_matrix.T, x2)
#get the denominator
den = np.sum(f_x1[:2, :] ** 2, axis=0) +\
np.sum(f_x2[:2, :] ** 2, axis=0)
#get the numerator
num = np.sum((x2 * f_x1), axis=0)
return num / np.sqrt(den)
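# Note: __sampson_residual returns the signed per-pair Sampson residual; it is
# the objective minimised by scipy's leastsq inside robust_f_estimation() when
# refine_result is True.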
def robust_f_estimation(x1, x2,
max_iter=1000,
distance='sampson',
n_samples=8,
prob = 0.99,
refine_result=True,
inlier_threshold=2):
""" Computes the fundamental matrix using the eight point algorithm
(Hartley 1997)
Parameters
----------
x1: numpy array
projections of points in the first image, in homogeneous coordinates
x2: numpy array
projections of points in the second image, in homogeneous coordinates
max_iter: int, optional
maximum number of iterations of the ransac algorithm
distance: string, option
distance to use to find inliers/outliers
n_samples: int, optional
number of points to samples at each RANSAC iteration
prob: float, optional
        target probability of drawing at least one outlier-free sample
refine_result: bool, optional
whether after RANSAC a non linear estimation is performed
inlier_threshold: float, optional
maximum distance to consider a point pair inlier
Returns
-------
F, the fundamental matrix satisfying x2.T * F * x1 = 0
"""
iteration = 0
n_points = x1.shape[1]
is_inlier = np.zeros(n_points, dtype=bool)
# variables to store the best result found
best_inliers = is_inlier
best_n_inliers = 0
while iteration < max_iter:
#select 8 points at random
idx = np.random.choice(n_points, n_samples, replace=False)
selected_x1 = x1[:, idx]
selected_x2 = x2[:, idx]
#get inliers
f_matrix = eight_point_algorithm(selected_x1,
selected_x2)
# find the error distance
if distance == 'sampson':
e = sampson_error(x1, x2, f_matrix)
else: # pragma : no cover
raise ValueError()
is_inlier = e < inlier_threshold
n_inliers = np.count_nonzero(is_inlier)
if n_inliers > best_n_inliers:
best_inliers = is_inlier
best_n_inliers = n_inliers
#update max_iterations if estimation is improved
# the epsilon (1e-10) is added in case of all inliers
eps = 1 - n_inliers / n_points
new_iter = log(1 - prob) / log(1e-10 + 1 - (1-eps)**n_samples)
if new_iter < max_iter:
max_iter = new_iter
iteration += 1
#refine the estimate using all inliers
best_x1 = x1[:, best_inliers]
best_x2 = x2[:, best_inliers]
f_matrix = eight_point_algorithm(best_x1, best_x2)
if refine_result:
if distance == 'sampson':
f = np.reshape(f_matrix, 9)
f_matrix, jac = leastsq(__sampson_residual, f, args=(best_x1, best_x2))
f_matrix = np.reshape(f_matrix, (3, 3))
return f_matrix | bsd-3-clause | 8,682,359,868,658,212,000 | 22.867692 | 83 | 0.582904 | false | 3.467143 | false | false | false |
gsteenss/Hacking-Team-Sweeper | simplescripts/sweeplinux.py | 1 | 1105 | #!/usr/bin/python2.7
# sweeplinux v0.1: a simple script to look for signs of HackingTeam RCS Linux agent
# gsteenss@riseup.net
#
# based on: https://github.com/0xPoly/Hacking-Team-Sweeper/blob/master/signatures/linux.md
import glob
import sys
from platform import platform,architecture
from os.path import expanduser
whoopsie=expanduser('~/.whoopsie*')
crashreports='/var/crash/.reports-*-*'
tmpreports='/var/tmp/.reports-*-*'
#print(sys.version,platform(),architecture())
ok=True
if glob.glob(whoopsie)!=[]:
print('WARNING: Detected HT whoopsie file in home directory, Your computer may be infected with a version of HackingTeam RCS Agent!')
ok=False
if glob.glob(crashreports)!=[]:
print('WARNING: Detected HT crash reports, Your computer may be infected with a version of HackingTeam RCS Agent!')
ok=False
if glob.glob(tmpreports)!=[]:
print('WARNING: Detected HT tmp reports, Your computer may be infected with a version of HackingTeam RCS Agent!')
ok=False
if ok:
print('OK: Nothing strange to report.')
else:
print('Please shutdown your network connection NOW!')
| agpl-3.0 | -2,293,747,633,987,668,500 | 26.625 | 135 | 0.741176 | false | 3.328313 | false | false | false |
gpmidi/MCEdit-Unified | viewports/chunk.py | 6 | 2421 | from camera import CameraViewport
from OpenGL import GL
import mceutils
class ChunkViewport(CameraViewport):
defaultScale = 1.0 # pixels per block
def __init__(self, *a, **kw):
CameraViewport.__init__(self, *a, **kw)
def setup_projection(self):
w, h = (0.5 * s / self.defaultScale
for s in self.size)
minx, maxx = - w, w
miny, maxy = - h, h
minz, maxz = -4000, 4000
GL.glOrtho(minx, maxx, miny, maxy, minz, maxz)
def setup_modelview(self):
x, y, z = self.cameraPosition
GL.glRotate(90.0, 1.0, 0.0, 0.0)
GL.glTranslate(-x, 0, -z)
def zoom(self, f):
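        # Zoom about the block under the cursor: the camera slides along the
        # cursor-to-camera offset as the pixels-per-block scale changes, so the
        # hovered block stays (approximately) fixed on screen.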
x, y, z = self.cameraPosition
mx, my, mz = self.blockFaceUnderCursor[0]
dx, dz = mx - x, mz - z
s = min(4.0, max(1 / 16., self.defaultScale / f))
if s != self.defaultScale:
self.defaultScale = s
f = 1.0 - f
self.cameraPosition = x + dx * f, self.editor.level.Height, z + dz * f
self.editor.renderer.loadNearbyChunks()
incrementFactor = 1.4
def zoomIn(self):
self.zoom(1.0 / self.incrementFactor)
def zoomOut(self):
self.zoom(self.incrementFactor)
def mouse_down(self, evt):
if evt.button == 4: # wheel up - zoom in
# if self.defaultScale == 4.0:
# self.editor.swapViewports()
# else:
self.zoomIn()
elif evt.button == 5: # wheel down - zoom out
self.zoomOut()
else:
super(ChunkViewport, self).mouse_down(evt)
def rightClickDown(self, evt):
pass
def rightClickUp(self, evt):
pass
def mouse_move(self, evt):
pass
@mceutils.alertException
def mouse_drag(self, evt):
if evt.buttons[2]:
x, y, z = self.cameraPosition
dx, dz = evt.rel
self.cameraPosition = (
x - dx / self.defaultScale,
y,
z - dz / self.defaultScale)
else:
super(ChunkViewport, self).mouse_drag(evt)
def render(self):
super(ChunkViewport, self).render()
@property
def tooltipText(self):
text = super(ChunkViewport, self).tooltipText
if text == "1 W x 1 L x 1 H":
return None
return text
def drawCeiling(self):
pass
| isc | -7,397,464,745,655,106,000 | 25.604396 | 82 | 0.530772 | false | 3.478448 | false | false | false |
weareua/MarkIT | groups/views.py | 1 | 1326 | # -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponse
from django.core.urlresolvers import reverse_lazy
from django.views.generic import DeleteView
from django.contrib import messages
from groups.models import Group
def groups_list(request):
groups = Group.objects.all()
# order groups list
order_by = request.GET.get('order_by', '')
# ordering groups by title by default
groups = groups.order_by('title')
if order_by in ('title', 'leader', 'id'):
groups = groups.order_by(order_by)
if request.GET.get('reverse', '') == '1':
groups = groups.reverse()
return render(request, 'groups_list.html', {'groups': groups})
def groups_add(request):
return HttpResponse('<h1>Group Add Form</h1>')
def groups_edit(request, gid):
return HttpResponse('<h1>Edit Group %s</h1>' % gid)
class GroupDeleteView(DeleteView):
model = Group
template_name = 'groups_confirm_delete.html'
success_url = reverse_lazy('home')
success_message = u"Група видалена успішно."
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(GroupDeleteView, self).delete(
request,
*args,
**kwargs)
| mit | 5,010,538,765,097,069,000 | 25.12 | 66 | 0.657734 | false | 3.658263 | false | false | false |
kyokley/BattlePyEngine | src/battlePy/random_player.py | 1 | 1105 | import random
from battlePy.default_config import BOARD_HEIGHT, BOARD_WIDTH
from battlePy.player import Player
from battlePy.ship import RIGHT, UP
class RandomPlayer(Player):
def initPlayer(self):
self.name = 'RandomPlayer'
def placeShips(self):
for ship in self.ships:
isValid = False
while not isValid:
orientation = random.choice([UP, RIGHT])
if orientation == UP:
location = (
random.randint(0, BOARD_WIDTH - 1),
random.randint(0, BOARD_HEIGHT - 1 - ship.size),
)
else:
location = (
random.randint(0, BOARD_WIDTH - 1 - ship.size),
random.randint(0, BOARD_HEIGHT - 1),
)
ship.placeShip(location, orientation)
if self.isShipPlacedLegally(ship):
isValid = True
def fireShot(self):
return (random.randint(0, BOARD_WIDTH - 1), random.randint(0, BOARD_HEIGHT - 1))
| mit | -7,574,655,506,449,167,000 | 32.484848 | 88 | 0.517647 | false | 4.316406 | false | false | false |
lowerquality/gentle | gentle/util/cyst.py | 1 | 2574 | # Twisted lazy computations
# (from rmo-sketchbook/cyst/cyst.py)
import mimetypes
import os
from twisted.web.static import File
from twisted.web.resource import Resource
from twisted.web.server import Site, NOT_DONE_YET
from twisted.internet import reactor
class Insist(Resource):
isLeaf = True
def __init__(self, cacheloc):
self.cacheloc = cacheloc
self.cachefile = None
if os.path.exists(cacheloc):
self.cachefile = File(cacheloc)
self.reqs_waiting = []
self.started = False
Resource.__init__(self)
def render_GET(self, req):
# Check if someone else has created the file somehow
if self.cachefile is None and os.path.exists(self.cacheloc):
self.cachefile = File(self.cacheloc)
# Check if someone else has *deleted* the file
elif self.cachefile is not None and not os.path.exists(self.cacheloc):
self.cachefile = None
if self.cachefile is not None:
return self.cachefile.render_GET(req)
else:
self.reqs_waiting.append(req)
req.notifyFinish().addErrback(
self._nevermind, req)
if not self.started:
self.started = True
reactor.callInThread(self.desist)
return NOT_DONE_YET
def _nevermind(self, _err, req):
self.reqs_waiting.remove(req)
def desist(self):
self.serialize_computation(self.cacheloc)
reactor.callFromThread(self.resist)
def _get_mime(self):
return mimetypes.guess_type(self.cacheloc)[0]
def resist(self):
if not os.path.exists(self.cacheloc):
# Error!
print("%s does not exist - rendering fail!" % (self.cacheloc))
for req in self.reqs_waiting:
req.headers[b"Content-Type"] = b"text/plain"
req.write(b"cyst error")
req.finish()
return
self.cachefile = File(self.cacheloc)
# Send content to all interested parties
for req in self.reqs_waiting:
self.cachefile.render(req)
def serialize_computation(self, outpath):
        raise NotImplementedError
class HelloCyst(Insist):
def serialize_computation(self, outpath):
import time
time.sleep(10)
open(outpath, "w").write("Hello, World")
if __name__=='__main__':
import sys
c = HelloCyst(sys.argv[1])
site = Site(c)
port = 7984
reactor.listenTCP(port, site)
print("http://localhost:%d" % (port))
reactor.run()
| mit | -2,386,617,986,116,249,600 | 29.282353 | 78 | 0.607615 | false | 3.818991 | false | false | false |
jotes/pontoon | pontoon/base/tests/test_admin.py | 1 | 3321 | import pytest
from django.contrib.auth.models import User
from django.contrib.admin.sites import AdminSite
from django.test.client import RequestFactory
from pontoon.base.admin import UserAdmin
from pontoon.base.models import PermissionChangelog
from pontoon.test.factories import (
GroupFactory,
LocaleFactory,
)
@pytest.fixture
def locale_c():
translators_group = GroupFactory.create(name="locale translators",)
managers_group = GroupFactory.create(name="locale managers",)
return LocaleFactory.create(
code="nv",
name="Na'vi",
translators_group=translators_group,
managers_group=managers_group,
)
@pytest.fixture
def user_form_request():
"""
Mock for a request object which is passed to every django admin form.
"""
def _get_user_form_request(request_user, user, **override_fields):
rf = RequestFactory()
fields = (
"username",
"email",
"first_name",
"last_name",
)
form_request = {f: (getattr(user, f, "") or "") for f in fields}
form_request["date_joined_0"] = "2018-01-01"
form_request["date_joined_1"] = "00:00:00"
form_request.update(override_fields)
request = rf.post("/dummy/", form_request,)
request.user = request_user
return request
return _get_user_form_request
@pytest.fixture
def get_useradmin_form():
"""
Get a UserAdmin form instance.
"""
def _get_user_admin_form(request, user):
useradmin = UserAdmin(User, AdminSite(),)
form = useradmin.get_form(request=request, obj=user,)
return (
useradmin,
form(request.POST, instance=user, initial={"password": "password"},),
)
return _get_user_admin_form
@pytest.mark.django_db
def test_user_admin_form_log_no_changes(
user_a, user_b, user_form_request, get_useradmin_form,
):
_, form = get_useradmin_form(user_form_request(user_a, user_b), user_b,)
assert form.is_valid()
form.save()
assert list(PermissionChangelog.objects.all()) == []
@pytest.mark.django_db
def test_user_admin_form_log_add_groups(
locale_c,
user_a,
user_b,
user_form_request,
get_useradmin_form,
assert_permissionchangelog,
):
request = user_form_request(user_a, user_b, groups=[locale_c.managers_group.pk],)
useradmin, form = get_useradmin_form(request, user_b,)
assert form.is_valid()
useradmin.save_model(request, user_b, form, True)
(changelog_entry0,) = PermissionChangelog.objects.all()
assert_permissionchangelog(
changelog_entry0, "added", user_a, user_b, locale_c.managers_group,
)
@pytest.mark.django_db
def test_user_admin_form_log_removed_groups(
locale_c,
user_a,
user_b,
user_form_request,
get_useradmin_form,
assert_permissionchangelog,
):
user_b.groups.add(locale_c.managers_group)
request = user_form_request(user_a, user_b, groups=[],)
useradmin, form = get_useradmin_form(request, user_b,)
assert form.is_valid()
useradmin.save_model(request, user_b, form, True)
(changelog_entry0,) = PermissionChangelog.objects.all()
assert_permissionchangelog(
changelog_entry0, "removed", user_a, user_b, locale_c.managers_group,
)
| bsd-3-clause | 5,479,512,884,144,492,000 | 25.357143 | 85 | 0.650407 | false | 3.452183 | true | false | false |
public0821/nettest | nettest/tools/tcpreceiver.py | 1 | 1927 | #!/usr/bin/env python3
from nettest.sockets import TcpSocket
import argparse
import time
import select
import socket
class TcpReceiver(object):
def __init__(self):
self._setup_args()
def _setup_args(self):
parser = argparse.ArgumentParser(description=_("accept tcp connection and receive tcp message"))
parser.add_argument('--ip', type=str, help=_("Specifies the ip to bind"), default='0.0.0.0')
parser.add_argument('port', type=int, help=_("Specifies the port to bind"))
parser.add_argument('-q', '--quiet', action='store_true', help=_("Quiet mode, don't print the message received"))
self._args = parser.parse_args()
def run(self):
sock = TcpSocket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((self._args.ip, self._args.port))
sock.listen(10)
sockets = [sock,]
while True:
infds, outfds, errfds = select.select(sockets, [], [])
for fd in infds:
if fd == sock:
client, client_addr = sock.accept()
sockets.append(client)
if not self._args.quiet:
print(_("accept connection from {0}".format(client_addr)))
else:
buffer = fd.recv(1024)
if len(buffer) != 0:
if not self._args.quiet:
print(fd.getpeername(),buffer)
else:
client_addr = fd.getpeername()
fd.close()
if not self._args.quiet:
print(_("close connection from {0}".format(client_addr)))
sockets.remove(fd)
if __name__ == '__main__':
try:
tool = TcpReceiver()
tool.run()
except KeyboardInterrupt:
print()
| apache-2.0 | -5,926,593,848,506,962,000 | 37.54 | 121 | 0.514271 | false | 4.359729 | false | false | false |
goidor/chat-django | chat_django/chat/views.py | 1 | 2110 | from django.shortcuts import render, redirect
from django.views.generic import ListView
from django.views.generic.detail import DetailView
from django.db.models import Q
from django.views.generic.edit import (
CreateView,
UpdateView
)
from django.contrib.auth.models import User
from .models import Room, Message
from .forms import MessageSendForm
class ListUsers(ListView):
model = User
context_object_name = 'user_list'
queryset = User.objects.filter(is_active=True, is_superuser=False)
template_name = 'chat/user_list.html'
def list_chats(request, user_id):
if user_id is not None:
user = User.objects.get(id=user_id)
users = User.objects.filter(~Q(id=user_id), is_active=True, is_superuser=False)
return render(request, 'chat/room_list.html',
{'list_users': users,
'usuario': user.id})
else:
return render(request, 'chat/room_list.html')
def messages(request, user_id, room_id=None):
user = User.objects.get(id=user_id)
form = MessageSendForm()
if request.method == 'POST':
form = MessageSendForm(request.POST)
if form.is_valid():
#import pdb; pdb.set_trace()
room_chat = Room.objects.get(id=room_id)
message = form.save(commit=False)
message.message = request.POST['message']
message.room = room_chat
message.user = user
message.save()
if room_id:
room_chat, created = Room.objects.get_or_create(user=user_id)
#messages = Message.objects.filter(room=room_chat[0], user=)
messages = reversed(room_chat.messages.order_by('-time')[:50])
users = User.objects.filter(~Q(id=user_id), is_active=True, is_superuser=False)
return render(request, 'chat/chat.html',
{'messages': messages,
'users': users,
'user_chat': user.username,
'usuario': user.id,
'user_name': '%s %s' % (user.first_name, user.last_name),
'form': form})
else:
return render(request, 'chat/room_list.html')
| gpl-3.0 | -1,029,722,574,820,200,400 | 33.032258 | 87 | 0.627962 | false | 3.534338 | false | false | false |
navigator8972/ensemble_ioc | PyMDP_Utils.py | 1 | 6193 | """
Utilities for PyMDP module
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
def BarycentricInterpolation(bins, pnts):
"""
    Barycentric interpolation for the given points;
    returns the barycentric coordinates for points within the grids.
INPUT
bins - grids for discretization,
m-length array where bins[i] indicates the mesh along dimension i
pnts - an array of pnts, each points is an m-length indicates the Cartesian coordinates
can be n pnts in total
RETURN
        indices - an n-length list of indices; each is d-length (d=m+1) for the interpolating points involved
        coeffs - an n-length list of coefficients; each is d-length, for reconstructing point n
    A pythonic version of barycentricinterpolation from Russ' drake utility function;
does not support dcoefs currently...
"""
#note here the layout of input and output is different from the C++ version of drake
m = pnts.shape[1]
n = pnts.shape[0]
d = m+1
if len(bins) != m:
        print 'The number of bins must equal the dimension of the points.' # validation
return None, None
binsize = [len(bins[i]) for i in range(m)]
nskip = np.concatenate([[1], np.cumprod([binsize[i] for i in range(m-1)])])
#a list of bary points for future sorting...
b = [{'dim':0, 'fracway':0.0, 'dfracway':0.0} for i in range(d)]
indices = np.zeros((n, d))
coeffs = np.zeros((n, d))
for j in range(n):
sidx = 0 # 0-index in our case...
for i in range(m):
pt = pnts[j, i]
curr_bin = bins[i]
curr_bin_size = binsize[i]
b[i]['dim'] = i
if curr_bin_size == 1: #singleton dimensions
#sidx is unchanged
b[i]['fracway'] = 1.0
elif pt > curr_bin[curr_bin_size-1]:
#larger than max bound of bin
sidx += nskip[i] * (curr_bin_size-1)
b[i]['fracway'] = 1.0
b[i]['dfracway'] = 0.0
elif pt < curr_bin[0]:
#less than min bound of bin
sidx += nskip[i]
b[i]['fracway'] = 0.0
b[i]['dfracway'] = 0.0
else:
#Russ commented that smarter search can be done here...
#i guess we can do it in a pythonic way...
next_bin_index = np.argmax(curr_bin>pt)
sidx += nskip[i]*next_bin_index
b[i]['fracway'] = (pt - curr_bin[next_bin_index-1])/(curr_bin[next_bin_index]- curr_bin[next_bin_index-1])
b[i]['dfracway'] = 1./(curr_bin[next_bin_index]- curr_bin[next_bin_index-1])
#sort dimension based on fracway (lowest to highest)
b_sorted = sorted(b[:-1], key=lambda b_elem: b_elem['fracway'])
# final element of b_sorted,
b_sorted.append({'dim':m-1,'fracway':1.0, 'dfracway':0.0})
# top right corner
indices[j, 0] = sidx
coeffs[j, 0] = b_sorted[0]['fracway']
for i in range(m):
if binsize[b_sorted[i]['dim']] > 1:
                # support singleton dimensions
sidx -= nskip[b_sorted[i]['dim']]
indices[j, i+1] = sidx
coeffs[j, i+1] = b_sorted[i+1]['fracway'] - b_sorted[i]['fracway']
return indices, coeffs
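
# Illustrative usage sketch (added for clarity, not part of the original
# module; the mesh and query point below are made-up values). `bins` holds
# one 1-D mesh per dimension and `pnts` one query point per row; a grid-
# sampled function can then be interpolated from the returned barycentric
# coordinates:
#
#   bins = [np.array([0., 1., 2.]), np.array([0., 1., 2.])]
#   pnts = np.array([[0.5, 1.25]])
#   indices, coeffs = BarycentricInterpolation(bins, pnts)
#   # indices.shape == coeffs.shape == (1, 3), i.e. d = m + 1 grid points
#   # per query point, and coeffs[0].sum() == 1.0; an interpolated value is
#   # f_flat[indices[0].astype(int)].dot(coeffs[0]) for a suitably
#   # flattened grid of function values f_flat.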
def add_arrow_to_line2D(
axes, line, arrow_locs=[0.2, 0.4, 0.6, 0.8],
arrowstyle='-|>', arrowsize=1, transform=None):
"""
Add arrows to a matplotlib.lines.Line2D at selected locations.
Parameters:
-----------
axes:
    line: list of one Line2D object, as returned by the plot command
    arrow_locs: list of locations at which to insert arrows, as fractions of total length
arrowstyle: style of the arrow
arrowsize: size of the arrow
    transform: a matplotlib transform instance, defaults to data coordinates
Returns:
--------
arrows: list of arrows
"""
if (not(isinstance(line, list)) or not(isinstance(line[0],
mlines.Line2D))):
raise ValueError("expected a matplotlib.lines.Line2D object")
x, y = line[0].get_xdata(), line[0].get_ydata()
arrow_kw = dict(arrowstyle=arrowstyle, mutation_scale=10 * arrowsize)
color = line[0].get_color()
use_multicolor_lines = isinstance(color, np.ndarray)
if use_multicolor_lines:
raise NotImplementedError("multicolor lines not supported")
else:
arrow_kw['color'] = color
linewidth = line[0].get_linewidth()
if isinstance(linewidth, np.ndarray):
raise NotImplementedError("multiwidth lines not supported")
else:
arrow_kw['linewidth'] = linewidth
if transform is None:
transform = axes.transData
arrows = []
for loc in arrow_locs:
s = np.cumsum(np.sqrt(np.diff(x) ** 2 + np.diff(y) ** 2))
n = np.searchsorted(s, s[-1] * loc)
arrow_tail = (x[n], y[n])
arrow_head = (np.mean(x[n:n + 2]), np.mean(y[n:n + 2]))
p = mpatches.FancyArrowPatch(
arrow_tail, arrow_head, transform=transform,
**arrow_kw)
axes.add_patch(p)
arrows.append(p)
return arrows
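
# Minimal usage sketch (assumed example, not from the original file): the
# curve and figure objects are illustrative. Note that ax.plot() returns a
# list of Line2D objects, which is exactly what add_arrow_to_line2D expects:
#
#   fig, ax = plt.subplots()
#   t = np.linspace(0, 2 * np.pi, 200)
#   line = ax.plot(np.cos(t), np.sin(2 * t))
#   add_arrow_to_line2D(ax, line, arrow_locs=[0.25, 0.5, 0.75], arrowsize=1.5)
#   plt.show()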
def draw_err_bar_with_filled_shape(ax, x_data, y_data, err=None, color=(0, 0, 1), transp=0.2):
'''
wrapped function to draw curve with filled shape to indicate error bar
'''
line = None
shade = None
#validate the length of err
if err is not None:
if len(x_data) != len(y_data) or len(x_data) != len(err):
print 'The length of data and err must be consistent.'
return line, shade
line, = ax.plot(x_data, y_data, color=color)
ax.hold(True)
#<hyin/Jan-22nd-2016> linewidth=0 does not work on matplotlib 1.4.2, it is fixed on 1.4.3 though...
shade = plt.fill_between(x_data, y_data-err, y_data+err, alpha=transp, edgecolor=color, facecolor=color, linewidth=3.0)
return line, shade
else:
#just draw curve...
line, = ax.plot(x_data, y_data, color=color)
return line, shade | bsd-2-clause | 482,157,451,004,387,400 | 36.08982 | 127 | 0.582593 | false | 3.467525 | false | false | false |
Southpaw-TACTIC/Team | src/python/Lib/site-packages/PySide/examples/animation/animatedtiles/animatedtiles.py | 1 | 6785 | #!/usr/bin/env python
# This file was taken from Riverbank's examples,
# which was an adaptation of the original C++ Qt's examples.
from PySide import QtCore, QtGui
import animatedtiles_rc
# PyQt doesn't support deriving from more than one wrapped class so we use
# composition and delegate the property.
class Pixmap(QtCore.QObject):
def __init__(self, pix):
super(Pixmap, self).__init__()
self.pixmap_item = QtGui.QGraphicsPixmapItem(pix)
self.pixmap_item.setCacheMode(QtGui.QGraphicsItem.DeviceCoordinateCache)
def set_pos(self, pos):
self.pixmap_item.setPos(pos)
def get_pos(self):
return self.pixmap_item.pos()
pos = QtCore.Property(QtCore.QPointF, get_pos, set_pos)
class Button(QtGui.QGraphicsWidget):
pressed = QtCore.Signal()
def __init__(self, pixmap, parent=None):
super(Button, self).__init__(parent)
self._pix = pixmap
self.setAcceptHoverEvents(True)
self.setCacheMode(QtGui.QGraphicsItem.DeviceCoordinateCache)
def boundingRect(self):
return QtCore.QRectF(-65, -65, 130, 130)
def shape(self):
path = QtGui.QPainterPath()
path.addEllipse(self.boundingRect())
return path
def paint(self, painter, option, widget):
down = option.state & QtGui.QStyle.State_Sunken
r = self.boundingRect()
grad = QtGui.QLinearGradient(r.topLeft(), r.bottomRight())
if option.state & QtGui.QStyle.State_MouseOver:
color_0 = QtCore.Qt.white
else:
color_0 = QtCore.Qt.lightGray
color_1 = QtCore.Qt.darkGray
if down:
color_0, color_1 = color_1, color_0
grad.setColorAt(0, color_0)
grad.setColorAt(1, color_1)
painter.setPen(QtCore.Qt.darkGray)
painter.setBrush(grad)
painter.drawEllipse(r)
color_0 = QtCore.Qt.darkGray
color_1 = QtCore.Qt.lightGray
if down:
color_0, color_1 = color_1, color_0
grad.setColorAt(0, color_0)
grad.setColorAt(1, color_1)
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(grad)
if down:
painter.translate(2, 2)
painter.drawEllipse(r.adjusted(5, 5, -5, -5))
painter.drawPixmap(-self._pix.width() / 2, -self._pix.height() / 2,
self._pix)
def mousePressEvent(self, ev):
self.pressed.emit()
self.update()
def mouseReleaseEvent(self, ev):
self.update()
class View(QtGui.QGraphicsView):
def resizeEvent(self, event):
super(View, self).resizeEvent(event)
self.fitInView(self.sceneRect(), QtCore.Qt.KeepAspectRatio)
if __name__ == '__main__':
import sys
import math
app = QtGui.QApplication(sys.argv)
kineticPix = QtGui.QPixmap(':/images/kinetic.png')
bgPix = QtGui.QPixmap(':/images/Time-For-Lunch-2.jpg')
scene = QtGui.QGraphicsScene(-350, -350, 700, 700)
items = []
for i in range(64):
item = Pixmap(kineticPix)
item.pixmap_item.setOffset(-kineticPix.width() / 2,
-kineticPix.height() / 2)
item.pixmap_item.setZValue(i)
items.append(item)
scene.addItem(item.pixmap_item)
# Buttons.
buttonParent = QtGui.QGraphicsRectItem()
ellipseButton = Button(QtGui.QPixmap(':/images/ellipse.png'), buttonParent)
figure8Button = Button(QtGui.QPixmap(':/images/figure8.png'), buttonParent)
randomButton = Button(QtGui.QPixmap(':/images/random.png'), buttonParent)
tiledButton = Button(QtGui.QPixmap(':/images/tile.png'), buttonParent)
centeredButton = Button(QtGui.QPixmap(':/images/centered.png'), buttonParent)
ellipseButton.setPos(-100, -100)
figure8Button.setPos(100, -100)
randomButton.setPos(0, 0)
tiledButton.setPos(-100, 100)
centeredButton.setPos(100, 100)
scene.addItem(buttonParent)
buttonParent.scale(0.75, 0.75)
buttonParent.setPos(200, 200)
buttonParent.setZValue(65)
# States.
rootState = QtCore.QState()
ellipseState = QtCore.QState(rootState)
figure8State = QtCore.QState(rootState)
randomState = QtCore.QState(rootState)
tiledState = QtCore.QState(rootState)
centeredState = QtCore.QState(rootState)
# Values.
for i, item in enumerate(items):
# Ellipse.
ellipseState.assignProperty(item, 'pos',
QtCore.QPointF(math.cos((i / 63.0) * 6.28) * 250,
math.sin((i / 63.0) * 6.28) * 250))
# Figure 8.
figure8State.assignProperty(item, 'pos',
QtCore.QPointF(math.sin((i / 63.0) * 6.28) * 250,
math.sin(((i * 2)/63.0) * 6.28) * 250))
# Random.
randomState.assignProperty(item, 'pos',
QtCore.QPointF(-250 + QtCore.qrand() % 500,
-250 + QtCore.qrand() % 500))
# Tiled.
tiledState.assignProperty(item, 'pos',
QtCore.QPointF(((i % 8) - 4) * kineticPix.width() + kineticPix.width() / 2,
((i // 8) - 4) * kineticPix.height() + kineticPix.height() / 2))
# Centered.
centeredState.assignProperty(item, 'pos', QtCore.QPointF())
# Ui.
view = View(scene)
view.setWindowTitle("Animated Tiles")
view.setViewportUpdateMode(QtGui.QGraphicsView.BoundingRectViewportUpdate)
view.setBackgroundBrush(QtGui.QBrush(bgPix))
view.setCacheMode(QtGui.QGraphicsView.CacheBackground)
view.setRenderHints(
QtGui.QPainter.Antialiasing | QtGui.QPainter.SmoothPixmapTransform)
view.show()
states = QtCore.QStateMachine()
states.addState(rootState)
states.setInitialState(rootState)
rootState.setInitialState(centeredState)
group = QtCore.QParallelAnimationGroup()
for i, item in enumerate(items):
anim = QtCore.QPropertyAnimation(item, 'pos')
anim.setDuration(750 + i * 25)
anim.setEasingCurve(QtCore.QEasingCurve.InOutBack)
group.addAnimation(anim)
trans = rootState.addTransition(ellipseButton.pressed, ellipseState)
trans.addAnimation(group)
trans = rootState.addTransition(figure8Button.pressed, figure8State)
trans.addAnimation(group)
trans = rootState.addTransition(randomButton.pressed, randomState)
trans.addAnimation(group)
trans = rootState.addTransition(tiledButton.pressed, tiledState)
trans.addAnimation(group)
trans = rootState.addTransition(centeredButton.pressed, centeredState)
trans.addAnimation(group)
timer = QtCore.QTimer()
timer.start(125)
timer.setSingleShot(True)
trans = rootState.addTransition(timer.timeout, ellipseState)
trans.addAnimation(group)
states.start()
sys.exit(app.exec_())
| epl-1.0 | -753,075,092,396,192,300 | 29.426009 | 91 | 0.64112 | false | 3.48306 | false | false | false |
DotNetAge/freezes | freezes/views.py | 1 | 6371 | # -*- coding: utf-8 -*-
# !/usr/bin/python
__author__ = 'Ray'
from flask import g, render_template, send_from_directory, Blueprint, current_app, url_for, jsonify
from os import path
from urlparse import urljoin
from werkzeug.contrib.atom import AtomFeed
from fnmatch import fnmatch
from datetime import datetime
from werkzeug.exceptions import abort
from flask_babel import gettext, refresh
class SimplePage(object):
title = ''
path = ''
_locale = ''
_is_default = False
def __init__(self, title=''):
self.title = title
def set_lang(locale_name):
if locale_name != '':
g.lang = locale_name.split('-')[0]
refresh()
def create_views(name, app):
main = Blueprint(name, name,
template_folder='templates',
static_url_path='/static',
static_folder='static')
try:
if app.config['TESTING'] is False:
pkg_file = path.join(app.path, '__init__.py')
if path.exists(pkg_file):
import imp
ext_module = imp.load_source(name, pkg_file)
routes_func_name = 'register'
if hasattr(ext_module, routes_func_name) and callable(getattr(ext_module, routes_func_name)):
ext_module.register(main)
finally:
__init_views(main, app)
app.register_blueprint(main)
return main
def __init_views(main, app):
@main.route('/')
@main.route('/<path:page_path>/')
def index(page_path='index'):
if fnmatch(page_path, '*.*'):
_abs_path = path.abspath(path.join('pages', path.dirname(page_path)))
return send_from_directory(_abs_path,
path.basename(page_path))
page = current_app.pages.get_or_404(page_path)
default_layout = 'page'
if page._is_post:
default_layout = 'post'
set_lang(page._locale)
template = 'layouts/%s.html' % page.meta.get('layout', default_layout)
return render_template(template, page=page, locale=page._locale, site=current_app.site)
@main.route('/api/pages/<path:search_path>.json')
def data_pages(search_path):
if search_path == 'all':
return jsonify(pages=[p.to_json for p in current_app.site.pages])
else:
_page = current_app.pages.get_or_404(search_path)
json_page = _page.to_json
json_page.update(pages=[p.to_json for p in current_app.site.query(search_path)])
return jsonify(json_page)
@main.route('/api/posts.json')
def data_posts():
return jsonify(posts=[p.to_json for p in current_app.site.posts])
@main.route('/<regex("[a-z]{2}-[A-Z]{2}"):locale_name>/tags/')
@main.route('/tags/')
def tags(locale_name=''):
set_lang(locale_name)
return render_template('layouts/tags.html',
page=SimplePage('All tags'),
site=current_app.site,
locale=locale_name)
@main.route('/<regex("[a-z]{2}-[A-Z]{2}"):locale_name>/tags/<name>/')
@main.route('/tags/<name>/')
def tag(name, locale_name=''):
set_lang(locale_name)
if (name is None) or name == '':
abort(404)
return render_template('layouts/tagged.html',
page=SimplePage(gettext(u'Articles tagged with:%(value)s', value=name)),
tag=name,
locale=locale_name,
site=current_app.site)
@main.route('/<regex("[a-z]{2}-[A-Z]{2}"):locale_name>/archives/')
@main.route('/archives/')
def archives(locale_name=''):
set_lang(locale_name)
return render_template('layouts/archives.html',
page=SimplePage(gettext(u'All archives')),
locale=locale_name,
site=current_app.site)
@main.route('/<regex("[a-z]{2}-[A-Z]{2}"):locale_name>/archives/<name>/')
@main.route('/archives/<name>/')
def archive(name, locale_name=''):
set_lang(locale_name)
results = [a for a in current_app.site.archives if a.title == name]
if len(results) == 0:
abort(404)
return render_template('layouts/archived.html',
page=SimplePage(gettext(u'Archive:%(value)s', value=name)),
locale=locale_name,
archive=results[0],
site=current_app.site)
def render_404():
"""Render the not found page
"""
return render_template('404.html', page={'title': gettext(u'Page not found'), 'path': '404'},
locale='',
site=current_app.site)
@app.errorhandler(404)
def page_not_found(e):
return render_404(), 404
@main.route('/404.html')
def static_404():
return render_404()
@app.route('/sitemap.xml')
def sitemap():
locations = [(lambda p: (post.url, post.last_updated))(post) for post in current_app.pages]
sites = [(current_app.site.url + l[0], l[1]) for l in locations]
return render_template('sitemap.xml', sites=sites), 200, {'Content-Type': 'application/xml; charset=utf-8'}
@app.route('/feeds/<path:name>.atom')
def feed(name='recent'):
_feed_url = url_for('.feed', name=name, _external=True)
_posts = current_app.site.posts
if name != 'recent':
_posts = current_app.site.query(name, all=True)
feed = AtomFeed(gettext('Recent posts'),
feed_url=_feed_url,
url=current_app.site.url)
# if len(_posts) > 20:
# _posts = _posts[20]
for post in _posts:
feed.add(post.meta.get('title', ''),
unicode(post.html),
content_type='html',
subtitle=post.meta.get('summary', ''),
author=post.meta.get('author'),
url=urljoin(current_app.site.url, post.url),
updated=post.last_updated,
published=post.published)
return feed.get_response()
| bsd-3-clause | 3,633,069,214,335,844,400 | 33.437838 | 115 | 0.53304 | false | 3.880024 | false | false | false |
201213580/midescuentogt | servidor/conexiones.py | 1 | 2042 | import MySQLdb
def consultaRecuperar(consulta):
cnx = MySQLdb.connect(host='micupongt.cjyaceyawgbr.us-east-2.rds.amazonaws.com', user='manuel13580', passwd='Manuel5897-',
db='micupongt')
cursor = cnx.cursor()
cursor.execute(consulta)
    #cnx.close() is not called here because it triggers an exception.
return cursor
def consultaSQL(consulta):
respuesta=False
cnx = MySQLdb.connect(host='micupongt.cjyaceyawgbr.us-east-2.rds.amazonaws.com', user='manuel13580', passwd='Manuel5897-',
db='micupongt')
cursor = cnx.cursor()
cursor.execute(consulta)
respuesta=cursor.fetchone()
if respuesta=='1':
respuesta=True
cnx.close()
return respuesta
def consultaId(consulta):
cnx = MySQLdb.connect(host='micupongt.cjyaceyawgbr.us-east-2.rds.amazonaws.com', user='manuel13580', passwd='Manuel5897-',
db='micupongt')
cursor = cnx.cursor()
cursor.execute(consulta)
respuesta=cursor.fetchone()
#cnx.close()
return respuesta
def consultaPromociones(consulta):
cnx = MySQLdb.connect(host='micupongt.cjyaceyawgbr.us-east-2.rds.amazonaws.com', user='manuel13580', passwd='Manuel5897-',
db='micupongt')
cursor = cnx.cursor()
try:
cursor.execute(consulta)
#cnx.close() no se cierra porque activa una excepcion.
except Exception, e:
print ' '
return cursor
def registroSQL(consulta):
respuesta=False
cnx = MySQLdb.connect(host='micupongt.cjyaceyawgbr.us-east-2.rds.amazonaws.com', user='manuel13580', passwd='Manuel5897-',
db='micupongt')
cursor = cnx.cursor()
try:
cursor.execute(consulta)
cnx.commit()
cnx.close()
respuesta=True
except Exception, e:
print 'No se logro realizar la accion'
return respuesta
def consultaCodigo(consulta):
respuesta=False
cnx = MySQLdb.connect(host='micupongt.cjyaceyawgbr.us-east-2.rds.amazonaws.com', user='manuel13580', passwd='Manuel5897-',
db='micupongt')
cursor = cnx.cursor()
try:
cursor.execute(consulta)
cnx.commit()
respuesta=cursor.fetchone()
        #cnx.close() is not called here because it triggers an exception.
except Exception, e:
print ' '
return respuesta
| apache-2.0 | 6,424,390,343,573,191,000 | 31.412698 | 123 | 0.739961 | false | 2.539801 | false | false | false |
myron0330/caching-research | section_cmab/simulation/base.py | 1 | 2092 | # -*- coding: UTF-8 -*-
# **********************************************************************************#
# File:
# **********************************************************************************#
from section_cmab.agent import Agent
from section_cmab.algorithms.lp_solvers import primal_dual_recover
from section_cmab.display.rewards import display_single_
def simulate_with_(algorithm, config=None, circles=200, dump=True,
algorithm_type='original', fixed_theta=False, prefix='', **kwargs):
"""
Simulate with parameters.
Args:
algorithm(function): algorithm
config(string): config path
circles(int): circles
dump(boolean): whether to dump result to file
algorithm_type(string): original, optimal, comparison
fixed_theta(boolean): fixed theta
prefix(string): prefix
"""
config = config or '../cfg/default.cfg'
agent = Agent.from_(config)
if algorithm_type == 'optimal':
algorithm_rewards = agent.find_optimal_with_bnd_(primal_dual_recover, circles=circles, dump=dump,
fixed_theta=fixed_theta,
prefix=prefix)
elif algorithm_type == 'original':
algorithm_rewards = agent.iter_with_(algorithm, circles=circles, dump=dump, prefix=prefix)
elif algorithm_type == 'greedy':
algorithm_rewards = agent.iter_with_greedy_(algorithm, circles=circles, dump=dump, prefix=prefix)
else:
algorithm_rewards = agent.comparison_(algorithm, circles=circles, dump=dump, prefix=prefix)
return algorithm_rewards
if __name__ == '__main__':
config_path = '../cfg/myron.cfg'
current_algorithm = primal_dual_recover
# current_algorithm = branch_and_bound
rewards = simulate_with_(current_algorithm, config=config_path, circles=30,
dump=False)
display_single_(rewards, all_curves=False, display_length=500, line_width=1.8,
title_size=20, label_size=16, color='#1E90FF')
| mit | -477,107,893,832,023,940 | 45.488889 | 105 | 0.573614 | false | 4.192385 | true | false | false |
ds-hwang/chromium-crosswalk | third_party/WebKit/Tools/Scripts/webkitpy/common/net/networktransaction_unittest.py | 59 | 4028 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from urllib2 import HTTPError
from webkitpy.common.net.networktransaction import NetworkTransaction, NetworkTimeout
from webkitpy.common.system.logtesting import LoggingTestCase
class NetworkTransactionTest(LoggingTestCase):
exception = Exception("Test exception")
def test_success(self):
transaction = NetworkTransaction()
self.assertEqual(transaction.run(lambda: 42), 42)
def _raise_exception(self):
raise self.exception
def test_exception(self):
transaction = NetworkTransaction()
did_process_exception = False
did_throw_exception = True
try:
transaction.run(lambda: self._raise_exception())
did_throw_exception = False
except Exception, e:
did_process_exception = True
self.assertEqual(e, self.exception)
self.assertTrue(did_throw_exception)
self.assertTrue(did_process_exception)
def _raise_500_error(self):
self._run_count += 1
if self._run_count < 3:
raise HTTPError("http://example.com/", 500, "internal server error", None, None)
return 42
def _raise_404_error(self):
raise HTTPError("http://foo.com/", 404, "not found", None, None)
def test_retry(self):
self._run_count = 0
transaction = NetworkTransaction(initial_backoff_seconds=0)
self.assertEqual(transaction.run(lambda: self._raise_500_error()), 42)
self.assertEqual(self._run_count, 3)
self.assertLog(['WARNING: Received HTTP status 500 loading "http://example.com/". '
'Retrying in 0 seconds...\n',
'WARNING: Received HTTP status 500 loading "http://example.com/". '
'Retrying in 0.0 seconds...\n'])
def test_convert_404_to_None(self):
transaction = NetworkTransaction(convert_404_to_None=True)
self.assertEqual(transaction.run(lambda: self._raise_404_error()), None)
def test_timeout(self):
self._run_count = 0
transaction = NetworkTransaction(initial_backoff_seconds=60*60, timeout_seconds=60)
did_process_exception = False
did_throw_exception = True
try:
transaction.run(lambda: self._raise_500_error())
did_throw_exception = False
except NetworkTimeout, e:
did_process_exception = True
self.assertTrue(did_throw_exception)
self.assertTrue(did_process_exception)
| bsd-3-clause | 3,983,772,429,179,721,700 | 42.311828 | 92 | 0.693396 | false | 4.450829 | true | false | false |
dfang/odoo | addons/fleet/__manifest__.py | 23 | 1331 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name' : 'Fleet Management',
'version' : '0.1',
'sequence': 165,
'category': 'Human Resources',
'website' : 'https://www.odoo.com/page/fleet',
'summary' : 'Vehicle, leasing, insurances, costs',
'description' : """
Vehicle, leasing, insurances, cost
==================================
With this module, Odoo helps you manage all your vehicles, the
contracts associated with those vehicles, as well as services, fuel log
entries, costs and many other features necessary for the management
of your fleet of vehicles.
Main Features
-------------
* Add vehicles to your fleet
* Manage contracts for vehicles
* Reminder when a contract reaches its expiration date
* Add services, fuel log entry, odometer values for all vehicles
* Show all costs associated to a vehicle or to a type of service
* Analysis graph for costs
""",
'depends': [
'base',
'mail',
],
'data': [
'security/fleet_security.xml',
'security/ir.model.access.csv',
'views/fleet_view.xml',
'views/fleet_board_view.xml',
'data/fleet_cars_data.xml',
'data/fleet_data.xml',
],
'demo': ['data/fleet_demo.xml'],
'installable': True,
'application': True,
}
| agpl-3.0 | 2,143,881,206,663,646,000 | 29.25 | 74 | 0.629602 | false | 3.530504 | false | false | false |
adamklawonn/CityCircles | citycircles_iphone/build_back2/iphoneDistribution-iphonesimulator/CityCircles.app/globalmaptiles.py | 28 | 16529 | #!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL2Tiles, Google Summer of Code 2007 & 2008
# Global Map Tiles Classes
# Purpose: Convert a raster into TMS tiles, create KML SuperOverlay EPSG:4326,
# generate a simple HTML viewers based on Google Maps and OpenLayers
# Author: Klokan Petr Pridal, klokan at klokan dot cz
# Web: http://www.klokan.cz/projects/gdal2tiles/
#
###############################################################################
# Copyright (c) 2008 Klokan Petr Pridal. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
"""
globalmaptiles.py
Global Map Tiles as defined in Tile Map Service (TMS) Profiles
==============================================================
Functions necessary for generation of global tiles used on the web.
It contains classes implementing coordinate conversions for:
- GlobalMercator (based on EPSG:900913 = EPSG:3785)
for Google Maps, Yahoo Maps, Microsoft Maps compatible tiles
- GlobalGeodetic (based on EPSG:4326)
for OpenLayers Base Map and Google Earth compatible tiles
More info at:
http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification
http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation
http://msdn.microsoft.com/en-us/library/bb259689.aspx
http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates
Created by Klokan Petr Pridal on 2008-07-03.
Google Summer of Code 2008, project GDAL2Tiles for OSGEO.
In case you use this class in your product, translate it to another language
or find it useful for your project, please let me know.
My email: klokan at klokan dot cz.
I would like to know where it was used.
Class is available under the open-source GDAL license (www.gdal.org).
"""
import math
class GlobalMercator(object):
"""
TMS Global Mercator Profile
---------------------------
Functions necessary for generation of tiles in Spherical Mercator projection,
EPSG:900913 (EPSG:gOOglE, Google Maps Global Mercator), EPSG:3785, OSGEO:41001.
Such tiles are compatible with Google Maps, Microsoft Virtual Earth, Yahoo Maps,
UK Ordnance Survey OpenSpace API, ...
and you can overlay them on top of base maps of those web mapping applications.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Mercator tiles::
LatLon <-> Meters <-> Pixels <-> Tile
WGS84 coordinates Spherical Mercator Pixels in pyramid Tiles in pyramid
lat/lon XY in metres XY pixels Z zoom XYZ from TMS
EPSG:4326 EPSG:900913
.----. --------- -- TMS
/ \ <-> | | <-> /----/ <-> Google
\ / | | /--------/ QuadTree
----- --------- /------------/
KML, public WebMapService Web Clients TileMapService
What is the coordinate extent of Earth in EPSG:900913?
[-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]
Constant 20037508.342789244 comes from the circumference of the Earth in meters,
which is 40 thousand kilometers, the coordinate origin is in the middle of extent.
In fact you can calculate the constant as: 2 * math.pi * 6378137 / 2.0
$ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:900913
    Polar areas with abs(latitude) bigger than 85.05112878 are clipped off.
What are zoom level constants (pixels/meter) for pyramid with EPSG:900913?
whole region is on top of pyramid (zoom=0) covered by 256x256 pixels tile,
every lower zoom level resolution is always divided by two
initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062
What is the difference between TMS and Google Maps/QuadTree tile name convention?
The tile raster itself is the same (equal extent, projection, pixel size),
there is just different identification of the same raster tile.
Tiles in TMS are counted from [0,0] in the bottom-left corner, id is XYZ.
Google placed the origin [0,0] to the top-left corner, reference is XYZ.
Microsoft is referencing tiles by a QuadTree name, defined on the website:
http://msdn2.microsoft.com/en-us/library/bb259689.aspx
The lat/lon coordinates are using WGS84 datum, yeh?
Yes, all lat/lon we are mentioning should use WGS84 Geodetic Datum.
Well, the web clients like Google Maps are projecting those coordinates by
Spherical Mercator, so in fact lat/lon coordinates on sphere are treated as if
    they were on the WGS84 ellipsoid.
From MSDN documentation:
To simplify the calculations, we use the spherical form of projection, not
the ellipsoidal form. Since the projection is used only for map display,
and not for displaying numeric coordinates, we don't need the extra precision
of an ellipsoidal projection. The spherical projection causes approximately
    0.33 percent scale distortion in the Y direction, which is not visually noticeable.
How do I create a raster in EPSG:900913 and convert coordinates with PROJ.4?
You can use standard GIS tools like gdalwarp, cs2cs or gdaltransform.
All of the tools supports -t_srs 'epsg:900913'.
For other GIS programs check the exact definition of the projection:
More info at http://spatialreference.org/ref/user/google-projection/
    The same projection is defined as EPSG:3785. WKT definition is in the official
EPSG database.
Proj4 Text:
+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0
+k=1.0 +units=m +nadgrids=@null +no_defs
Human readable WKT format of EPGS:900913:
PROJCS["Google Maps Global Mercator",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.2572235630016,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4326"]],
PROJECTION["Mercator_1SP"],
PARAMETER["central_meridian",0],
PARAMETER["scale_factor",1],
PARAMETER["false_easting",0],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]]]
"""
def __init__(self, tileSize=256):
"Initialize the TMS Global Mercator pyramid"
self.tileSize = tileSize
self.initialResolution = 2 * math.pi * 6378137 / self.tileSize
# 156543.03392804062 for tileSize 256 pixels
self.originShift = 2 * math.pi * 6378137 / 2.0
# 20037508.342789244
def LatLonToMeters(self, lat, lon ):
"Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:900913"
mx = lon * self.originShift / 180.0
my = math.log( math.tan((90 + lat) * math.pi / 360.0 )) / (math.pi / 180.0)
my = my * self.originShift / 180.0
return mx, my
def MetersToLatLon(self, mx, my ):
"Converts XY point from Spherical Mercator EPSG:900913 to lat/lon in WGS84 Datum"
lon = (mx / self.originShift) * 180.0
lat = (my / self.originShift) * 180.0
lat = 180 / math.pi * (2 * math.atan( math.exp( lat * math.pi / 180.0)) - math.pi / 2.0)
return lat, lon
def PixelsToMeters(self, px, py, zoom):
"Converts pixel coordinates in given zoom level of pyramid to EPSG:900913"
res = self.Resolution( zoom )
mx = px * res - self.originShift
my = py * res - self.originShift
return mx, my
def MetersToPixels(self, mx, my, zoom):
"Converts EPSG:900913 to pyramid pixel coordinates in given zoom level"
res = self.Resolution( zoom )
px = (mx + self.originShift) / res
py = (my + self.originShift) / res
return px, py
def PixelsToTile(self, px, py):
"Returns a tile covering region in given pixel coordinates"
tx = int( math.ceil( px / float(self.tileSize) ) - 1 )
ty = int( math.ceil( py / float(self.tileSize) ) - 1 )
return tx, ty
def PixelsToRaster(self, px, py, zoom):
"Move the origin of pixel coordinates to top-left corner"
mapSize = self.tileSize << zoom
return px, mapSize - py
def MetersToTile(self, mx, my, zoom):
"Returns tile for given mercator coordinates"
px, py = self.MetersToPixels( mx, my, zoom)
return self.PixelsToTile( px, py)
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in EPSG:900913 coordinates"
minx, miny = self.PixelsToMeters( tx*self.tileSize, ty*self.tileSize, zoom )
maxx, maxy = self.PixelsToMeters( (tx+1)*self.tileSize, (ty+1)*self.tileSize, zoom )
return ( minx, miny, maxx, maxy )
def TileLatLonBounds(self, tx, ty, zoom ):
"Returns bounds of the given tile in latutude/longitude using WGS84 datum"
bounds = self.TileBounds( tx, ty, zoom)
minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1])
maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3])
return ( minLat, minLon, maxLat, maxLon )
def Resolution(self, zoom ):
"Resolution (meters/pixel) for given zoom level (measured at Equator)"
# return (2 * math.pi * 6378137) / (self.tileSize * 2**zoom)
return self.initialResolution / (2**zoom)
def ZoomForPixelSize(self, pixelSize ):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(30):
if pixelSize > self.Resolution(i):
return i-1 if i!=0 else 0 # We don't want to scale up
def GoogleTile(self, tx, ty, zoom):
"Converts TMS tile coordinates to Google Tile coordinates"
# coordinate origin is moved from bottom-left to top-left corner of the extent
return tx, (2**zoom - 1) - ty
def QuadTree(self, tx, ty, zoom ):
"Converts TMS tile coordinates to Microsoft QuadTree"
quadKey = ""
ty = (2**zoom - 1) - ty
for i in range(zoom, 0, -1):
digit = 0
mask = 1 << (i-1)
if (tx & mask) != 0:
digit += 1
if (ty & mask) != 0:
digit += 2
quadKey += str(digit)
return quadKey
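
    # Hedged round-trip sketch (illustration only; the coordinates are
    # arbitrary). It walks the LatLon <-> Meters <-> Pixels <-> Tile chain
    # described in the class docstring above:
    #
    #   merc = GlobalMercator()
    #   mx, my = merc.LatLonToMeters(47.0, 8.0)    # WGS84 -> EPSG:900913
    #   px, py = merc.MetersToPixels(mx, my, 11)   # meters -> zoom-11 pixels
    #   tx, ty = merc.PixelsToTile(px, py)         # pixels -> TMS tile
    #   gx, gy = merc.GoogleTile(tx, ty, 11)       # TMS -> Google numbering
    #   quadkey = merc.QuadTree(tx, ty, 11)        # TMS -> Microsoft quadkey
    #   # merc.MetersToLatLon(mx, my) recovers approximately (47.0, 8.0)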
#---------------------
class GlobalGeodetic(object):
"""
TMS Global Geodetic Profile
---------------------------
Functions necessary for generation of global tiles in Plate Carre projection,
EPSG:4326, "unprojected profile".
Such tiles are compatible with Google Earth (as any other EPSG:4326 rasters)
and you can overlay the tiles on top of OpenLayers base map.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Geodetic tiles?
Global Geodetic tiles are using geodetic coordinates (latitude,longitude)
directly as planar coordinates XY (it is also called Unprojected or Plate
Carre). We need only scaling to pixel pyramid and cutting to tiles.
Pyramid has on top level two tiles, so it is not square but rectangle.
Area [-180,-90,180,90] is scaled to 512x256 pixels.
TMS has coordinate origin (for pixels and tiles) in bottom-left corner.
Rasters are in EPSG:4326 and therefore are compatible with Google Earth.
LatLon <-> Pixels <-> Tiles
WGS84 coordinates Pixels in pyramid Tiles in pyramid
lat/lon XY pixels Z zoom XYZ from TMS
EPSG:4326
.----. ----
/ \ <-> /--------/ <-> TMS
\ / /--------------/
----- /--------------------/
WMS, KML Web Clients, Google Earth TileMapService
"""
def __init__(self, tileSize = 256):
self.tileSize = tileSize
def LatLonToPixels(self, lat, lon, zoom):
"Converts lat/lon to pixel coordinates in given zoom of the EPSG:4326 pyramid"
res = 180 / 256.0 / 2**zoom
px = (180 + lat) / res
py = (90 + lon) / res
return px, py
def PixelsToTile(self, px, py):
"Returns coordinates of the tile covering region in pixel coordinates"
tx = int( math.ceil( px / float(self.tileSize) ) - 1 )
ty = int( math.ceil( py / float(self.tileSize) ) - 1 )
return tx, ty
def Resolution(self, zoom ):
"Resolution (arc/pixel) for given zoom level (measured at Equator)"
return 180 / 256.0 / 2**zoom
#return 180 / float( 1 << (8+zoom) )
    def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile"
res = 180 / 256.0 / 2**zoom
return (
tx*256*res - 180,
ty*256*res - 90,
(tx+1)*256*res - 180,
(ty+1)*256*res - 90
)
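
# Illustrative sketch (assumption, not original code): in the geodetic
# profile the planar XY coordinates are the lat/lon degrees themselves,
# so only scaling into the two-tile-wide pixel pyramid is involved:
#
#   geod = GlobalGeodetic()
#   px, py = geod.LatLonToPixels(45.0, 5.0, 3)   # degrees -> zoom-3 pixels
#   tx, ty = geod.PixelsToTile(px, py)           # pixels -> TMS tile
#   res = geod.Resolution(3)                     # degrees per pixel at zoom 3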
if __name__ == "__main__":
import sys, os
def Usage(s = ""):
print "Usage: globalmaptiles.py [-profile 'mercator'|'geodetic'] zoomlevel lat lon [latmax lonmax]"
print
if s:
print s
print
print "This utility prints for given WGS84 lat/lon coordinates (or bounding box) the list of tiles"
print "covering specified area. Tiles are in the given 'profile' (default is Google Maps 'mercator')"
print "and in the given pyramid 'zoomlevel'."
print "For each tile several information is printed including bonding box in EPSG:900913 and WGS84."
sys.exit(1)
profile = 'mercator'
zoomlevel = None
lat, lon, latmax, lonmax = None, None, None, None
boundingbox = False
argv = sys.argv
i = 1
while i < len(argv):
arg = argv[i]
if arg == '-profile':
i = i + 1
profile = argv[i]
        elif zoomlevel is None:  # positional args; don't reparse the '-profile' value as a number
zoomlevel = int(argv[i])
elif lat is None:
lat = float(argv[i])
elif lon is None:
lon = float(argv[i])
elif latmax is None:
latmax = float(argv[i])
elif lonmax is None:
lonmax = float(argv[i])
else:
Usage("ERROR: Too many parameters")
i = i + 1
if profile != 'mercator':
Usage("ERROR: Sorry, given profile is not implemented yet.")
if zoomlevel == None or lat == None or lon == None:
Usage("ERROR: Specify at least 'zoomlevel', 'lat' and 'lon'.")
if latmax is not None and lonmax is None:
Usage("ERROR: Both 'latmax' and 'lonmax' must be given.")
if latmax != None and lonmax != None:
if latmax < lat:
Usage("ERROR: 'latmax' must be bigger then 'lat'")
if lonmax < lon:
Usage("ERROR: 'lonmax' must be bigger then 'lon'")
boundingbox = (lon, lat, lonmax, latmax)
tz = zoomlevel
mercator = GlobalMercator()
mx, my = mercator.LatLonToMeters( lat, lon )
print "Spherical Mercator (ESPG:900913) coordinates for lat/lon: "
print (mx, my)
tminx, tminy = mercator.MetersToTile( mx, my, tz )
if boundingbox:
mx, my = mercator.LatLonToMeters( latmax, lonmax )
print "Spherical Mercator (ESPG:900913) cooridnate for maxlat/maxlon: "
print (mx, my)
tmaxx, tmaxy = mercator.MetersToTile( mx, my, tz )
else:
tmaxx, tmaxy = tminx, tminy
for ty in range(tminy, tmaxy+1):
for tx in range(tminx, tmaxx+1):
tilefilename = "%s/%s/%s" % (tz, tx, ty)
print tilefilename, "( TileMapService: z / x / y )"
gx, gy = mercator.GoogleTile(tx, ty, tz)
print "\tGoogle:", gx, gy
quadkey = mercator.QuadTree(tx, ty, tz)
print "\tQuadkey:", quadkey, '(',int(quadkey, 4),')'
bounds = mercator.TileBounds( tx, ty, tz)
print
print "\tEPSG:900913 Extent: ", bounds
wgsbounds = mercator.TileLatLonBounds( tx, ty, tz)
print "\tWGS84 Extent:", wgsbounds
print "\tgdalwarp -ts 256 256 -te %s %s %s %s %s %s_%s_%s.tif" % (
bounds[0], bounds[1], bounds[2], bounds[3], "<your-raster-file-in-epsg900913.ext>", tz, tx, ty)
print
| gpl-3.0 | 4,539,766,909,753,469,400 | 36.565909 | 103 | 0.651885 | false | 3.278913 | false | false | false |
Bitka0/serenade | homepage/navigation/templatetags/navigation_t.py | 1 | 2401 | # coding: utf-8
# Copyright (c) 2011 Lukas Martini, Phillip Thelen.
# This file may be used and distributed under the terms found in the
# file COPYING, which you should have received along with this
# program. If you haven't, please refer to bofh@junge-piraten.de.
from django.template import Library, Node
from homepage.navigation.models import Entry
register = Library()
class SimpleMenuNode(Node):
def __init__(self, menu=None):
if menu != None:
self.menu = menu
else:
self.menu = 1
def addmenu(self, parentid = None):
entrylist = Entry.objects.all().filter(menu__menuname = self.menu, parent__id = parentid)
self.menuhtml += '<ul>'
for entry in entrylist:
self.menuhtml += '<li><a href="{0}">{1}</a></li>'.format(entry.target, entry.name)
if entry.children.count() != 0:
self.addmenu(entry.id)
self.menuhtml += '</ul>'
def render(self, context):
self.menuhtml = ''
self.addmenu()
return self.menuhtml
class SimpleMenuOneNode(Node):
def __init__(self, menu=None, parent=None):
if menu != None:
self.menu = menu
else:
self.menu = 1
if parent != None:
self.parent = parent
else:
self.parent = None
def render(self, context):
entrylist = Entry.objects.all().filter(menu__menuname = self.menu, parent__id = self.parent)
menuhtml = '<ul>'
for entry in entrylist:
menuhtml += '<li><a href="{0}">{1}</a></li>'.format(entry.target, entry.name)
menuhtml += '</ul>'
return menuhtml
class CheckmenuNode(Node):
def render(self, context):
return ''
def simpleMenu(parser, token):
try:
tag_name, menu = token.split_contents()
except:
menu = None
return SimpleMenuNode(menu)
def simpleMenuOne(parser, token):
parent = None
menu = None
try:
content = token.split_contents()
except:
menu = None
if len(content) > 1:
if len(content) > 2:
menu = content[1]
parent = content[2]
else:
menu = content[1]
return SimpleMenuOneNode(menu, parent)
def checkmenu(parser, token):
try:
tag_name, menuname = token.split_contents()
entrylist = Entry.objects.all().filter(menu__menuname = menuname)
except:
parser.skip_past('endcheckmenu')
return CheckmenuNode()
def endcheckmenu(parser, token):
return CheckmenuNode()
simpleMenu = register.tag(simpleMenu)
simpleMenuOne = register.tag(simpleMenuOne)
checkmenu = register.tag(checkmenu)
endcheckmenu = register.tag(endcheckmenu)
| mit | -9,106,934,015,875,573,000 | 24.273684 | 94 | 0.685964 | false | 3.008772 | false | false | false |
astrorigin/oroboros | oroboros/core/bicharts.py | 1 | 11219 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Charts with two subcharts.
"""
from decimal import Decimal
import swisseph as swe
from oroboros.core.charts import Chart
from oroboros.core.planets import all_planets
from oroboros.core.aspects import all_aspects
from oroboros.core.results import PlanetDataList
from oroboros.core.aspectsresults import AspectDataList, MidPointAspectDataList, InterMidPointAspectDataList
__all__ = ['BiChart']
class BiChart(list):
"""Chart object with comparisons functions for two subcharts."""
__slots__ = ('_interaspects', '_intermidp1', '_intermidp2',
'_intermidpoints', '_switched')
def _get_interaspects(self):
"""Get inter-aspects.
:rtype: AspectDataList
"""
if self._interaspects == None:
self._calc_interaspects()
return self._interaspects
def _get_intermidp1(self):
"""Get aspects between chart 1 midpoints and chart 2 planets.
:rtype: MidPointAspectDataList
"""
if self._intermidp1 == None:
self._calc_intermidp(0)
return self._intermidp1
def _get_intermidp2(self):
"""Get aspects between chart 2 midpoints and chart 1 planets.
:rtype: MidPointAspectDataList
"""
if self._intermidp2 == None:
self._calc_intermidp(1)
return self._intermidp2
def _get_intermidpoints(self):
"""Get aspects between midpoints.
:rtype: InterMidPointAspectDataList
"""
if self._intermidpoints == None:
self._calc_intermidpoints()
return self._intermidpoints
def _get_switched(self):
"""Get switch state flag.
:rtype: bool
"""
return self._switched
def _set_switched(self, boolean):
"""Set switched state flag.
:type boolean: bool
"""
self._switched = bool(boolean)
interaspects = property(_get_interaspects,
doc='Inter-aspects.')
intermidp1 = property(_get_intermidp1,
doc='Aspects to chart 1 midpoints.')
intermidp2 = property(_get_intermidp2,
doc='Aspects to chart 2 midpoints.')
intermidpoints = property(_get_intermidpoints,
doc='Aspects between midpoints.')
switched = property(_get_switched, _set_switched,
doc='Bichart switched state (bool).')
def __init__(self, cht1=None, cht2=None):
"""Init bi-chart.
:type cht1: Chart, str, int or None
:type cht2: Chart, str, int or None
"""
self._switched = False
if cht1 != None:
self.append(cht1)
if cht2 != None:
self.append(cht2)
self.calc()
def append(self, cht):
"""Append a chart.
:type cht: Chart, str or int
:raise TypeError: invalid chart
"""
        if not isinstance(cht, Chart):
            try:
                cht = Chart(cht)
            except:
                raise TypeError('Invalid chart %s.' % cht)
list.append(self, cht)
self.calc()
def insert(self, idx, cht):
"""Insert a chart.
:type idx: int
:type cht: Chart, str or int
:raise IndexError: invalid index
:raise TypeError: invalid chart
"""
if idx > 1 or idx < -2:
raise IndexError('Invalid index %s.' % idx)
if not isinstance(cht, Chart):
try:
cht = Chart(cht)
except:
                raise TypeError('Invalid chart %s.' % cht)
list.insert(self, idx, cht)
self.calc()
def __setitem__(self, idx, cht):
if idx > 1 or idx < -2:
raise IndexError('Invalid index %s.' % idx)
if not isinstance(cht, Chart):
try:
cht = Chart(cht)
except:
                raise TypeError('Invalid chart %s.' % cht)
list.__setitem__(self, idx, cht)
self.calc()
def __delitem__(self, idx):
self._switched = False
list.__delitem__(self, idx)
def set(self, idx, **kwargs):
"""Set charts properties."""
self[idx].set(**kwargs)
if any((x for x in kwargs if x in ('datetime', 'calendar', 'location',
'latitude', 'longitude', 'altitude', 'zoneinfo', 'timezone', 'dst',
'utcoffset', 'filter'))):
self.reset_calc()
def reset_calc(self):
"""Trigger recalculation of aspects."""
self._interaspects = None
self._intermidp1 = None
self._intermidp2 = None
self._intermidpoints = None
# calculations
def _calc_interaspects(self):
"""Calculate inter-aspects of planets between charts 1 and 2."""
res = AspectDataList()
if len(self) != 2:
self._interaspects = res
return
f1 = self[0]._filter
f2 = self[1]._filter
all_asp = all_aspects()
for pos1 in self[0]._planets:
p1, lon1, lonsp1 = pos1._planet, pos1._longitude, pos1._lonspeed
for pos2 in self[1]._planets:
p2, lon2, lonsp2 = pos2._planet, pos2._longitude, pos2._lonspeed
for asp, doasp in f1._aspects.items():
if not doasp:
continue
if not f2._aspects[asp]:
continue
if not f1._asprestr[p1._name] or not f2._asprestr[p1._name]:
continue
                    if not f1._asprestr[p2._name] or not f2._asprestr[p2._name]:
continue
asp = all_asp[asp]
orb = (f1._orbs[asp._name]+f2._orbs[asp._name])/Decimal('2')
orbmod1 = f1.orbrestr[p1._name].get_absolute(orb)
orbmod2 = f2.orbrestr[p2._name].get_absolute(orb)
orb += (orbmod1 + orbmod2) / Decimal('2')
if orb < 0:
continue
diff, apply, factor = swe._match_aspect2(
lon1, lonsp1, lon2, lonsp2,
float(asp._angle), float(orb))
if diff != None:
res.feed(pos1, pos2, asp, diff, apply, factor)
self._interaspects = res
def _calc_intermidp(self, idx):
"""Calculate aspects between one midpoints and other planets."""
res = MidPointAspectDataList()
try:
if len(self) != 2 or not self[idx]._filter._calc_midp:
if idx == 0:
self._intermidp1 = res
else:
self._intermidp2 = res
return
except IndexError:
if idx == 0:
self._intermidp1 = res
else:
self._intermidp2 = res
return
# ok do calc
oth = 1 if idx in (0, -2) else 0 # other's idx
midpres = self[idx]._midpoints
jd = self[oth].julday
flag = self[oth]._filter.get_calcflag()
self[oth]._setup_swisseph()
f = self[idx]._filter._midpoints
all_pl = all_planets()
all_asp = all_aspects()
# get all concerned planets, if not already calculated
plres = PlanetDataList()
for pl in [x for x in f._planets if f._planets[x] and f._asprestr[x]]:
try:
plres.append(self[oth]._planets.get_data(pl))
except KeyError:
p = all_pl[pl]
plres.feed(p, p.calc_ut(jd, flag, self[oth]))
# get midp aspects
plres.sort_by_ranking()
for i, midp in enumerate(midpres):
##p1, p2 = midp._planet, midp._planet2
lon1, lonsp1 = midp._longitude, midp._lonspeed
for pos in plres:
pl, lon2, lonsp2 = pos._planet, pos._longitude, pos._lonspeed
for asp, doasp in f._aspects.items():
if not doasp: # dont use this aspect
continue
asp = all_asp[asp]
# modify orb
orb = f._orbs[asp._name]
#orbmod1 = plorbfilt[p1._name].get_absolute(orb)
orbmod1 = 0 # todo?: midp obrestr
orbmod2 = f._orbrestr[pl._name].get_absolute(orb)
orb += (orbmod1 + orbmod2) / Decimal('2')
if orb < 0: # we'll never get such a precision
continue
# check aspect match
diff, apply, factor = swe._match_aspect2(
lon1, lonsp1, lon2, lonsp2,
float(asp._angle), float(orb))
if diff != None:
res.feed(midp, pos, asp, diff, apply, factor)
if idx == 0:
self._intermidp1 = res
else:
self._intermidp2 = res
def _calc_intermidpoints(self):
"""Calculate aspects between midpoints."""
res = InterMidPointAspectDataList()
if len(self) != 2:
self._intermidpoints = res
return
elif not self[0]._filter._calc_midp or not self[1]._filter._calc_midp:
self._intermidpoints = res
return
f1 = self[0]._filter._midpoints
f2 = self[1]._filter._midpoints
all_asp = all_aspects()
# begin calc
for i, pos1 in enumerate(self[0]._midpoints):
p1, lon1, lonsp1 = pos1._data2, pos1._longitude, pos1._lonspeed
for pos2 in self[1]._midpoints:
p2, lon2, lonsp2 = pos2._data2, pos2._longitude, pos2._lonspeed
for asp, doasp in f1._aspects.items():
if not doasp: # dont use this aspect
continue
if not f2._aspects[asp]:
continue
# no asp restr
asp = all_asp[asp]
# modify orb
orb1 = f1._orbs[asp._name]
orb2 = f2._orbs[asp._name]
                    orb = (orb1 + orb2) / Decimal('2')
# nor orb restr
# check aspect match
diff, apply, factor = swe._match_aspect2(
lon1, lonsp1, lon2, lonsp2,
float(asp._angle), float(orb))
if diff != None:
res.feed(pos1, pos2, asp, diff, apply, factor)
self._intermidpoints = res
def calc(self):
"""Do all calculations."""
self._calc_interaspects()
self._calc_intermidp(0)
self._calc_intermidp(1)
self._calc_intermidpoints()
def _all_draw_aspects(self):
"""Return a list of all drawable aspects (incl. activated midpoints).
:rtype: AspectDataList
"""
ret = AspectDataList()
ret.extend(self._interaspects)
try:
if self[0]._filter._draw_midp:
ret.extend(self._intermidp1)
except IndexError: # none chart
pass
try:
if self[1]._filter._draw_midp:
ret.extend(self._intermidp2)
except IndexError: # none chart
pass
# try:
# if self[0]._filter._draw_midp and self[1]._filter._draw_midp:
# ret.extend(self._intermidpoints)
# except IndexError: # none chart
# pass
return ret
def _all_draw_planets(self, idx=0):
"""Get all planets and midpoints to draw when comparing charts.
:type idx: int
:rtype: PlanetDataList
"""
ret = PlanetDataList()
if idx == 0:
ret.extend(self[0]._planets)
ret.extend(self._intermidp1.get_midpoints())
else:
ret.extend(self[1]._planets)
ret.extend(self._intermidp2.get_midpoints())
return ret
def switch(self):
"""Switch chart 1 and 2."""
self.reverse()
self._switched = not self._switched
self.calc()
def synastry_mode(self):
"""Set comparison mode transit/synastry."""
        for cht in self:
            cht.calc()
self.calc()
def progression_of(self, idx=0):
"""Set comparison mode progression.
:type idx: int
:raise IndexError: missing chart
"""
if len(self) != 2:
raise IndexError('Missing chart(s).')
if idx == 0:
cht1 = 0
cht2 = 1
elif idx == 1:
cht1 = 1
cht2 = 0
self[cht2].progression_of(self[cht1].julday)
self.calc()
def direction_of(self, idx=0):
"""Set comparison mode direction.
:type idx: int
:raise IndexError: missing chart
"""
if len(self) != 2:
raise IndexError('Missing chart(s)')
if idx == 0:
cht1 = 0
cht2 = 1
elif idx == 1:
cht1 = 1
cht2 = 0
self[cht2].direction_of(self[cht1].julday)
self.calc()
def multiply_pos(self, value, idx):
"""Multiply positions by value.
:type value: numeric
:type idx: int
"""
self[idx].multiply_pos(value)
self.calc()
def add_pos(self, value, idx):
"""Add value to positions.
:type value: numeric
:type idx: int
"""
self[idx].add_pos(value)
self.calc()
def profection_of(self, op, value, unit, idx=0):
"""Profection.
:type op: str
:type value: numeric
:type unit: str
:type idx: int
:raise IndexError: missing chart
"""
if len(self) != 2:
raise IndexError('Missing chart(s)')
if idx == 0:
cht1 = 0
cht2 = 1
elif idx == 1:
cht1 = 1
cht2 = 0
self[cht2].profection_of(op, value, unit, self[cht1].julday)
self.calc()
def __repr__(self):
return "BiChart(%s)" % ', '.join([repr(x) for x in self])
# End.
| gpl-3.0 | 7,579,024,174,989,201,000 | 24.439909 | 108 | 0.639807 | false | 2.730348 | false | false | false |
joshalbrecht/memdam | memdam/eventstore/sqlite.py | 1 | 23420 |
import uuid
import datetime
import re
import os
import sqlite3
import time
import itertools
import pytz
import lockfile
import memdam
import memdam.common.field
import memdam.common.event
import memdam.common.blob
import memdam.eventstore.api
@memdam.vtrace()
def execute_sql(cur, sql, args=()):
'''Just for debugging'''
return cur.execute(sql, args)
@memdam.vtrace()
def execute_many(cur, sql, values=()):
'''Just for debugging'''
cur.executemany(sql, values)
#TODO: validate the various bits of data--should not start or end with _, should not contain __, should only contain numbers and digits
#also have to validate all of the things that we are inserting in a raw way
class Eventstore(memdam.eventstore.api.Eventstore):
"""
An archive for all events that uses Sqlite as the backing store.
Stores all tables in their own file for the following reasons:
- Lower contention (read and write) when working with multiple data types at once
- Smaller files (easier to back up, encrypt, decrypt, etc)
- Safety. Reduces chances of corrupting all data.
Note: pass in a folder called :memory: to keep everything in memory for testing
When inserting new events, automatically creates new columns if necessary.
All columns are given appropriate indices (usually ASC, except in the case of TEXT, which is
given an FTS virtual table, and the column in the main table because an INTEGER that refers
to the document id in the FTS table)
Columns are created with exactly the same name as the variables.
Variable names uniquely define the type of the column, as well as the type of any index.
TEXT attributes will createa column that contains docid integer references in the main table,
AS WELL AS a second (virtual, fts4) table (name__text__docs)
Indices are named "name__type__secondary__indextype"
"""
EXTENSION = '.sql'
LOCK_EXTENSION = '.lock'
CREATE_TABLE_EXTENSION = '.creating_sql'
def __init__(self, folder):
self.folder = folder
self.memory_connection = None
def save(self, events):
memdam.log().debug("Saving events")
sorted_events = sorted(events, key=lambda x: x.namespace)
for namespace, grouped_events in itertools.groupby(sorted_events, lambda x: x.namespace):
table_name = namespace_to_table_name(namespace)
self._save_events(list(grouped_events), table_name)
def get(self, event_id):
for table_name in self._all_table_names():
conn = self._connect(table_name, read_only=True)
namespace = table_name_to_namespace(table_name)
cur = conn.cursor()
sql = "SELECT * FROM %s WHERE id__id = ?;" % (table_name)
execute_sql(cur, sql, (buffer(event_id.bytes),))
names = [x[0] for x in cur.description]
for row in cur.fetchall():
return _create_event_from_row(row, names, namespace, conn)
raise Exception("event with id %s not found" % (event_id))
def find(self, query):
events = []
for table_name in self._all_table_names():
if _matches_namespace_filters(table_name, query):
events += self._find_matching_events_in_table(table_name, query)
return events
def delete(self, event_id):
for table_name in self._all_table_names():
conn = self._connect(table_name, read_only=False)
cur = conn.cursor()
cur.execute("BEGIN EXCLUSIVE")
sql = "SELECT _id FROM %s WHERE id__id = ?;" % (table_name)
execute_sql(cur, sql, (buffer(event_id.bytes),))
for row in cur.fetchall():
rowid = row[0]
names = [x[0] for x in cur.description]
for i in range(0, len(names)):
name = names[i]
if name == '_id':
continue
if memdam.common.event.Event.field_type(name) == memdam.common.field.FieldType.TEXT:
execute_sql(cur, "DELETE FROM %s__%s__docs WHERE docid = ?;" % (table_name, name), (rowid))
execute_sql(cur, "DELETE FROM %s WHERE _id = %s" % (table_name, rowid), ())
conn.commit()
def _find_matching_events_in_table(self, table_name, query):
conn = self._connect(table_name, read_only=True)
namespace = table_name_to_namespace(table_name)
cur = conn.cursor()
args = ()
sql = "SELECT * FROM %s" % (table_name)
field_filters, _ = _separate_filters(query.filters)
if field_filters:
filter_string, new_args = _get_field_filter_string(field_filters)
args = args + new_args
sql += " WHERE " + filter_string
if query.order:
order_string = self._get_order_string(query.order)
sql += " ORDER BY " + order_string
if query.limit:
sql += " LIMIT " + str(long(query.limit))
sql += ';'
execute_sql(cur, sql, args)
events = []
names = list(map(lambda x: x[0], cur.description))
for row in cur.fetchall():
events.append(_create_event_from_row(row, names, namespace, conn))
return events
def _get_order_string(self, order):
sql_order_elems = []
for elem in order:
order_type = 'ASC'
if elem[1] == False:
order_type = 'DESC'
safe_column_name = elem[0].lower()
assert SqliteColumn.SQL_NAME_REGEX.match(safe_column_name), "Invalid name for column: %s" % (safe_column_name)
assert memdam.common.event.Event.field_type(safe_column_name) != memdam.common.field.FieldType.TEXT, "text keys are currently unsupported for ordering. Doesn't make a lot of sense."
sql_order_elems.append("%s %s" % (safe_column_name, order_type))
return ", ".join(sql_order_elems)
def _all_table_names(self):
"""
:returns: the names of all tables
:rtype: list(unicode)
"""
if self.folder == ":memory:":
#list all tables that are not "__docs"
conn = self._get_or_create_memory_connection()
cur = conn.cursor()
execute_sql(cur, "SELECT * FROM sqlite_master WHERE type='table';")
tables = []
for row in cur.fetchall():
table_name = row[1]
if not "__docs" in table_name:
tables.append(table_name)
else:
tables = [r[:-1*len(Eventstore.EXTENSION)] for r in list(os.listdir(self.folder)) if r.endswith(Eventstore.EXTENSION)]
return [unicode(r) for r in tables]
def _get_or_create_memory_connection(self):
assert self.folder == ":memory:"
#TODO: when all tests are passing again, do we need memory_connection at all? I don't think so...
if self.memory_connection == None:
self.memory_connection = sqlite3.connect(self.folder, isolation_level="EXCLUSIVE")
return self.memory_connection
def _connect(self, table_name, read_only=True):
"""
Connect to the database with this namespace in it.
"""
if self.folder == ":memory:":
return self._get_or_create_memory_connection()
db_file = os.path.join(self.folder, table_name + Eventstore.EXTENSION)
if read_only:
conn = sqlite3.connect(db_file, isolation_level="DEFERRED")
#TODO: set PRAGMA read_uncommitted = TRUE;
#otherwise can't read while writing
return conn
else:
return sqlite3.connect(db_file, isolation_level="EXCLUSIVE")
def _save_events(self, events, table_name):
"""
Save all events of the same type to the database at once
"""
memdam.log().debug("Saving %s events to %s" % (len(events), table_name))
if len(events) <= 0:
return
assert SqliteColumn.SQL_NAME_REGEX.match(table_name), "Invalid name for table: %s" % (table_name)
key_names = set()
for event in events:
for key in event.keys:
key_names.add(key)
#certain key names are ignored because they are stored implicity in the location of
#this database (user, namespace)
for reserved_name in ("type__namespace", "user__id"):
if reserved_name in key_names:
key_names.remove(reserved_name)
should_update_columns = True
if self.folder != ":memory:":
#does table not exist?
db_file = os.path.join(self.folder, table_name + Eventstore.EXTENSION)
if not os.path.exists(db_file):
#try to acquire lock
lock_file = os.path.join(self.folder, table_name + Eventstore.LOCK_EXTENSION)
lock = lockfile.LockFile(lock_file)
with lock:
#two possible scenarios:
#1. we got the lock AFTER someone else, who already made the table:
if os.path.exists(db_file):
#TODO: move this somewhere more sensible
try:
                            os.remove(lock_file)
except:
pass
#2. we got the lock BEFORE anyone else, so we're responsible for making the table:
else:
should_update_columns = False
#make the table and create the columns
temp_db_file = os.path.join(self.folder, table_name + Eventstore.CREATE_TABLE_EXTENSION)
self._create_database(table_name, key_names, temp_db_file)
#move the file back to it's regular location
os.rename(temp_db_file, db_file)
#TODO: move this somewhere more sensible
try:
                            os.remove(lock_file)
except:
pass
conn = self._connect(table_name, read_only=False)
if should_update_columns:
def update_columns():
cur = conn.cursor()
existing_columns = self._query_existing_columns(cur, table_name)
required_columns = self._generate_columns(cur, key_names, table_name)
self._update_columns(cur, existing_columns, required_columns)
#TODO: use the locking approach for updating as well as creating?
execute_with_retries(update_columns, 5)
cur = conn.cursor()
cur.execute("BEGIN EXCLUSIVE")
self._insert_events(cur, events, key_names, table_name)
conn.commit()
def _create_database(self, table_name, key_names, db_file):
assert self.folder != ":memory:", 'because we don\'t have to do this with memory'
conn = sqlite3.connect(db_file, isolation_level="EXCLUSIVE")
cur = conn.cursor()
#TODO: this should NOT have the side-effect of creating the table, that is just weird
existing_columns = self._query_existing_columns(cur, table_name)
required_columns = self._generate_columns(cur, key_names, table_name)
self._update_columns(cur, existing_columns, required_columns)
def _query_existing_columns(self, cur, table_name):
"""
:param cur: the current writable database cursor
:type cur: sqlite3.Cursor
:returns: a list of SqliteColumn's
"""
columns = {}
execute_sql(cur, "PRAGMA table_info(%s);" % (table_name,))
allrows = cur.fetchall()
if len(allrows) == 0:
self._create_table(cur, table_name)
execute_sql(cur, "PRAGMA table_info(%s);" % (table_name,))
allrows = cur.fetchall()
for row in allrows:
#ignore our unique id row
if row[1] == '_id':
continue
col = SqliteColumn.from_row(row, table_name)
columns[col.name] = col
return columns
def _create_table(self, cur, table_name):
"""
Create a table with the default column (sample_time)
"""
execute_sql(cur, "PRAGMA encoding = 'UTF-8';")
execute_sql(cur, "CREATE TABLE %s(_id INTEGER PRIMARY KEY, time__time INTEGER, id__id STRING);" % (table_name,))
execute_sql(cur, "CREATE INDEX %s__time__time__asc ON %s (time__time ASC);" % (table_name, table_name))
execute_sql(cur, "CREATE INDEX %s__id__id__asc ON %s (id__id ASC);" % (table_name, table_name))
def _generate_columns(self, cur, key_names, table_name):
"""
Make a bunch of SqliteColumn's based on the key names of all of the events
:param cur: the current writable database cursor
:type cur: sqlite3.Cursor
:param key_names: the superset of all key field names
:type key_names: set(string)
:returns: a list of SqliteColumn's
"""
return [SqliteColumn(key, table_name) for key in key_names]
def _update_columns(self, cur, existing_column_map, required_columns):
"""
Modify the schema of the table to include new columns or indices if necessary
"""
for required_column in required_columns:
if required_column.name in existing_column_map:
existing_column = existing_column_map[required_column.name]
assert required_column.sql_type == existing_column.sql_type
else:
required_column.create(cur)
def _insert_events(self, cur, events, key_names, table_name):
"""
Insert all events at once.
Assumes that the schema is correct.
"""
#required because of stupid text fields.
#we need to explicitly set the ids of everything inserted, or iteratively insert and check for lastrowid (which is slow and pathological and will end up doing this effectively anyway I think)
#figure out what the next id to insert should be
cur.execute("SELECT _id FROM %s ORDER BY _id DESC LIMIT 1" % (table_name))
next_row_id = 1
results = cur.fetchall()
if len(results) > 0:
next_row_id = results[0][0] + 1
#need to insert text documents into separate docs tables
for key in key_names:
if memdam.common.event.Event.field_type(key) == memdam.common.field.FieldType.TEXT:
sql = "INSERT INTO %s__%s__docs (docid,data) VALUES (?,?);" % (table_name, key)
values = [(next_row_id + i, getattr(events[i], key, None)) for i in range(0, len(events))]
execute_many(cur, sql, values)
#finally, insert the actual events into the main table
column_names = list(key_names)
column_name_string = ", ".join(column_names)
value_tuple_string = "(" + ", ".join(['?'] * (len(column_names)+1)) + ")"
sql = "INSERT INTO %s (_id, %s) VALUES %s;" % (table_name, column_name_string, value_tuple_string)
values = [make_value_tuple(events[i], key_names, next_row_id + i) for i in range(0, len(events))]
execute_many(cur, sql, values)
#TODO: this whole notion of filters needs to be better thought out
@memdam.vtrace()
def _separate_filters(filters):
field_filters = []
namespaces = []
for f in filters:
if f.rhs == 'namespace__namespace':
assert f.operator == '='
namespaces.append(f.lhs)
elif f.lhs == 'namespace__namespace':
assert f.operator == '='
namespaces.append(f.rhs)
else:
field_filters.append(f)
return field_filters, namespaces
@memdam.vtrace()
def _matches_namespace_filters(table_name, query):
_, namespaces = _separate_filters(query.filters)
if len(namespaces) <= 0:
return True
return table_name_to_namespace(table_name) in namespaces
@memdam.vtrace()
def _get_field_filter_string(field_filters):
#TODO (security): lol so bad.
filter_string = ' AND '.join(('%s %s %s' % (f.lhs, f.operator, f.rhs) for f in field_filters))
return filter_string, ()
@memdam.vtrace()
def make_value_tuple(event, key_names, event_id):
"""Turns an event into a sql value tuple"""
values = [event_id]
for key in key_names:
value = getattr(event, key, None)
if value != None:
#convert time to long for more efficient storage (and so it can be used as a primary key)
if isinstance(value, datetime.datetime):
value = convert_time_to_long(value)
#convert text tuple entries into references to the actual text data
elif memdam.common.event.Event.field_type(key) == memdam.common.field.FieldType.TEXT:
value = event_id
#convert UUIDs to byte representation
elif memdam.common.event.Event.field_type(key) == memdam.common.field.FieldType.ID:
value = buffer(value.bytes)
elif memdam.common.event.Event.field_type(key) == memdam.common.field.FieldType.FILE:
value = value.name
values.append(value)
return values
@memdam.vtrace()
def convert_time_to_long(value):
"""turns a datetime.datetime into a long"""
return long(round(1000000.0 * (value - EPOCH_BEGIN).total_seconds()))
@memdam.vtrace()
def convert_long_to_time(value):
"""turns a long into a datetime.datetime"""
return EPOCH_BEGIN + datetime.timedelta(microseconds=value)
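# Round-trip sanity note (illustrative): the two converters are inverses at
# microsecond resolution, e.g.
#   t = datetime.datetime(2014, 1, 1, tzinfo=pytz.UTC)
#   convert_long_to_time(convert_time_to_long(t)) == t   # -> True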
@memdam.vtrace()
def table_name_to_namespace(table_name):
return table_name.replace(u'_', u'.')
@memdam.vtrace()
def namespace_to_table_name(namespace):
return namespace.replace(u'.', u'_')
@memdam.vtrace()
def _create_event_from_row(row, names, namespace, conn):
"""returns a memdam.common.event.Event, generated from the row"""
data = {}
table_name = namespace_to_table_name(namespace)
for i in range(0, len(names)):
name = names[i]
if name == '_id':
continue
value = row[i]
if value != None:
field_type = memdam.common.event.Event.field_type(name)
if field_type == memdam.common.field.FieldType.TIME:
value = convert_long_to_time(value)
elif field_type == memdam.common.field.FieldType.TEXT:
cur = conn.cursor()
execute_sql(cur, "SELECT data FROM %s__%s__docs WHERE docid = '%s';" % (table_name, name, value))
value = cur.fetchall()[0][0]
elif field_type == memdam.common.field.FieldType.ID:
value = uuid.UUID(bytes=value)
elif field_type == memdam.common.field.FieldType.BOOL:
value = value == 1
elif field_type == memdam.common.field.FieldType.FILE:
parsed_data = value.split('.')
value = memdam.common.blob.BlobReference(uuid.UUID(parsed_data[0]), parsed_data[1])
data[name] = value
data['type__namespace'] = namespace
return memdam.common.event.Event(**data)
EPOCH_BEGIN = datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC)
class SqliteColumn(memdam.Base):
"""
Represents a column in sqlite.
Note that the name here is the raw key name (eg, without the data type or index)
:attr name: the name of the column. No type, no index, none of that nonsense.
:type name: string
:attr data_type: the type of data
:type data_type: memdam.common.field.FieldType
:attr table_name: the name of the table. The namespace for the events
:type table_name: string
"""
SQL_NAME_REGEX = re.compile(r"[a-z][a-z0-9_]*")
data_type_to_sql_type = {
memdam.common.field.FieldType.NUMBER: 'FLOAT',
memdam.common.field.FieldType.STRING: 'TEXT',
#this might seems strange, but it's because we store an index to a document in another table
memdam.common.field.FieldType.TEXT: 'INTEGER',
memdam.common.field.FieldType.ENUM: 'TEXT',
memdam.common.field.FieldType.RAW: 'BLOB',
memdam.common.field.FieldType.BOOL: 'BOOL',
memdam.common.field.FieldType.TIME: 'INTEGER',
memdam.common.field.FieldType.ID: 'TEXT',
memdam.common.field.FieldType.LONG: 'INTEGER',
memdam.common.field.FieldType.FILE: 'TEXT',
memdam.common.field.FieldType.NAMESPACE: 'TEXT',
}
def __init__(self, column_name, table_name):
self.column_name = column_name
name = memdam.common.event.Event.raw_name(column_name)
assert SqliteColumn.SQL_NAME_REGEX.match(name), "Invalid name for column: %s" % (name)
self.name = name
self.data_type = memdam.common.event.Event.field_type(column_name)
assert SqliteColumn.SQL_NAME_REGEX.match(name), "Invalid name for table: %s" % (table_name)
self.table_name = table_name
@property
def is_text(self):
"""
:returns: True iff this is a text "column", which must be handled specially
"""
return self.data_type == memdam.common.field.FieldType.TEXT
def create(self, cur):
"""
Create the column and index.
Only call if the column and index don't already exist.
"""
if self.is_text:
execute_sql(cur, "CREATE VIRTUAL TABLE %s__%s__docs USING fts4(data,tokenize=porter);" % (self.table_name, self.column_name))
execute_sql(cur, "ALTER TABLE %s ADD COLUMN %s %s;" % (self.table_name, self.column_name, self.sql_type))
if self.sql_index != None:
index_name = self.table_name + "__" + self.column_name + "__" + self.sql_index
execute_sql(cur, "CREATE INDEX %s ON %s (%s %s);" % (index_name, self.table_name, self.column_name, self.sql_index))
def __repr__(self):
data_type_name = memdam.common.field.FieldType.names[self.data_type]
return "SqliteColumn(%s/%s/%s)" % (self.table_name, self.name, data_type_name)
def __str__(self):
return self.__repr__()
@property
def sql_type(self):
"""
:returns: the sqlite type corresponding to our data_type
:rtype: string
"""
return self.data_type_to_sql_type[self.data_type]
@property
def sql_index(self):
"""
Note: everything returns ASC because the only alternative is FTS, which is handled specially
and ends up making an ASC index on the column anyway.
:returns: the sqlite type corresponding to our index type
:rtype: string
"""
if self.data_type == memdam.common.field.FieldType.RAW:
return None
return 'ASC'
@staticmethod
def from_row(row, table_name):
"""
Alternative constructor from a sqlite row.
"""
column_name = row[1]
return SqliteColumn(column_name, table_name)
@memdam.vtrace()
def execute_with_retries(command, num_retries=3, retry_wait_time=0.1, retry_growth_rate=2.0):
"""
Try to accomplish the command a few times before giving up.
"""
retry = 0
last_exception = None
while retry < num_retries:
try:
return command()
except Exception, e:
last_exception = e
time.sleep(retry_wait_time)
retry_wait_time *= retry_growth_rate
else:
break
retry += 1
raise last_exception
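

# Illustrative sketch -- not part of the original module. Shows the intended
# save/query cycle against the in-memory backend; real callers would pass
# memdam.common.event.Event instances to save().
def _eventstore_example(events=()):
    store = Eventstore(':memory:')
    store.save(list(events))            # one sqlite table per namespace
    print(store._all_table_names())
    return store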
| gpl-2.0 | -464,807,978,104,279,200 | 41.659381 | 199 | 0.598719 | false | 3.805037 | false | false | false |
Fmakdemir/f-qr-fixer | fqrfixer.py | 1 | 14438 | #!/usr/bin/env python3
#from __future__ import division
import PIL
import sys
import os
import argparse
import numpy as np
def print_fqr_format():
print('''fqr file format:
*...xxx***x*x**x
xx****xxxx*..***
'x' or 'X' => black
'.' => white
'*' => unknown
It should be an NxN matrix with only 'x', '.' and '*' characters
Spaces around lines will be erased and empty lines will be ignored
Size must be NxN where N is (4*qr_version+17) meaning 21, 25, 29..., 177
1<=qr_version<=40
''')
class MalformedFQRException(Exception):
def __init__(self, msg):
super(MalformedFQRException, self).__init__(msg)
# calculate mask val at pos i, j with mask k
def get_mask(k):
if k == 0:
return lambda i, j: (i + j) % 2 == 0
if k == 1:
return lambda i, j: i % 2 == 0
if k == 2:
return lambda i, j: j % 3 == 0
if k == 3:
return lambda i, j: (i + j) % 3 == 0
if k == 4:
return lambda i, j: (i // 2 + j // 3) % 2 == 0
if k == 5:
return lambda i, j: (i * j) % 2 + (i * j) % 3 == 0
if k == 6:
return lambda i, j: ((i * j) % 2 + (i * j) % 3) % 2 == 0
if k == 7:
return lambda i, j: ((i * j) % 3 + (i + j) % 2) % 2 == 0
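# Quick illustration (not in the original script): mask 0 toggles modules in
# a checkerboard pattern, so
#   get_mask(0)(0, 0) -> True,  get_mask(0)(0, 1) -> False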
def bin_ar_to_int(bin_ar):
bs = ''.join(bin_ar).replace('x', '1').replace('.', '0')
return int(bs, 2)
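# e.g. bin_ar_to_int('x.x') == 0b101 == 5 (illustrative note)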
class FQR(object):
FINDER_POS = ['LT', 'RT', 'LB', 'LT']
FINDER_POS_PATTERN = np.array([ list(x) for x in [
'xxxxxxx',
'x.....x',
'x.xxx.x',
'x.xxx.x',
'x.xxx.x',
'x.....x',
'xxxxxxx'
]
])
ALIGN_PATTERN = np.array([ list(x) for x in [
'xxxxx',
'x...x',
'x.x.x',
'x...x',
'xxxxx'
]
])
# version, location list
ALIGN_PATTERN_LOC = [
(2, [6, 18]),
(3, [6, 22]),
(4, [6, 26]),
(5, [6, 30]),
(6, [6, 34]),
(7, [6, 22, 38]),
(8, [6, 24, 42]),
(9, [6, 26, 46]),
(10, [6, 28, 50]),
(11, [6, 30, 54]),
(12, [6, 32, 58]),
(13, [6, 34, 62]),
(14, [6, 26, 46, 66]),
(15, [6, 26, 48, 70]),
(16, [6, 26, 50, 74]),
(17, [6, 30, 54, 78]),
(18, [6, 30, 56, 82]),
(19, [6, 30, 58, 86]),
(20, [6, 34, 62, 90]),
(21, [6, 28, 50, 72, 94]),
(22, [6, 26, 50, 74, 98]),
(23, [6, 30, 54, 78, 102]),
(24, [6, 28, 54, 80, 106]),
(25, [6, 32, 58, 84, 110]),
(26, [6, 30, 58, 86, 114]),
(27, [6, 34, 62, 90, 118]),
(28, [6, 26, 50, 74, 98, 122]),
(29, [6, 30, 54, 78, 102, 126]),
(30, [6, 26, 52, 78, 104, 130]),
(31, [6, 30, 56, 82, 108, 134]),
(32, [6, 34, 60, 86, 112, 138]),
(33, [6, 30, 58, 86, 114, 142]),
(34, [6, 34, 62, 90, 118, 146]),
(35, [6, 30, 54, 78, 102, 126]),
(36, [6, 24, 50, 76, 102, 128]),
(37, [6, 28, 54, 80, 106, 132]),
(38, [6, 32, 58, 84, 110, 136]),
(39, [6, 26, 54, 82, 110, 138]),
(40, [6, 30, 58, 86, 114, 142])
]
BLACK = ord('x')
WHITE = ord('.')
UNKNW = ord('*')
# Error Correction Level, mask, format string
FORMATS = [
('L', 0, 'xxx.xxxxx...x..'),
('L', 1, 'xxx..x.xxxx..xx'),
('L', 2, 'xxxxx.xx.x.x.x.'),
('L', 3, 'xxxx...x..xxx.x'),
('L', 4, 'xx..xx...x.xxxx'),
('L', 5, 'xx...xx...xx...'),
('L', 6, 'xx.xx...x.....x'),
('L', 7, 'xx.x..x.xxx.xx.'),
('M', 0, 'x.x.x.....x..x.'),
('M', 1, 'x.x...x..x..x.x'),
('M', 2, 'x.xxxx..xxxxx..'),
('M', 3, 'x.xx.xx.x..x.xx'),
('M', 4, 'x...x.xxxxxx..x'),
('M', 5, 'x......xx..xxx.'),
('M', 6, 'x..xxxxx..x.xxx'),
('M', 7, 'x..x.x.x.x.....'),
('Q', 0, '.xx.x.x.x.xxxxx'),
('Q', 1, '.xx.....xx.x...'),
('Q', 2, '.xxxxxx..xx...x'),
('Q', 3, '.xxx.x......xx.'),
('Q', 4, '.x..x..x.xx.x..'),
('Q', 5, '.x....xx.....xx'),
('Q', 6, '.x.xxx.xx.xx.x.'),
('Q', 7, '.x.x.xxxxx.xx.x'),
('H', 0, '..x.xx.x...x..x'),
('H', 1, '..x..xxx.xxxxx.'),
('H', 2, '..xxx..xxx..xxx'),
('H', 3, '..xx..xxx.x....'),
('H', 4, '....xxx.xx...x.'),
('H', 5, '.....x..x.x.x.x'),
('H', 6, '...xx.x....xx..'),
('H', 7, '...x.....xxx.xx')
]
# bit encryption modes
MODES = {
'0001':'numeric',
'0010':'alphanumeric',
'0100':'byte',
'1000':'kanji',
'0000':'terminator'
}
ALPHANUM = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:'
@staticmethod
def get_char_count_ind_len(mode, version):
mode = 4-mode.find('1') # fix this but too lazy now
# I first wrote as 1 2 3 4 then converted to 0001 strings upper line is a quick fix
if version < 10:
if mode == 1: return 10
if mode == 2: return 9
if mode == 3: return 8
if mode == 4: return 8
if version < 27:
if mode == 1: return 12
if mode == 2: return 11
if mode == 3: return 16
if mode == 4: return 10
if mode == 1: return 14
if mode == 2: return 13
if mode == 3: return 16
if mode == 4: return 12
def __init__(self, path=None):
self.dirty = True
self.N = -1
self.qr = []
# corner position
self.pos_finderp = [] # 0: LT, 1: RT, 2: LB, 3: LT as in FINDER_POS
# align position
self.pos_align = [] # 0,1,... depends on version
if path is not None:
self.load_qr(path)
def get_qr(self):
return self.qr
@staticmethod
def print_qr(qr):
print('\n'+'\n'.join([ ''.join(x) for x in qr])+'\n')
# '*' in mstr will ignored cstr can't have '*'
@staticmethod
def _qstr_match(cstr, mstr):
cstr = ''.join(cstr)
mstr = ''.join(mstr)
for a, b in zip(cstr, mstr):
if a != '*' and a != b:
return False
return True
@staticmethod
def size2version(N):
error = 'Size is invalid must be N = (4*version + 17) and NxN N='+str(N)
N -= 17
if N % 4 != 0:
raise MalformedFQRException(error)
        N //= 4
        if N < 1 or N > 40:
            raise MalformedFQRException('Unknown version: %s' % N)
        return N
@staticmethod
def version2size(N):
return 4*N+17
# if path is set save image to path
@staticmethod
def save_qr_img(qr, path=None):
        dqr = qr.copy()  # real copy: a [:, :] slice is only a view and would mutate qr
dqr[dqr == 'x'] = '0' # turn str array to color array
dqr[dqr == '.'] = '1'
dqr[dqr == '*'] = '2'
dqr = dqr.astype(np.uint32)
dqr[dqr == 0] = 0
dqr[dqr == 1] = 255
dqr[dqr == 2] = 128
from PIL import Image
N = len(dqr)
nqr = np.zeros((N*8, N*8)) # x8 zoom image
for i in range(N*8):
for j in range(N*8):
nqr[i, j] = dqr[i//8, j//8]
if nqr[i, j] == 128:
nqr[i, j] = ((i+j)%2)*255
img = Image.fromarray(np.uint8(nqr))
if path is None:
img.show()
else:
img.save(path)
def load_qr(self, path):
self.dirty = True
with open(path, 'r') as f:
# read non empty lines, erase end of lines
self.qr = np.array([ list( x.strip('|\n').lower() ) for x in f.readlines() if len(x)>1])
self.N = len(self.qr)
self.version = FQR.size2version(self.N)
print("Version:", self.version, "\nSize: {0}x{0}".format(self.N), "\n")
error = ''
for line in self.qr:
print(''.join(line))
if len(line) != self.N:
error = 'Dimensions does not match: line_len, N: '+str(len(line))+', '+str(self.N)
elif any(ch not in 'x.*' for ch in line):
error = 'Not allowed character(s): ' + ', '.join([ch for ch in line if ch not in 'x.*'])
if error != '':
raise MalformedFQRException(error)
self.dirty = False
        self.bc_qr = self.qr.copy()  # take a copy for reversing
print('FQR file loaded successfully:', path, '\n')
# TODO: make this accept a percentage of matches i.e there can be * in there
# TODO: add this timing finder as well so as to more accurate results
def find_positioning(self):
s_qr = self.qr[:7, :7]
if np.array_equal(FQR.FINDER_POS_PATTERN, s_qr):
print('Position found: LT')
self.pos_finderp.append(0)
s_qr = self.qr[:7, -7:]
if np.array_equal(FQR.FINDER_POS_PATTERN, s_qr):
print('Position found: RT')
self.pos_finderp.append(1)
s_qr = self.qr[-7:, :7]
if np.array_equal(FQR.FINDER_POS_PATTERN, s_qr):
print('Position found: LB')
self.pos_finderp.append(2)
s_qr = self.qr[-7:, -7:]
if np.array_equal(FQR.FINDER_POS_PATTERN, s_qr):
print('Position found: RB')
self.pos_finderp.append(3)
# get not found corners
miss_finder = [x for x in range(4) if x not in self.pos_finderp]
return miss_finder
# assumes alignment is found
# need to check other format positions currently only RT is checked
def find_format(self):
fstr = ''.join(self.qr[8, -8:])
res = []
for f in FQR.FORMATS:
print(f)
print(fstr)
print(f[2][-len(fstr):])
print()
if self._qstr_match(f[2][-len(fstr):], fstr):
res.append(f)
return res
    def fix_rotation(self, align, qr=None):
        num_turns = [2, 1, 3, 0]
        if qr is None:
            # np.rot90 returns a rotated copy, so the result must be stored back
            self.qr = np.rot90(self.qr, num_turns[align])
            return self.qr
        return np.rot90(qr, num_turns[align])
# assumes rotation is already fixed and fixes finder patterns
def fix_position_patterns(self, qr=None):
if qr is None:
qr = self.qr
#fix LT
qr[:7, :7] = FQR.FINDER_POS_PATTERN[:, :]
for i in range(8):
qr[7][i] = qr[i][7] = '.'
# fix RT
qr[:7, -7:] = FQR.FINDER_POS_PATTERN[:, :]
for i in range(8):
qr[7][-i-1] = qr[i][ -8] = '.'
# fix LB
qr[-7:, :7] = FQR.FINDER_POS_PATTERN[:, :]
for i in range(8):
qr[-i-1][7] = qr[-8][i] = '.'
# RB is always empty
def fix_finder_patterns(self, qr=None):
if qr is None:
qr = self.qr
pass
def fix_timing_patterns(self, qr=None):
if qr is None:
qr = self.qr
for i in range(7, len(qr)-7):
p = ('x' if i%2 == 0 else '.')
qr[i][6] = qr[6][i] = p
def fix_format(self, f, qr=None):
if qr is None:
qr = self.qr
fs = np.array(list(f))
print('Fixing format with:', fs)
qr[8, :6] = fs[:6]
qr[8, 7:9] = fs[6:8]
qr[7, 8] = fs[8]
qr[8, -8:] = fs[-8:]
qr[:6, 8] = np.transpose(fs[-6:])[::-1]
qr[-7:, 8] = np.transpose(fs[:7])[::-1]
def fix_alignment_patterns(self, qr=None):
if qr is None:
qr = self.qr
if len(qr) <= 21: # these dont have align patterns
return
locs = None
for l in FQR.ALIGN_PATTERN_LOC:
if self.version == l[0]:
locs = l[1]
break
loc1 = locs[0] # first loc
locN = locs[len(locs)-1] # last loc
for i in locs:
for j in locs:
if i == loc1 and (j == loc1 or j == locN):
continue
elif i == locN and j == loc1:
continue
qr[i-2:i+3, j-2:j+3] = FQR.ALIGN_PATTERN[:, :]
def fix_dark_module(self, qr=None):
if qr is None:
qr = self.qr
qr[4*self.version+9][8] = 'x'
@staticmethod
def get_next_bit(qr):
N = len(qr)
j = N-1
while j > 0:
if j == 6: # skip vertical timing patt.
j -= 1
for i in range(N-1, -1, -1):
yield i, j
yield i, j-1
j -= 2
for i in range(0, N, 1):
yield i, j
yield i, j-1
j -= 2
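    # Illustrative check (not in the original script): for a 21x21 array the
    # generator starts at the bottom-right corner and snakes upward two
    # columns at a time, e.g. (using itertools)
    #   list(itertools.islice(FQR.get_next_bit(np.zeros((21, 21))), 4))
    #   -> [(20, 20), (20, 19), (19, 20), (19, 19)]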
def try_read(self):
# generate protected area of qr code by mimicing fixes
pr_qr = np.zeros(self.qr.shape, dtype=str)
        self.fix_dark_module(pr_qr)
self.fix_position_patterns(pr_qr)
self.fix_alignment_patterns(pr_qr)
self.fix_finder_patterns(pr_qr)
self.fix_timing_patterns(pr_qr)
self.fix_format('...............', pr_qr)
# convert string to truth values
is_data = (pr_qr == '')
mask = get_mask(self.format[1])
d = ''
for i, j in FQR.get_next_bit(self.qr):
if not is_data[i][j]:
continue
c = self.qr[i][j]
m = mask(i, j)
if not m:
d += c
elif c == 'x':
d += '.'
else:
d += 'x'
### TODO find a better solution for here sinde data segments are constant
ds = d[:26*8].replace('x', '1').replace('.', '0')
# re arrange d1-d13 and d14-d26
d = ''
for i in range(0, len(ds), 16):
d += ds[i:i+8]
for i in range(8, len(ds), 16):
d += ds[i:i+8]
ds = d
print('Read valid data: ', ds)
LDS = len(ds)
k = 0
res = ''
while k < LDS:
mode = ds[k:k+4]
k += 4
print(k, 'Read: ', ds[:k])
ds = ds[k:]
k = 0
if mode not in FQR.MODES:
raise TypeError('Bits are broken unknown mode: '+mode)
if mode == '0000':
print('Found:', res)
return res
print('Mode:', FQR.MODES[mode])
ind_len = FQR.get_char_count_ind_len(mode, self.version)
char_cnt = bin_ar_to_int(ds[k:k+ind_len])
k += ind_len
print('Ind len:', ind_len)
print('Char count:', char_cnt)
if mode == '0001': # numeric
for t in range(char_cnt):
raise NotImplementedError('will look how to do later')
k += 3
elif mode == '0010': # alphanumeric
for t in range(char_cnt//2):
x = bin_ar_to_int(ds[k:k+11])
x1 = x//45
x2 = x%45
c1 = FQR.ALPHANUM[x1]
res += c1
c2 = FQR.ALPHANUM[x2]
res += c2
print('ch1:', c1, x1)
print('ch2:', c2, x2)
k += 11
if char_cnt % 2 == 1:
x = bin_ar_to_int(ds[k:k+11])
print('ch3:', FQR.ALPHANUM[x], x)
res += FQR.ALPHANUM[x]
k += 11
elif mode == '0100': # byte
for t in range(char_cnt):
x = bin_ar_to_int(ds[k:k+8])
c = chr(x)
res += c
k += 8
print('ch0:', c, x, ds[k-8:k])
elif mode == '1000': # kanji
raise NotImplementedError('will look how to do later (sorry you bumped into one using :)')
def fix_qr(self):
poses = self.find_positioning()
poses = [3]
for p in poses:
print('Trying alignment:', p)
            bc_qr = self.qr.copy()
self.fix_rotation(p)
self.fix_dark_module()
self.fix_position_patterns()
self.fix_alignment_patterns()
self.fix_finder_patterns()
self.fix_timing_patterns()
fmts = self.find_format()
if len(fmts) == 0:
print('no matching format for: ', p)
continue
for f in fmts:
print('Trying format:', f)
                fbc_qr = self.qr.copy()
self.format = f
self.fix_format(self.format[2])
res = self.try_read()
if res is not None:
return res
self.qr = fbc_qr
self.qr = bc_qr
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', help='FQR file to fix')
parser.add_argument('-g','--gen-qr', action='store', type=int, help='generate empty fqr matrix')
parser.add_argument('--show-format', action='store_true', help='shows fqr matrix format')
args = parser.parse_args()
if len(sys.argv) == 1:
parser.print_help()
sys.exit(0)
if args.gen_qr:
N = args.gen_qr
if N < 1: N = 1
if N > 40: N = 40
N = N*4+17
qr = [['*' for col in range(N)] for row in range(N)]
qr_str = '\n'.join([''.join(s) for s in qr])+'\n'
if args.file:
with open(args.file, 'w') as f:
f.write(qr_str)
else:
print(qr_str)
sys.exit(0)
if args.show_format:
print_fqr_format()
sys.exit(0)
fqr = FQR(args.file)
res = fqr.fix_qr()
print('Result:', res)
#fqr.print_qr(fqr.get_qr())
FQR.save_qr_img(fqr.get_qr(), args.file+'-fixed.png')
'''
TODO LIST
* for each possible fqr matrix we will try to fix it by
** trying possible missing bits
** give possible results (with filters such as visible ascii)
'''
| mit | 6,055,672,657,051,218,000 | 24.241259 | 97 | 0.541418 | false | 2.392774 | false | false | false |
Hazardius/pySimpleSpatialFactsBase | pssfb_spacial_fact.py | 1 | 3373 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" File containing simple spacial fact definition. """
from pssfb_additional import enum
# RCC5
# DR - disconnected
# PO - partially overlapping
# PP - proper part
# PPI - proper part inversed
# EQ - equal
rel_type = enum(DR = "DR", PO = "PO", PP = "PP", PPI = "PPI", EQ = "EQ")
ALL_RELATIONS = set([rel_type.DR, rel_type.PO, rel_type.PP, rel_type.PPI, rel_type.EQ])
class spacial_fact(object):
f_subject = None
f_object = None
f_relation = None
def __init__(self, sub, rel, obj):
"""Constructor."""
self.f_subject = sub
self.f_object = obj
        if isinstance(rel, set):
self.f_relation = set() | rel
else:
self.f_relation = set()
self.f_relation.add(rel)
def __repr__(self):
return str(self.f_subject) + " " + str(self.f_relation) + " " + str(self.f_object)
def get_id(self):
return str(self.f_subject) + " " + str(self.f_relation).replace("', '", ",").replace("set(['", "--").replace("'])", "->") + " " + str(self.f_object)
def compose(self, second_fact):
if str(self.f_object) == str(second_fact.f_subject):
new_rel = set()
for one_fr_rel in self.f_relation:
if new_rel == ALL_RELATIONS:
break;
for one_to_rel in second_fact.f_relation:
new_rel = new_rel | _compose_relations_(one_fr_rel, one_to_rel)
return spacial_fact(self.f_subject, new_rel, second_fact.f_object)
else:
# Tried to compose facts without common part!
return None
def _compose_(prev_rel_set, next_rel_set):
new_rel = set()
for one_fr_rel in prev_rel_set:
if new_rel == ALL_RELATIONS:
break;
for one_to_rel in next_rel_set:
new_rel = new_rel | _compose_relations_(one_fr_rel, one_to_rel)
return new_rel
def _compose_relations_(prev_rel, next_rel):
""" Typical for RCC5. """
if next_rel == rel_type.EQ:
return set([prev_rel])
elif prev_rel == rel_type.EQ:
return set([next_rel])
elif next_rel == rel_type.PPI:
if prev_rel == rel_type.PP:
return ALL_RELATIONS
elif prev_rel == rel_type.PO:
return set([rel_type.DR, rel_type.PO, rel_type.PPI])
elif prev_rel == rel_type.DR:
return set([prev_rel])
else:
return set([next_rel])
elif next_rel == rel_type.PP:
if prev_rel == rel_type.DR:
return set([rel_type.DR, rel_type.PO, rel_type.PP])
elif prev_rel == rel_type.PO:
return set([rel_type.PO, rel_type.PP])
elif prev_rel == rel_type.PPI:
return set([rel_type.PO, rel_type.PP, rel_type.PPI, rel_type.EQ])
else:
return set([next_rel])
elif next_rel == rel_type.PO:
if prev_rel == rel_type.PO:
return ALL_RELATIONS
elif prev_rel == rel_type.PPI:
return set([rel_type.PO, rel_type.PPI])
else:
return set([rel_type.DR, rel_type.PO, rel_type.PP])
else:
if prev_rel == rel_type.DR:
return ALL_RELATIONS
elif prev_rel == rel_type.PP:
return set([next_rel])
else:
return set([rel_type.DR, rel_type.PO, rel_type.PPI])
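

# Illustrative usage sketch -- not part of the original module.
# Composing "A PP B" with "B PP C" must yield "A PP C" under RCC5.
def _compose_example():
    f1 = spacial_fact('A', rel_type.PP, 'B')
    f2 = spacial_fact('B', rel_type.PP, 'C')
    composed = f1.compose(f2)       # facts share the middle region B
    print(composed)                 # -> A set(['PP']) C
    return composed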
| mit | 4,269,032,095,824,677,000 | 33.773196 | 156 | 0.54373 | false | 3.206274 | false | false | false |
spreeker/democracygame | democracy/gamelogic/score.py | 1 | 1385 | """
This module implements the Emocracy game rules as far as score keeping is
concerned. The rest of the game rules are in actions.py
this module needs to get a lot bigger..
"""
from gamelogic.levels import change_score
VOTE_SCORE = 1
USER_VOTE_SCORE = 20
TAG_SCORE = 1
PROPOSE_SCORE = 2
PROPOSE_VOTE_SCORE = 1
ISSUE_VOTE_SCORE = 1
def vote(user, issue, direction, voted_already):
    """Score keeping for voting."""
    if not voted_already:
        # User only gets points if it is the first vote on the issue.
        change_score(user, VOTE_SCORE)
if direction in [-1, 1]:
# Proposer only gets points if the issue gets a for or against vote
            change_score(issue.user, PROPOSE_VOTE_SCORE)
issue.score += ISSUE_VOTE_SCORE
# Write all changes back to the database.
issue.save()
def vote_user(user, voted_user, direction, voted_already):
"""score keeping for voting on an other user
"""
if not voted_already:
# User only gets points if user is the first vote.
change_score(voted_user, USER_VOTE_SCORE)
change_score(user, USER_VOTE_SCORE)
change_score(voted_user, 0) #check parelement score voted_user
def propose(user):
"""Score keeping for proposing of issues"""
change_score(user, PROPOSE_SCORE)
def tag(user, tag):
pass
def multiply(user, issue):
pass
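
# Illustrative note (not part of the original module): a typical first-time
# 'for' vote credits voter, proposer and issue at once, e.g.
#   vote(user, issue, direction=1, voted_already=False)
# where `user` and `issue` are the usual Django model instances.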
| bsd-3-clause | 8,474,513,008,962,748,000 | 26.7 | 79 | 0.666426 | false | 3.329327 | false | false | false |
ken0-1n/GenomonHotspotCall | lib/hotspotCall/fisher_info.py | 1 | 9661 |
#! /usr/bin/env python
import sys
import math
import numpy
from scipy.stats import fisher_exact as fisher
import re
target = re.compile( '([\+\-])([0-9]+)([ACGTNRMacgtnrm]+)' )
remove_chr = re.compile( '\^.' )
class FisherInfo:
def __init__(self):
self.chr = 0
self.start = 0
self.end = 0
self.ref = ""
self.tumor_bases = {
"A": 0,
"C": 0,
"G": 0,
"T": 0,
"a": 0,
"c": 0,
"g": 0,
"t": 0
}
self.ctrl_bases = {
"A": 0,
"C": 0,
"G": 0,
"T": 0,
"a": 0,
"c": 0,
"g": 0,
"t": 0
}
self.rna_bases = {
"A": 0,
"C": 0,
"G": 0,
"T": 0,
"a": 0,
"c": 0,
"g": 0,
"t": 0
}
self.tumor_quals = {
"A": [],
"C": [],
"G": [],
"T": [],
"a": [],
"c": [],
"g": [],
"t": []
}
self.ctrl_quals = {
"A": [],
"C": [],
"G": [],
"T": [],
"a": [],
"c": [],
"g": [],
"t": []
}
self.rna_quals = {
"A": [],
"C": [],
"G": [],
"T": [],
"a": [],
"c": [],
"g": [],
"t": []
}
def bases_format_process(self, read_bases, qual_list):
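        # Strip indel markers such as '+2AC' or '-1a' out of the mpileup base
        # string; 'deleted' counts the characters already removed so that the
        # match offsets taken from the original string remain valid.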
deleted = 0
iter = target.finditer( read_bases )
for m in iter:
site = m.start()
type = m.group( 1 )
num = m.group( 2 )
bases = m.group( 3 )[ 0:int( num ) ]
read_bases = read_bases[ 0:site - deleted ] + read_bases[ site + int( num ) + len( num ) + 1 - deleted: ]
deleted += 1 + len( num ) + int( num )
# Remove '^.' and '$'
read_bases = remove_chr.sub( '', read_bases )
read_bases = read_bases.translate( None, '$' )
# Error check
if len( read_bases ) != len( qual_list ):
            print >> sys.stderr, ("mpileup data is not good: {0}, {1}".format( read_bases, qual_list ))
return None
# Count mismatch
return read_bases
def set_mpileup_data(self, mp_list):
# Prepare mpileup data
self.chr = mp_list[0]
self.start = mp_list[1]
self.end = mp_list[1]
tumor_bases = self.bases_format_process(mp_list[4], mp_list[5])
for base in tumor_bases:
self.add_tumor_base(base)
for base, qual in zip(tumor_bases, mp_list[5]):
self.add_tumor_quals(base, qual)
if len(mp_list) > 7:
ctrl_bases = self.bases_format_process(mp_list[7], mp_list[8])
for base in ctrl_bases:
self.add_ctrl_base(base)
for base, qual in zip(ctrl_bases, mp_list[8]):
self.add_ctrl_quals(base, qual)
if len(mp_list) > 10:
rna_bases = self.bases_format_process(mp_list[10], mp_list[11])
for base in rna_bases:
self.add_rna_base(base)
for base, qual in zip(rna_bases, mp_list[11]):
self.add_rna_quals(base, qual)
def set_ref(self,ref):
self.ref = ref
def add_base(self,bases,base):
if base in 'ATGCatgc':
bases[base] += 1
def add_tumor_base(self, base):
self.add_base(self.tumor_bases, base)
def add_ctrl_base(self, base):
self.add_base(self.ctrl_bases, base)
def add_rna_base(self, base):
self.add_base(self.rna_bases, base)
def add_quals(self, quals, base, qual):
if base in 'ATGCatgc':
ord_qual = (int(ord(qual))-33)
q = quals[base]
q.append(min(ord_qual,41))
def add_tumor_quals(self, base, qual):
self.add_quals(self.tumor_quals, base, qual)
def add_ctrl_quals(self, base, qual):
self.add_quals(self.ctrl_quals, base, qual)
def add_rna_quals(self, base, qual):
self.add_quals(self.rna_quals, base, qual)
def get_depth(self, bases):
count = 0
for n in "ACGTacgt":
count += bases[n]
return count
def get_tumor_depth(self):
return self.get_depth(self.tumor_bases)
def get_ctrl_depth(self):
return self.get_depth(self.ctrl_bases)
def get_rna_depth(self):
return self.get_depth(self.rna_bases)
def get_depth_plus_strand(self, bases):
count = 0
for n in "ACGT":
count += bases[n]
return count
def get_tumor_depth_plus_strand(self):
return self.get_depth_plus_strand(self.tumor_bases)
def get_ctrl_depth_plus_strand(self):
return self.get_depth_plus_strand(self.ctrl_bases)
def get_rna_depth_plus_strand(self):
return self.get_depth_plus_strand(self.rna_bases)
def get_depth_minus_strand(self, bases):
count = 0
for n in "acgt":
count += bases[n]
return count
def get_tumor_depth_minus_strand(self):
return self.get_depth_minus_strand(self.tumor_bases)
def get_ctrl_depth_minus_strand(self):
return self.get_depth_minus_strand(self.ctrl_bases)
def get_rna_depth_minus_strand(self):
return self.get_depth_minus_strand(self.rna_bases)
def get_tumor_base_total(self, base):
return (self.tumor_bases[base.upper()] + self.tumor_bases[base.lower()])
def get_ctrl_base_total(self, base):
return (self.ctrl_bases[base.upper()] + self.ctrl_bases[base.lower()])
def get_rna_base_total(self, base):
return (self.rna_bases[base.upper()] + self.rna_bases[base.lower()])
def get_tumor_base_plus_strand(self, base):
return (self.tumor_bases[base.upper()])
def get_ctrl_base_plus_strand(self, base):
return (self.ctrl_bases[base.upper()])
def get_rna_base_plus_strand(self, base):
return (self.rna_bases[base.upper()])
def get_tumor_base_minus_strand(self, base):
return (self.tumor_bases[base.lower()])
def get_ctrl_base_minus_strand(self, base):
return (self.ctrl_bases[base.lower()])
def get_rna_base_minus_strand(self, base):
return (self.rna_bases[base.lower()])
def get_misrate(self,mis_base_count,depth):
if mis_base_count == 0:
return float(0)
else:
return (mis_base_count / float(depth))
def get_tumor_misrate(self,base):
return self.get_misrate(self.get_tumor_base_total(base), self.get_tumor_depth())
def get_ctrl_misrate(self,base):
return self.get_misrate(self.get_ctrl_base_total(base), self.get_ctrl_depth())
def get_rna_misrate(self,base):
return self.get_misrate(self.get_rna_base_total(base), self.get_rna_depth())
def get_strand_ratio(self,mis_base_count_plus,mis_base_count_minus):
if (mis_base_count_plus + mis_base_count_minus) == 0:
return float(-1)
elif mis_base_count_plus == 0:
return float(0)
else:
return (mis_base_count_plus / float(mis_base_count_plus + mis_base_count_minus))
def get_tumor_strand_ratio(self,base):
return self.get_strand_ratio(self.get_tumor_base_plus_strand(base), self.get_tumor_base_minus_strand(base))
def get_ctrl_strand_ratio(self, base):
return self.get_strand_ratio(self.get_ctrl_base_plus_strand(base), self.get_ctrl_base_minus_strand(base))
def get_rna_strand_ratio(self, base):
return self.get_strand_ratio(self.get_rna_base_plus_strand(base), self.get_rna_base_minus_strand(base))
def get_fisher_pvalue(self,base):
odds_ratio, fisher_pvalue = fisher(
((int(self.get_tumor_base_total(self.ref)), int(self.get_ctrl_base_total(self.ref))),
(int(self.get_tumor_base_total(base)), int(self.get_ctrl_base_total(base)))),
alternative='two-sided'
)
val = float(0.0)
if fisher_pvalue < 10**(-60):
val = float(60.0)
elif fisher_pvalue > 1.0 - 10**(-10) :
val = float(0.0)
else:
val = -math.log( fisher_pvalue, 10 )
return val
def lod_qual(self, base):
score = float(0)
for qual in self.tumor_quals[base]:
q = float(qual)
p = 10**-(q/10)
score += -math.log(p/(1-p),10)
return score
def get_lod_score(self,base):
return (self.lod_qual(base.upper()) + self.lod_qual(base.lower()))
def get_lod_score_plus_strand(self,base):
return self.lod_qual(base.upper())
def get_lod_score_minus_strand(self,base):
return self.lod_qual(base.lower())
def get_score_median(self,base):
med = 0
if len(self.tumor_quals[base]) != 0 or len(self.tumor_quals[base.lower()]) != 0:
alt_array = self.tumor_quals[base] + self.tumor_quals[base.lower()]
med = numpy.median(alt_array)
return med
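

# Illustrative sketch -- not part of the original module. The mpileup fields
# below are fabricated but follow the layout consumed by set_mpileup_data
# (chrom, pos, ref, tumor depth/bases/quals, normal depth/bases/quals).
def _fisher_info_example():
    fi = FisherInfo()
    fi.set_ref('A')
    mp = ['chr1', '100', 'A',
          '10', 'AAAAAAaaCC', 'IIIIIIIIII',
          '10', 'AAAAAAAAaa', 'IIIIIIIIII']
    fi.set_mpileup_data(mp)
    print(fi.get_tumor_misrate('C'))     # alt-allele frequency in the tumor
    print(fi.get_fisher_pvalue('C'))     # -log10 Fisher p-value vs normal
    return fi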
| gpl-3.0 | 432,586,200,704,145,800 | 30.366883 | 117 | 0.495497 | false | 3.323357 | false | false | false |
Axilent/ace-client | ace/plugins/dox/utils.py | 1 | 3668 | """
Utilities for Dox.
"""
from ace import config
import os.path
from os import getcwd, mkdir, remove, walk
import hashlib
import json
def check_init():
"""
Checks if Dox has been properly initialized.
"""
env = config.get_env()
if not env.has_option('Project','project'):
raise ValueError('Project not set. Set project with `ace project set` command.')
if not env.has_section('Dox'):
raise ValueError('Dox not initalized. Initialze Dox with `ace dox init --content-type=<content-type> --body-field=<body-field> --key-field=<key-field>` command.')
def dox_dir():
"""
Gets or creates the .dox directory.
"""
dox_dirpath = os.path.join(getcwd(),'.dox')
if not os.path.exists(dox_dirpath):
mkdir(dox_dirpath)
return dox_dirpath
def is_modified(markdown_file_path):
"""
Tests if the markdown file has been modified.
"""
with open(markdown_file_path,'r') as markdown_file:
hashfile_path = '%s.hash' % os.path.join(dox_dir(),'hashes',os.path.split(markdown_file.name)[1])
if os.path.exists(hashfile_path):
d = hashlib.sha256()
d.update(markdown_file.read())
digest = d.hexdigest()
with open(hashfile_path) as hashfile:
stored_hash = hashfile.read()
if stored_hash != digest:
return True # non-matching hashes - file is modified
else:
return False # hashes match - file has not been modified
else:
return True # no stored hash - file is modified by definition
def write_hash(markdown_file_path):
"""
Scans the file and records a hash digest of the contents.
"""
with open(markdown_file_path) as markdown_file:
d = hashlib.sha256()
d.update(markdown_file.read())
digest = d.hexdigest()
hash_file_path = '%s.hash' % os.path.join(dox_dir(),'hashes',os.path.split(markdown_file.name)[1])
with open(hash_file_path,'wb') as hash_file:
hash_file.write(digest)
def clean_hashes():
"""
Cleans the local file hash directory out.
"""
hash_path = os.path.join(dox_dir(),'hashes')
if os.path.exists(hash_path):
for root, dirs, files in walk(hash_path):
for name in files:
if name.endswith('.hash'):
remove(os.path.join(root,name))
else:
mkdir(hash_path)
def get_keyfields():
"""
Gets the keyfields data.
"""
dirpath = dox_dir()
keyfield_path = os.path.join(dirpath,'keyfields.json')
if os.path.exists(keyfield_path):
with open(keyfield_path,'r') as keyfield_file:
keyfield_data = json.loads(keyfield_file.read())
return keyfield_data
else:
return {}
def write_keyfields(data):
"""
Writes the keyfield data file.
"""
dirpath = dox_dir()
keyfield_path = os.path.join(dirpath,'keyfields.json')
with open(keyfield_path,'wb') as keyfield_file:
keyfield_file.write(json.dumps(data))
def get_keymap():
"""
Gets the keymap data.
"""
dirpath = dox_dir()
keymap_path = os.path.join(dirpath,'keymap.json')
if os.path.exists(keymap_path):
with open(keymap_path,'r') as keymap_file:
keymap_data = json.loads(keymap_file.read())
return keymap_data
else:
return {}
def write_keymap(data):
"""
Saves the keymap data.
"""
dirpath = dox_dir()
keymap_path = os.path.join(dirpath,'keymap.json')
with open(keymap_path,'wb') as keymap_file:
keymap_file.write(json.dumps(data))
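
# Illustrative note (not part of the original module): the usual push cycle is
#   if is_modified(path):
#       ...upload the markdown file...
#       write_hash(path)
# so unchanged files are skipped on the next run.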
| bsd-3-clause | -1,341,919,190,596,814,000 | 30.084746 | 171 | 0.59542 | false | 3.550823 | false | false | false |
kenhys/tokyodebian-monthly-report | utils/gae/testSystem.py | 1 | 14049 | #coding=utf-8
#
# Code to test the system.
#
import unittest
import urlparse
import os
from webtest import TestApp
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore_file_stub
from google.appengine.api import mail_stub
from google.appengine.api import user_service_stub
from google.appengine.api.memcache import memcache_stub
from google.appengine.api.taskqueue import taskqueue_stub
from debianmeeting import application
APP_ID = u'debianmeeting'
AUTH_DOMAIN = 'gmail.com'
LOGGED_IN_ADMIN = 'test2@example.com'
LOGGED_IN_USER = 'test3@example.com'
TITLE = 'test1'
PREWORK = 'test4'
USER_PREWORK = 'test4'
USER_REALNAME = 'Mr Test9'
CAPACITY = 123456789
class SystemTest(unittest.TestCase):
def setUp(self):
"""set up stub
"""
# API proxy
apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
# have a dummy datastore
stub = datastore_file_stub.DatastoreFileStub(
APP_ID,
'/dev/null',
'/dev/null')
apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', stub)
os.environ['APPLICATION_ID'] = APP_ID
# user authentication
apiproxy_stub_map.apiproxy.RegisterStub(
'user', user_service_stub.UserServiceStub())
os.environ['AUTH_DOMAIN'] = AUTH_DOMAIN
os.environ['USER_EMAIL'] = LOGGED_IN_ADMIN
# I don't know why this is needed but there's a warning from taskqueue.
os.environ['HTTP_HOST'] = 'localhost:8080'
# mail
apiproxy_stub_map.apiproxy.RegisterStub(
'mail', mail_stub.MailServiceStub())
# memcache
apiproxy_stub_map.apiproxy.RegisterStub(
'memcache', memcache_stub.MemcacheServiceStub())
# taskqueue
apiproxy_stub_map.apiproxy.RegisterStub(
'taskqueue', taskqueue_stub.TaskQueueServiceStub())
self.taskqueue_stub = apiproxy_stub_map.apiproxy.GetStub( 'taskqueue' )
self.taskqueue_stub._root_path = os.path.dirname(__file__)
# ==============================================================
# Utility functions
# ==============================================================
def login(self, username):
"""change login account"""
os.environ['USER_EMAIL'] = username
def createPageCommitHelper(self, app, capacity=CAPACITY):
"""
Creates an event.
@return eventid
"""
response = app.post('/eventadmin/register', {
'eventid': 'na',
'title': TITLE,
'prework': PREWORK,
'capacity': capacity,
})
self.assertEqual('302 Moved Temporarily', response.status)
self.assertTrue('/thanks?eventid=' in response.location)
eventid = response.location.split('=')[1]
return eventid
def verifyThanksPage(self, app, eventid):
"""verify that the Thanks Page content is okay."""
response = app.get('/thanks?eventid=%s' % eventid)
self.assertEqual('200 OK', response.status)
self.assertTrue(eventid in response)
def userEventEntryFormSimple(self, app, eventid, new_entry):
response = app.get('/event', {
'eventid': eventid,
'ui': 'simple',
})
self.assertEqual('200 OK', response.status)
self.assertTrue('<!-- simple_ui -->' in response)
self.assertEqual(not new_entry, '<!-- not new entry -->' in response)
return response
def userEventEntryForm(self, app, eventid, new_entry):
"""Show the page user is prompted with before registration to an event.
"""
response = app.get('/event', {
'eventid': eventid,
})
self.assertEqual('200 OK', response.status)
self.assertTrue('<!-- non_simple_ui -->' in response)
self.assertEqual(not new_entry, '<!-- not new entry -->' in response)
return response
def checkUserEventEntryFormReturnValue(
self, app, eventid, remaining_seats, response):
"""Check remaining seats value for event entry form."""
self.assertTrue(str(remaining_seats) in response)
def userEventEntry(self, app, eventid, capacity=CAPACITY,
user_realname=USER_REALNAME):
"""Register user to event.
Check that state changes before and after the event.
"""
# check entry page has right number of remaining seats in the
# two possible UIs.
self.checkUserEventEntryFormReturnValue(
app, eventid, capacity,
self.userEventEntryFormSimple(app, eventid, True))
self.checkUserEventEntryFormReturnValue(
app, eventid, capacity,
self.userEventEntryForm(app, eventid, True))
response = app.post('/eventregister', {
'eventid': eventid,
'user_prework': USER_PREWORK,
'user_attend': 'attend',
'user_enkai_attend': 'enkai_attend',
'user_realname': user_realname,
})
self.assertEqual('302 Moved Temporarily', response.status)
self.assertTrue('/thanks?eventid=%s' % eventid
in response.location)
self.verifyThanksPage(app, eventid)
# check entry page has right number of remaining seats
self.checkUserEventEntryFormReturnValue(
app, eventid, capacity - 1,
self.userEventEntryFormSimple(app, eventid, False))
self.checkUserEventEntryFormReturnValue(
app, eventid, capacity - 1,
self.userEventEntryForm(app, eventid, False))
def createEnquete(self, app, eventid, question_text = '''question 1
question 2
question 3'''):
"""Create an enquete. Should be ran as the admin."""
response = app.get('/enquete/edit', {
'eventid': eventid,
})
self.assertEqual('200 OK', response.status)
response = app.post('/enquete/editdone', {
'eventid': eventid,
'overall_message': 'hello',
'question_text': question_text,
})
self.assertEqual('200 OK', response.status)
# make sure the next time to edit will show the content the
# next time.
response = app.get('/enquete/edit', {
'eventid': eventid,
})
self.assertEqual('200 OK', response.status)
self.assertTrue(question_text in response)
# ==============================================================
# Tests
# ==============================================================
def testTopPage(self):
"""test displaying of the top page."""
app = TestApp(application)
response = app.get('/')
self.assertEqual('200 OK', response.status)
self.assertTrue('Debian勉強会予約管理システム' in response)
def testCreatePage(self):
app = TestApp(application)
response = app.get('/newevent')
self.assertEqual('200 OK', response.status)
self.assertTrue('幹事用イベント管理ページ' in response)
def testCreatePageCommit(self):
app = TestApp(application)
eventid = self.createPageCommitHelper(app)
# basic sanity checking of the event ID value.
self.assertEqual(len(eventid), 40)
def testListKnownAdminEvents(self):
"""Check admin dashboard if the newly created event can be seen.
"""
app = TestApp(application)
response = app.get('/')
self.assertEqual('200 OK', response.status)
self.assertFalse(TITLE in response)
# generate event data
self.createPageCommitHelper(app)
# check the event is viewable.
response = app.get('/')
self.assertEqual('200 OK', response.status)
self.assertTrue(TITLE in response)
def testThanksPageFailCase(self):
"""test that Thanks page will fail when wrong eventid is requested."""
app = TestApp(application)
# try to get some incorrect eventid
eventid = 'zzz'
response = app.get('/thanks?eventid=%s' % eventid, status=404)
self.assertTrue(eventid in response)
def testUserRegisterEvent(self):
"""Test user registration workflow.
"""
# generate event data first
app = TestApp(application)
eventid = self.createPageCommitHelper(app)
# check user does not see the event yet
self.login(LOGGED_IN_USER)
response = app.get('/')
self.assertEqual('200 OK', response.status)
self.assertFalse(TITLE in response)
# check user sees the event after registering
self.userEventEntry(app, eventid)
response = app.get('/')
self.assertEqual('200 OK', response.status)
self.assertTrue(TITLE in response)
def testUserRegisterEventFull(self):
"""Test user registration failure workflow.
"""
# generate event data first
app = TestApp(application)
# generate a event with capacity of 1
eventid = self.createPageCommitHelper(app, capacity=1)
# check user does not see the event yet
self.login(LOGGED_IN_USER)
response = app.get('/')
self.assertEqual('200 OK', response.status)
self.assertFalse(TITLE in response)
# check user sees the event after registering
self.userEventEntry(app, eventid, capacity=1)
response = app.get('/')
self.assertEqual('200 OK', response.status)
self.assertTrue(TITLE in response)
# check adding a different user to the event
self.login(LOGGED_IN_ADMIN)
response = app.post('/eventregister', {
'eventid': eventid,
'user_prework': USER_PREWORK,
'user_attend': 'attend',
'user_enkai_attend': 'enkai_attend',
'user_realname': USER_REALNAME,
}, status=404)
self.assertTrue('you cannot reserve a place' in response)
def testAdminReviewEvent(self):
"""Verify the event admin summary review flow.
"""
app = TestApp(application)
# register the event
eventid = self.createPageCommitHelper(app)
# user joins the event
self.login(LOGGED_IN_USER)
self.userEventEntry(app, eventid)
self.login(LOGGED_IN_ADMIN)
response = app.get('/eventadmin/summary', {
'eventid': eventid,
})
self.assertEqual('200 OK', response.status)
self.assertTrue(LOGGED_IN_USER in response)
self.assertTrue(USER_PREWORK in response)
def testLatexEnqueteEscape(self):
app = TestApp(application)
eventid = self.createPageCommitHelper(app)
# user joins the event
self.login(LOGGED_IN_USER)
self.userEventEntry(app, eventid,
user_realname='man_with_underscore')
# be the admin and create the enquete.
self.login(LOGGED_IN_ADMIN)
response = app.get('/eventadmin/preworklatex', {
'eventid': eventid,
})
self.assertEqual('200 OK', response.status)
        self.assertTrue(r'man\_{}with\_{}underscore' in response.body)
def testEnqueteCreate(self):
"""Test Enquete creation flow.
"""
# generate event data first
app = TestApp(application)
eventid = self.createPageCommitHelper(app)
# user joins the event
self.login(LOGGED_IN_USER)
self.userEventEntry(app, eventid)
# does not see enquete request because there is no enquete yet.
response = app.get('/', {
'eventid': eventid,
})
self.assertFalse('アンケートに回答する' in response)
# be the admin and create the enquete.
self.login(LOGGED_IN_ADMIN)
self.createEnquete(app, eventid)
# admin sends out the enquete mail.
response = app.get('/enquete/sendmail', {
'eventid': eventid,
})
self.assertEqual('200 OK', response.status)
# user responds to enquete
# user sees top page with enquete requirement.
self.login(LOGGED_IN_USER)
response = app.get('/', {
'eventid': eventid,
})
self.assertTrue('アンケートに回答する' in response)
# user responds to enquete
response = app.get('/enquete/respond', {
'eventid': eventid,
})
self.assertEqual('200 OK', response.status)
self.assertTrue('question 1' in response)
self.assertTrue('question 2' in response)
self.assertTrue('question 3' in response)
response = app.post('/enquete/responddone', {
'eventid': eventid,
'question0': 0,
'question1': 5,
'question2': 4,
'overall_comment': 'hello world',
})
self.assertEqual('200 OK', response.status)
# user no longer sees top page with enquete requirement
response = app.get('/', {
'eventid': eventid,
})
self.assertFalse('アンケートに回答する' in response)
# admin views the list
self.login(LOGGED_IN_ADMIN)
response = app.get('/enquete/showresult', {
'eventid': eventid,
})
self.assertEqual('200 OK', response.status)
self.assertEquals("question 1,question 2,question 3,自由記入\r\nNA,5,4,hello world\r\n", response.body)
# admin views all the results
self.login(LOGGED_IN_ADMIN)
response = app.get('/enquete/showallresults')
self.assertEqual('200 OK', response.status)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 5,947,947,450,598,396,000 | 34.012563 | 107 | 0.583567 | false | 4.087709 | true | false | false |
devunt/ika | ika/ircobjects.py | 1 | 5758 | import re
from datetime import datetime
from ika.models import Account, Channel
from ika.utils import tokenize_modestring
class IRCModeMixin:
modesdef = dict()
def __init__(self):
self.modes = dict()
@property
def modestring(self):
string = '+'
params = list()
for k, v in self.modes.items():
if not isinstance(v, set):
string += k
if v:
params.append(v)
if len(params) > 0:
string += ' ' + ' '.join(params)
return string
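    # e.g. modes == {'n': None, 'k': 'sekrit'} renders as '+nk sekrit';
    # set-valued (list) modes such as bans are rendered by listmodestring
    # below instead.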
@property
def listmodestring(self):
string = '+'
params = list()
for k, v in self.modes.items():
if isinstance(v, set):
for e in v:
string += k
params.append(e)
if len(params) > 0:
string += ' ' + ' '.join(params)
return string
def update_modes(self, *modes):
adds, removes = tokenize_modestring(self.modesdef, *modes)
for k, v in adds.items():
if isinstance(v, set):
s = self.modes.get(k, set())
self.modes[k] = s | v
else:
self.modes[k] = v
for k, v in removes.items():
if isinstance(v, set):
self.modes[k] -= v
if len(self.modes[k]) == 0:
del self.modes[k]
else:
del self.modes[k]
return adds, removes
class IRCUser(IRCModeMixin):
def __init__(self, uid, timestamp, nick, host, dhost, ident, ipaddress, signon, gecos):
super().__init__()
self.uid = uid
self.timestamp = int(timestamp)
self.nick = nick
self.host = host
self.dhost = dhost
self.ident = ident
self.ipaddress = ipaddress
self.signon = int(signon)
self.gecos = gecos
self.opertype = None
self.metadata = dict()
# For backref
self.channels = set()
def __str__(self):
return self.nick
def __repr__(self):
return f'<IRCUser {self.mask}>'
def match_mask(self, mask):
pattern = re.escape(mask)
        pattern = pattern.replace(r'\*', '.+?')
pattern = '^{}$'.format(pattern)
return re.match(pattern, self.mask, re.IGNORECASE) is not None
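    # Example: the mask '*!*@*.example.com' matches a user whose mask is
    # 'joe!ident@host.example.com'; each '*' becomes the regex '.+?' above,
    # so a wildcard must consume at least one character.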
@property
def mask(self):
return '{}!{}@{}'.format(self.nick, self.ident, self.dhost)
@property
def account(self) -> Account:
name = self.metadata.get('accountname')
return name and Account.get(name)
@property
def connected_at(self):
return datetime.fromtimestamp(self.signon)
@property
def is_operator(self):
return self.opertype == 'NetAdmin'
@property
def is_service(self):
return self.opertype == 'Services'
def update_modes(self, *modes):
adds, removes = super().update_modes(*modes)
if 'o' in removes.keys():
self.opertype = None
class IRCChannel(IRCModeMixin):
umodesdef = dict()
def __init__(self, name, timestamp):
super().__init__()
self.name = name
self.timestamp = int(timestamp)
self.users = dict()
self.usermodes = dict()
self.metadata = dict()
def __str__(self):
return self.name
def __repr__(self):
return f'<IRCChannel {self.name}>'
@property
def umodestring(self):
return ' '.join([f'{"".join(mode)},{uid}' for uid, mode in self.usermodes.items()])
@property
def channel(self):
try:
return Channel.get(self.name)
except UnicodeEncodeError:
# surrogates are not allowed.
return None
def update_modes(self, *modes):
super().update_modes(*modes)
adds, removes = tokenize_modestring(self.umodesdef, *modes)
for mode, v in adds.items():
for uid in v:
self.usermodes.setdefault(uid, set())
self.usermodes[uid].add(mode)
for mode, v in removes.items():
for uid in v:
self.usermodes[uid].remove(mode)
def generate_synchronizing_modestring(self, uid=None, account=None, mask=None):
if account and mask:
            raise ValueError('At most one of [account, mask] may be set')
if not self.channel:
return ''
to_be_added = list()
to_be_removed = list()
if uid:
usermodes = {uid: self.usermodes[uid]}
else:
usermodes = self.usermodes
for uid, umode in usermodes.items():
user = self.users[uid]
if user.is_service:
continue
if mask and (not user.match_mask(mask)):
continue
if account and (user.account != account):
continue
flags = self.channel.get_flags_by_user(user)
modes = flags.modes
adds = modes - umode
removes = umode - modes
for add in adds:
to_be_added.append((add, uid))
for remove in removes:
to_be_removed.append((remove, uid))
modestring = str()
params = list()
if len(to_be_added) > 0:
modestring += '+'
for mode, uid in to_be_added:
modestring += mode
params.append(uid)
if len(to_be_removed) > 0:
modestring += '-'
for mode, uid in to_be_removed:
modestring += mode
params.append(uid)
if len(params) > 0:
modestring += ' '
modestring += ' '.join(params)
return modestring
| agpl-3.0 | 8,605,814,057,841,487,000 | 25.781395 | 91 | 0.516325 | false | 4.066384 | false | false | false |
dirko/pyhacrf | setup.py | 2 | 1515 | from setuptools import setup, Extension
from codecs import open
from os import path
# from Michael Hoffman's http://www.ebi.ac.uk/~hoffman/software/sunflower/
class NumpyExtension(Extension):
def __init__(self, *args, **kwargs):
from numpy import get_include
from numpy.distutils.misc_util import get_info
kwargs.update(get_info('npymath'))
kwargs['include_dirs'] += [get_include()]
Extension.__init__(self, *args, **kwargs)
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pyhacrf',
version='0.1.2',
packages=['pyhacrf'],
install_requires=['numpy>=1.9', 'PyLBFGS>=0.1.3'],
ext_modules=[NumpyExtension('pyhacrf.algorithms',
['pyhacrf/algorithms.c'])],
url='https://github.com/dirko/pyhacrf',
download_url='https://github.com/dirko/pyhacrf/tarball/0.1.2',
license='BSD',
author='Dirko Coetsee',
author_email='dpcoetsee@gmail.com',
description='Hidden alignment conditional random field, a discriminative string edit distance',
long_description=long_description,
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
],
)
| bsd-3-clause | -776,888,844,945,481,500 | 31.934783 | 99 | 0.645545 | false | 3.65942 | false | false | false |
Brazelton-Lab/lab_scripts | derive_pathway_steps.py | 1 | 16412 | #! /usr/bin/env python2
"""
Copyright:
    derive_pathway_steps.py Obtain gene list from pathway databases
Copyright (C) 2016 William Brazelton, Alex Hyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import atexit
import argparse
import os
import pycyc
import re
import stat
from subprocess import CalledProcessError, Popen
import sys
import time
__author__ = 'Alex Hyer'
__email__ = 'theonehyer@gmail.com'
__license__ = 'GPLv3'
__maintainer__ = 'Alex Hyer'
__status__ = 'Alpha'
__version__ = '0.0.1a16'
def print_nested_list(lst, level=0):
yield(' ' * (level - 1) + '+---' * (level > 0) + str(lst[0]))
for l in lst[1:]:
if type(l) is list:
for i in print_nested_list(l, level + 1):
yield i
else:
yield(' ' * level + '+---' + str(l))
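# Example: list(print_nested_list(['root', 'a', ['nested', 'b']])) yields
# the lines:
#   root
#   +---a
#   +---nested
#    +---b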
# This method is literally just the Python 3.5.1 which function from the
# shutil library in order to permit this functionality in Python 2.
# Minor changes to style were made to account for indentation.
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly
# rather than referring to PATH directories. This includes checking
# relative to the current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if not os.curdir in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path
# extensions. This will allow us to short circuit when given
# "python.exe". If it does match, only test that one, otherwise
# we have to try others.
if any(cmd.lower().endswith(ext.lower()) for ext in
pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if not normdir in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
def main(args):
"""Run program
Args:
args (NameSpace): ArgParse arguments controlling program flow
"""
def shutdown(pid):
print('>>> Shutdown sequence initiated.')
print('>>> Terminating Pathway Tools LISP Daemon')
pid.terminate()
pid.wait()
print('>>> Daemon destroyed.')
print('>>> Until next time. :)')
print('>>> Hi, I am DPS (Derive Pathway Steps).')
print('>>> I will be analyzing pathways for you today.')
print('>>> I am using the {0} database as per your command.'
.format(args.database))
if args.database == 'metacyc':
# Obtain executable
pathway_tools = which('pathway-tools', path=args.executable)
if pathway_tools is None:
raise EnvironmentError('I cannot find pathway-tools: please '
'specify -e.')
else:
print('>>> I found pathway-tools: {0}.'.format(pathway_tools))
# Start pathway-tools daemon
while True:
print('>>> Summoning Pathway Tools LISP Daemon.')
pid = Popen([pathway_tools, '-lisp', '-api'],
stderr=open(os.devnull, 'w'),
stdout=open(os.devnull, 'w'))
print('>>> Let\'s give it five seconds to spawn.')
time.sleep(5)
if os.path.exists('/tmp/ptools-socket') and \
stat.S_ISSOCK(os.stat('/tmp/ptools-socket').st_mode):
print('>>> The daemon is is up!')
break
else:
print('>>> The daemon took too long to boot. :(')
print('>>> This makes me sad, so I will kill it.')
pid.kill()
print('>>> Let\'s wait five seconds for it to die!')
time.sleep(5)
pid.poll()
if pid.returncode is None:
                    raise CalledProcessError(pid.returncode, pathway_tools, 'Pathway Tools won\'t die!')
else:
print('>>> The daemon is dead!')
print('>>> I miss it. :( I\'m going to try again. :)')
atexit.register(shutdown, pid)
# Connect to daemon
try:
metacyc = pycyc.open('meta')
except IOError:
print('>>> I cannot connect to Pathway Tools Daemon.')
print('>>> Here is the original error message:')
raise
else:
print('>>> I have connected to the Pathway Tools Daemon.')
print('>>> Phenomenal cosmic powers! Itty bitty memory footprint!')
# Index genes file
print('>>> Indexing {0}.'.format(args.reactions_file.name))
reactions_to_genes = {}
start_time = time.time()
for line in args.reactions_file:
parts = line.strip().split()
reactions_to_genes[parts[0]] = (parts[1], parts[2:])
end_time = time.time()
print('>>> I indexed {0} reactions in {1} seconds.'
.format(str(len(reactions_to_genes)),
str(end_time - start_time)))
print('>>> I\'m so fast.')
# Index all pathways by name
print('>>> Time to index all the pathways from Metacyc.')
pathways = {}
start_time = time.time()
for frame in metacyc.all_pathways():
pathways[frame.common_name] = frame
end_time = time.time()
print('>>> I indexed {0} pathways in {1} seconds.'
.format(str(len(pathways)), str(end_time - start_time)))
print('>>> Aren\'t you proud of me?')
# Index gene abundance
print('>>> Recording gene abundances from {0}.'
.format(args.abundance_file.name))
abundances = {}
start_time = time.time()
for line in args.abundance_file:
gene, abundance = line.strip().split('\t')
abundances[gene] = abundance
end_time = time.time()
print('>>> I indexed {0} gene abundances in {1} seconds.'
.format(str(len(abundances)), str(end_time - start_time)))
# Obtain pathway of interest
print('>>> Time to do some science!')
print('>>> Note: you can input all or part of a pathway name.')
print('>>> Type "q" for input at any time to exit the program.')
while True: # Rest of program runs in a loop until user ends it
possibilities = {}
user_input = raw_input('>>> Enter a pathway: ')
if user_input.lower() == 'q':
break
for name, frame in pathways.items():
if user_input in name:
possibilities[name] = frame
if len(possibilities) == 0:
print('>>> I couldn\'t find any pathways matching your '
'request.')
print('>>> Try an alternative name for the pathway.')
continue
print('>>> I found {0} pathways matching your request.'
.format(str(len(possibilities))))
shutdown = False
restart = False
pathway = None
while True:
print('>>> Here are possible pathways:')
max_entry = len(possibilities) - 1
for possibility in enumerate(possibilities.items()):
print('{0}: {1}'.format(str(possibility[0]),
possibility[1][1].common_name))
path_num = raw_input('>>> Select a pathway ("r" to restart): ')
if path_num.lower() == 'q':
shutdown = True
break
elif path_num.lower() == 'r':
restart = True
break
else:
try:
path_num = int(path_num)
except ValueError:
print('>>> Your answer is not an integer.')
print('>>> I only understand integers.')
print('>>> Please correct.')
continue
if path_num > max_entry or path_num < 0:
print('>>> {0} is not a valid pathway.'
.format(str(path_num)))
print('>>> Valid pathways are: {0}.'.format(' '.join(
[str(i) for i in range(max_entry + 1)])))
print('>>> Try again.')
continue
pathway = possibilities[possibilities.keys()[path_num]]
print('>>> You selected: {0}.'.format(pathway.common_name))
print('>>> Neat! I\'ll analyze it now.')
break
if restart is True:
continue
if shutdown is True:
break
# Add genes and abundances to pathway reactions
print('>>> Collecting reactions in pathway.')
try:
if type(pathway.reaction_list) is list:
rxns = [str(rxn) for rxn in pathway.reaction_list]
else:
rxns = [str(pathway.reaction_list)]
except KeyError:
print('>>> I cannot access the reactions for this pathway. :(')
print('>>> I\'m sorry I\'ve failed you. :(')
print('>>> Please have me analyze something else.')
continue
print('>>> Analyzing pathway for key reactions.')
if hasattr(pathway, 'key_reactions') is True and\
pathway.key_reactions is not None:
key_rxns = [str(key) for key in pathway.key_reactions]
for rxn in enumerate(rxns):
if rxn[1] in key_rxns:
rxns[rxn[0]] = rxn[1] + '*'
print('>>> Acquiring gene families for each reaction from {0}.'
.format(args.reactions_file.name))
reactions = {}
for rxn in rxns:
                rxn_name = re.sub(r'\*$', '', rxn)
if rxn_name in reactions_to_genes.keys():
ec, uniref_list = reactions_to_genes[rxn_name]
rxn_name = rxn + ' (' + ec + ')'
reactions[rxn_name] = {}
for uniref in uniref_list:
reactions[rxn_name][uniref] = 0.0
print('>>> Adding abundances from {0}.'
.format(args.abundance_file.name))
for rxn in reactions.keys():
for gene in reactions[rxn]:
if gene in abundances.keys():
reactions[rxn][gene] = abundances[gene]
print('>>> Removing unused gene families.')
for rxn in reactions.keys():
for uniref in reactions[rxn].keys():
if reactions[rxn][uniref] == 0.0:
del reactions[rxn][uniref]
for rxn in reactions.keys():
if reactions[rxn] == {}:
reactions[rxn] = 'None\tN/A'
continue
# Format reactions for printing
rxn_list = [pathway.common_name]
for rxn in reactions.keys():
if reactions[rxn] == 'None\tN/A':
temp = [rxn, ['None\tN/A']]
rxn_list.append(temp)
elif type(reactions[rxn]) is dict:
temp = [rxn]
for uniref in reactions[rxn].keys():
temp.append('{0}\t{1}'.format(uniref,
str(reactions[rxn][uniref])))
rxn_list.append(temp)
# Print output
print('>>> I\'ve finished analyzing everything!')
print('>>> Here it is (asterisks represent key reactions):')
rxn_print = [rxn for rxn in print_nested_list(rxn_list)]
for rxn in rxn_print:
print(rxn)
# Save output
print('>>> What file would you like me to save this to?')
print('>>> Type "n" if you don\'t want to save this output.')
while True:
out_file = raw_input('>>> File: ')
if out_file.lower() != 'n' and out_file.lower() != 'q':
try:
with open(out_file, 'w') as out_handle:
for rxn in rxn_print:
out_handle.write(rxn + os.linesep)
print('>>> Output written to {0}.'.format(out_file))
break
except IOError as error:
print('>>> I could not write to {0}.'.format(out_file))
print('>>> Original error:')
print(error)
print('>>> Let\'s try again (enter "n" to skip).')
elif out_file.lower() == 'q':
shutdown = True
break
else:
break
if shutdown is True:
break
print('>>> All done!')
print('>>> Let\'s do more science (enter "q" to exit program)!')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.
RawDescriptionHelpFormatter)
subparsers = parser.add_subparsers(title='Database',
dest='database')
metacyc = subparsers.add_parser('metacyc',
help='Analyze MetaCyc Database')
metacyc.add_argument('abundance_file',
metavar='Abundance File',
type=argparse.FileType('r'),
help='TSV containing gene ID and abundance columns')
metacyc.add_argument('reactions_file',
metavar='Reactions File',
type=argparse.FileType('r'),
help='metacyc1 file mapping Unirefs to reactions')
metacyc.add_argument('-e', '--executable',
default=None,
type=str,
                         help='pathway-tools executable if not in PATH')
args = parser.parse_args()
main(args)
sys.exit(0)
| gpl-2.0 | 8,494,539,280,182,244,000 | 39.423645 | 79 | 0.509505 | false | 4.309874 | false | false | false |
eset/malware-research | winnti_group/winnti_group_unpack.py | 1 | 4389 | # -*- encoding: utf-8 -*-
#
# This script unpacks the payload from Winnti Group samples using their custom
# packer. For details, see:
# https://www.welivesecurity.com/wp-content/uploads/2019/10/ESET_Winnti.pdf
#
# For feedback or questions contact us at: github@eset.com
# https://github.com/eset/malware-research/
#
# Author:
# Marc-Etienne M.Léveillé <leveille@eset.com>
#
# This code is provided to the community under the two-clause BSD license as
# follows:
#
# Copyright (C) 2019 ESET
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import os
import struct
from Crypto.Cipher import ARC4
import hashlib
import json
# Those are the last functions of the shellcode returning the address of the
# payload header (minus 3)
GET_PAYLOAD_FUNCTIONS = [
# 32-bit
"558BECE800000000585DC3".decode('hex'),
# 64-bit
"E80000000058C3".decode('hex')
]
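# Both blobs are tiny "get current address" stubs. The 32-bit one
# disassembles to push ebp / mov ebp, esp / call $+5 / pop eax / pop ebp /
# ret; the 64-bit one to call $+5 / pop rax / ret. The call/pop pair loads
# the address of the pop instruction itself, a fixed offset from the
# payload header that follows the stub.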
def get_payload_indexes(s):
r = []
for p in GET_PAYLOAD_FUNCTIONS:
i = s.find(p)
while i >= 0:
r.append(i + len(p))
i = s.find(p, i + 1)
r.sort()
return r
for path in sys.argv[1:]:
with open(path, 'rb') as f:
file_content = f.read()
for offset_to_bin in get_payload_indexes(file_content):
rc4_key, \
added_code_size, \
rc4_key_size, \
filename_size, \
filename_wide_size, \
pe_size, \
launch_type = \
struct.unpack("16s" + "I" * 6, file_content[offset_to_bin:][:40])
if launch_type not in (1, 2):
sys.stderr.write(
"Possibly invalid header (launch_type = {:d}) at {:s}:{:d}\n".format(
launch_type, path, offset_to_bin
)
)
rc4_key = ''.join([ chr(ord(c) ^ 0x37) for c in rc4_key[:rc4_key_size] ])
i = offset_to_bin + 40
filename = ARC4.new(rc4_key).decrypt(file_content[i:][:filename_size])[:-1]
i += filename_size
filename_wide = ARC4.new(rc4_key).decrypt(file_content[i:][:filename_wide_size])
filename_wide = filename_wide.decode('utf-16')[:-1]
i += filename_wide_size
pe = ARC4.new(rc4_key).decrypt(file_content[i:][:pe_size])
if pe[:2] == 'MZ':
payload_sha1 = hashlib.sha1(pe).hexdigest()
desc = {
"parent_sha1": hashlib.sha1(file_content).hexdigest(),
"rc4_key": rc4_key,
"filename": filename,
"filename_w": filename_wide,
"launch_type": launch_type,
"payload_sha1": payload_sha1,
}
if os.path.exists(payload_sha1):
sys.stderr.write("File {:s} already exists, skipping\n".format(
payload_sha1
))
else:
with file(payload_sha1, "wb") as o:
o.write(pe)
json.dump(desc, sys.stdout)
sys.stdout.write("\n")
else:
sys.stderr.write(
"Payload not decrypted sucessfully at {:s}:{:d}\n".format(
path, offset_to_bin
)
)
| bsd-2-clause | -1,076,169,081,500,170,800 | 36.495726 | 88 | 0.61933 | false | 3.680369 | false | false | false |
caronc/newsreap | newsreap/NNTPBinaryContent.py | 1 | 1934 | # -*- coding: utf-8 -*-
#
# A NNTP Binary File Representation
#
# Copyright (C) 2015-2016 Chris Caron <lead2gold@gmail.com>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
from newsreap.NNTPContent import NNTPContent
from newsreap.Utils import bytes_to_strsize
class NNTPBinaryContent(NNTPContent):
"""
A Binary file representation
"""
def __init__(self, filepath=None, part=None, total_parts=None,
begin=None, end=None, total_size=None,
work_dir=None, sort_no=10000, *args, **kwargs):
""" Intitialize NNTPBinaryContent
"""
super(NNTPBinaryContent, self).__init__(
filepath=filepath,
part=part, total_parts=total_parts,
begin=begin, end=end, total_size=total_size,
work_dir=work_dir,
sort_no=sort_no, *args, **kwargs)
def __repr__(self):
"""
Return a printable version of the file being read
"""
if self.part is not None:
return '<NNTPBinaryContent sort=%d filename="%s" part=%d/%d len=%s />' % (
self.sort_no,
self.filename,
self.part,
self.total_parts,
bytes_to_strsize(len(self)),
)
else:
return '<NNTPBinaryContent sort=%d filename="%s" len=%s />' % (
self.sort_no,
self.filename,
bytes_to_strsize(len(self)),
)
| gpl-3.0 | 6,874,197,596,624,833,000 | 34.814815 | 86 | 0.601344 | false | 3.971253 | false | false | false |
marcoscrcamargo/ic | opencv/classifiers/svm.py | 1 | 4640 | # import the necessary packages
from sklearn import svm
from sklearn.calibration import CalibratedClassifierCV
# from sklearn.cross_validation import train_test_split
# resolvendo problemas de compatibilidade
from sklearn.model_selection import train_test_split
from imutils import paths
import numpy as np
import argparse
import imutils
import cv2
import os
data_path = "DBIM/alldb"
model_pxl = CalibratedClassifierCV(svm.LinearSVC())
model_hst = CalibratedClassifierCV(svm.LinearSVC())
def image_to_feature_vector(image, size=(32, 32)):
# resize the image to a fixed size, then flatten the image into
# a list of raw pixel intensities
return cv2.resize(image, size).flatten()
def extract_color_histogram(image, bins=(8, 8, 8)):
# extract a 3D color histogram from the HSV color space using
# the supplied number of `bins` per channel
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
hist = cv2.calcHist([hsv], [0, 1, 2], None, bins,
[0, 180, 0, 256, 0, 256])
# handle normalizing the histogram if we are using OpenCV 2.4.X
if imutils.is_cv2():
hist = cv2.normalize(hist)
# otherwise, perform "in place" normalization in OpenCV 3 (I
# personally hate the way this is done
else:
cv2.normalize(hist, hist)
# return the flattened histogram as the feature vector
return hist.flatten()
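# With the defaults above, image_to_feature_vector() produces a
# 32 * 32 * 3 = 3072-dimensional uint8 raw-pixel vector and
# extract_color_histogram() a flattened 8 * 8 * 8 = 512-bin float32 HSV
# histogram; both feed the linear SVMs below.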
def initializate(data_p = "DBIM/alldb"):
    # rebind the module-level classifier state
    global data_path, model_pxl, model_hst
    data_path = data_p
    model_pxl = CalibratedClassifierCV(svm.LinearSVC())
    model_hst = CalibratedClassifierCV(svm.LinearSVC())
def fit(info=False):
# grab the list of images that we'll be describing
if(info):
print("[INFO] describing images...")
imagePaths = list(paths.list_images(data_path))
# initialize the raw pixel intensities matrix, the features matrix,
# and labels list
rawImages = []
features = []
labels = []
# loop over the input images
for (i, imagePath) in enumerate(imagePaths):
# load the image and extract the class label (assuming that our
# path as the format: /path/to/dataset/{class}/{image_num}.jpg
image = cv2.imread(imagePath)
label = imagePath.split(os.path.sep)[2]
# extract raw pixel intensity "features", followed by a color
# histogram to characterize the color distribution of the pixels
# in the image
pixels = image_to_feature_vector(image)
hist = extract_color_histogram(image)
# update the raw images, features, and labels matricies,
# respectively
rawImages.append(pixels)
features.append(hist)
labels.append(label)
# show an update every 1,000 images
if i > 0 and i % 1000 == 0 and info:
print("[INFO] processed {}/{}".format(i, len(imagePaths)))
# show some information on the memory consumed by the raw images
# matrix and features matrix
rawImages = np.array(rawImages)
features = np.array(features)
labels = np.array(labels)
if(info):
print("[INFO] pixels matrix: {:.2f}MB".format(
rawImages.nbytes / (1024 * 1000.0)))
print("[INFO] features matrix: {:.2f}MB".format(
features.nbytes / (1024 * 1000.0)))
(trainRI, testRI, trainRL, testRL) = train_test_split(
rawImages, labels, test_size=0, random_state=42)
(trainFeat, testFeat, trainLabels, testLabels) = train_test_split(
features, labels, test_size=0, random_state=42)
model_pxl.fit(trainRI, trainRL)
model_hst.fit(trainFeat, trainLabels)
def get_predict_proba(model, input):
prob = model.predict_proba(input)
label = model.predict(input)[0]
return {'label':label, '0':prob[0][0] ,'1':prob[0][1], '2': prob[0][2] }
def print_proba(ret, full=False):
if(full):
print("SVM")
print("\n PIXEL")
print("Probability:")
print("label 0: " + str(ret['pxl']['0']) )
print("label 1: " + str(ret['pxl']['1']))
print("label 2: " + str(ret['pxl']['2']))
print("image label:" + str(ret['pxl']['label']))
print("")
print("\n HISTOGRAM")
print("Probability:")
print("label 0: " + str(ret['hst']['0']) )
print("label 1: " + str(ret['hst']['1']))
print("label 2: " + str(ret['hst']['2']))
print("image label:" + str(ret['hst']['label']))
print("")
else:
print("SVM\n")
print("Label: " + str(ret['pxl']['label']) +
" prob:" + str(ret['pxl'][str(ret['pxl']['label'])]))
print("Label: " + str(ret['hst']['label']) +
" prob:" + str(ret['hst'][str(ret['hst']['label'])]))
def classify(img_path, imshow=False):
img = cv2.imread(img_path)
if(imshow):
cv2.imshow('image',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
pxl = image_to_feature_vector(np.array(img)).reshape(1,-1)
hst = extract_color_histogram(np.array(img)).reshape(1,-1)
pxl_c = get_predict_proba(model_pxl, pxl)
hst_c = get_predict_proba(model_hst, hst)
return {'pxl':pxl_c, 'hst':hst_c } | gpl-3.0 | 1,263,451,520,084,172,800 | 28.188679 | 73 | 0.681897 | false | 2.894573 | true | false | false |
JohnCEarls/DataDirac | datadirac/aggregator/resultset.py | 1 | 13038 | import boto.sqs
import logging
import time
import boto
import json
import numpy as np
import tempfile
import hashlib
from collections import defaultdict
from boto.sqs.message import Message
from boto.s3.key import Key
import base64
import datetime
from boto.dynamodb2.exceptions import ConditionalCheckFailedException
import os.path
from datadirac.data import NetworkInfo
from masterdirac.models.aggregator import ( TruthGPUDiracModel,
RunGPUDiracModel, DataForDisplay )
import masterdirac.models.run as run_mdl
import random
import pandas
import re
class TruthException(Exception):
pass
class FileCorruption(Exception):
pass
class DirtyRunException(Exception):
pass
class InvalidMask(Exception):
#the given mask doesnt parse
pass
MASK_PATTERN_MATCH = r'([\[\(]\d+,\d+[\)\]])'
MASK_PATTERN_PARSE = r'[\[\(](\d+),(\d+)[\)\]]'
class ResultSet(object):
"""
Abstract BaseClass
For representing and manipulating a result
"""
def __init__(self, instructions ):
self.logger = logging.getLogger(__name__)
self._result_bucket = None
self._alleles = None
#print instructions
self._instructions = instructions
self.run_id = instructions['run_id']
self._file_id = instructions['file_id']
self.result_files = instructions['result_files']
self.sample_allele = instructions['sample_allele']
self.sample_names = instructions['sample_names']
self.shuffle = instructions['shuffle']
self.strain = instructions['strain']
self.num_networks = instructions['num_networks']
self._data = None
self._classified = None
self._truth = None
self._compare_mat = None
@property
def nsamp(self):
return len(self.sample_names)
@property
def file_id(self):
return self._file_id
@property
def nnets(self):
return self.num_networks
@property
def alleles(self):
if not self._alleles:
self._alleles = self.result_files.keys()
self._alleles.sort()
return self._alleles
@property
def data(self):
if self._data is None:
stacked = []
for allele in self.alleles:
stacked.append(self._get_data(allele))
self._data = np.array(stacked)
return self._data
@property
def classified(self):
if self._classified is None:
self._classified = np.argmax( self.data, axis=0 )
return self._classified
@property
def truth(self):
if self._truth is None:
classes = []
for a in self.alleles:
classes.append(set([sn for _, sn in self.sample_allele[a]]))
def clsfy(classes, s):
for i,_set in enumerate(classes):
if s in _set:
return i
t_list = [clsfy(classes, sname) for sname in self.sample_names]
self._truth = np.array(t_list)
return self._truth
def get_run_id(self):
return self.run_id
def get_strain(self):
return self.strain
def get_result_files(self):
return self.result_files
def archive_package(self):
return (self.file_id, self._instructions, self.data)
@property
def compare_mat(self):
if self._compare_mat is None:
truth_mat = np.tile(self.truth, (self.nnets, 1))
#T.D. could slice here or compute once and check with map
self._compare_mat = (truth_mat == self.classified)
return self._compare_mat
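    # compare_mat is an (nnets x nsamp) boolean matrix whose (n, s) entry is
    # True when network n classified sample s correctly; Masked.accuracy
    # below reduces it to per-network accuracy over a slice of the samples.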
class S3ResultSet(ResultSet):
def __init__(self, instructions, from_gpu_bucket_name ):
"""
instructions dictified run model
"""
super(S3ResultSet,self).__init__(instructions)
self._s3_from_gpu= from_gpu_bucket_name
self._from_gpu_bucket = None
@property
def from_gpu_bucket(self):
"""
Returns the S3 bucket object that contains the gpu generated
results
"""
attempts = 0
while not self._from_gpu_bucket:
attempts += 1
try:
conn = boto.connect_s3()
self._from_gpu_bucket = conn.get_bucket(self._s3_from_gpu)
except:
if attempts > 5:
raise
msg = "Could not connect to %s. Trying again. "
msg = msg % self._s3_from_gpu
self.logger.exception( msg )
time.sleep(2 + (random.random() * attempts))
return self._from_gpu_bucket
def _get_data(self, allele ):
"""
Returns the given alleles rms matrix
"""
complete = False
count = 0
while not complete:
try:
with tempfile.SpooledTemporaryFile() as temp:
key = self.from_gpu_bucket.get_key( self.result_files[allele] )
key.get_contents_to_file( temp )
temp.seek(0)
buffered_matrix = np.load( temp )
complete = True
except Exception as e:
print e
#print "error on get[%r], trying again" % self.result_files[allele]
count += 1
if count > 1:
raise FileCorruption('Error on File [%s] [%r]' % (allele,
self.result_files[allele] ) )
pass
return buffered_matrix[:self.nnets, :self.nsamp]
class LocalResultSet(ResultSet):
    def __init__(self, instructions, data_obj, local_path=None):
super(LocalResultSet, self).__init__( instructions )
self._data = data_obj
self.local_path = local_path
class Masked(object):
"""
Receives a resultset object and a mask (start, end) i.e. (5,10)
Returns the accuracy for all networks over that range as a numpy
array
"""
def __init__(self, result_set, mask):
self._result_set = result_set
self._mask = mask
@property
def mask(self):
return self._mask
@property
def run_id(self):
return self._result_set.run_id
@property
def result_set(self):
return self._result_set
@property
def accuracy(self):
"""
Returns a vector representing the accuracy of a each network
given this age range and ordering
"""
rs = self.result_set
mask_map = self._select_range()
accuracy = rs.compare_mat[:,mask_map].sum(axis=1)/float(len(mask_map))
return accuracy
def _select_range(self):
"""
Returns the set of samples within the afformentioned age range.
(numpy vector containing their indices)
T.D. could only compute once
"""
rs = self.result_set
start = float(self.mask[0]) - .0001
end = float(self.mask[1]) + .0001
samp = set([])
for _, sl in rs.sample_allele.iteritems():
samp |= set([ sample_name for age, sample_name in sl if start <= age < end ])
return np.array([i for i,s in enumerate(rs.sample_names) if s in samp])
class ResultSetArchive(object):
def __init__( self,run_id, num_result_sets=100):
self.logger = logging.getLogger(__name__)
self._run_id = run_id
self._num = num_result_sets
self._rs_ctr = 0 # a single archive count
self._arch_ctr = 0 # the total count for this resultset archive
self._instructions = {}
self._data = {}
self._sent = {}
self._file_name = hashlib.md5()
self._arch_name = hashlib.md5()
self._truth = False
@property
def run_id(self):
return self._run_id
def add_result_set( self, result_set):
(file_id, inst, data) = result_set.archive_package()
self._instructions[file_id] = inst
self._data[file_id] = data
self._file_name.update( file_id )
self._rs_ctr += 1
self._arch_ctr += 1
if not result_set.shuffle:
self._truth = True
if self._rs_ctr >= self._num:
self.write()
def write(self):
self._write_instructions()
self._write_data()
self._sent[self.file_hash] = self._instructions.keys()
self._arch_name.update( self.file_hash )
self._instructions = {}
self._data = {}
self._rs_ctr = 0
self._file_name = hashlib.md5()
@property
def file_hash(self):
return self._file_name.hexdigest()
@property
def archive_hash(self):
return self._arch_name.hexdigest()
@property
def sent(self):
return self._sent
class S3ResultSetArchive(ResultSetArchive):
def __init__(self,run_id, bucket_name, path=None, num_result_sets=100 ):
super(S3ResultSetArchive,self).__init__(run_id, num_result_sets)
self._bucket_name = bucket_name
self._bucket = None
self._path = path
    def _write_instructions(self):
with tempfile.SpooledTemporaryFile() as temp:
json.dump( self._instructions, temp)
temp.seek(0)
key = Key(self.bucket)
if self._path:
key.key = '%s/%s.json' % ( self._path, self.file_hash)
else:
key.key = '%s.json' % self.file_hash
key.set_contents_from_file( temp )
    def _write_data(self):
with tempfile.SpooledTemporaryFile() as temp:
np.savez(temp, **self._data)
temp.seek(0)
key = Key(self.bucket)
if self._path:
key.key = '%s/%s.npz' % ( self._path, self.file_hash)
else:
key.key = '%s.npz' % self.file_hash
key.set_contents_from_file( temp )
@property
def bucket(self):
attempts = 0
while not self._bucket:
attempts += 1
try:
conn = boto.connect_s3()
self._bucket = conn.get_bucket(self._bucket_name)
except:
if attempts > 5:
raise
msg = "Could not connect to %s. Trying again. "
msg = msg % self._bucket_name
self.logger.exception( msg )
time.sleep(2 + (random.random() * attempts))
return self._bucket
def close_archive(self):
if self._rs_ctr > 0:
self.write()
with tempfile.SpooledTemporaryFile() as temp:
json.dump( self._sent, temp)
temp.seek(0)
key = Key(self.bucket)
if self._path:
key.key = '%s/%s.manifest.json' % ( self._path, self.archive_hash)
else:
key.key = '%s.manifest.json' % self.archive_hash
key.set_contents_from_file( temp )
run_mdl.insert_ANRunArchive( self.run_id, self.archive_hash, self._arch_ctr,
bucket = self._bucket_name,
archive_manifest = '%s.manifest.json' % self.archive_hash,
path = self._path, truth = self._truth)
if __name__ == "__main__":
sqs = boto.connect_sqs()
d2a = sqs.create_queue( 'from-data-to-agg-b6-canonical-q92-bak' )
archive = S3ResultSetArchive('this-is-a-test-run-id', 'an-scratch-bucket',
path="S3ResultSetArchiveTest3", num_result_sets=9 )
ctr = 0
for i in range(2):
messages = d2a.get_messages(10)
for message in messages:
ctr += 1
instructions = json.loads( message.get_body() )
rs = S3ResultSet(instructions, 'an-from-gpu-to-agg-b6-canonical-q92')
"""
print "rs.nsamp"
print rs.nsamp
print "rs.file_id"
print rs.file_id
print "rs.nnets"
print rs.nnets
print "rs.alleles"
print rs.alleles
print "rs.data"
print rs.data
print "rs.classified"
print rs.classified
print "rs.truth"
print rs.truth
print "rs.get_run_id()"
print rs.get_run_id()
print "rs.get_strain()"
print rs.get_strain()
print "rs.get_result_files()"
print rs.get_result_files()
print "rs.archive_package()"
print rs.archive_package()
for m in ["[0,100)", "[10,20)", "[13,17)", "[0,100)"]:
mrs = Masked( rs, m)
print "Mask id"
print mrs.mask_id
print "mrs.mask"
print mrs.mask
print "Masked accuracy"
print mrs.accuracy()
"""
archive.add_result_set( rs )
print ctr
print archive.sent
archive.close_archive()
| gpl-3.0 | -7,814,231,075,353,104,000 | 31.034398 | 89 | 0.541417 | false | 3.977425 | false | false | false |
navoj/ecell4 | python/samples/reaction_reader/blbr/blbr.py | 1 | 1781 | from ecell4.reaction_reader.decorator2 import species_attributes, reaction_rules
from ecell4.reaction_reader.network import generate_reactions
@species_attributes
def attributegen():
R(r1,r2,r=(r1,r2)) | R0
L(l1,l2,l=(l1,l2)) | L0
@reaction_rules
def rulegen():
# Ligand addition
R(r) + L(_1,_2,l=[_1,_2]) == R(r^1).L(_1^1,_2,l=[_1,_2]) | (kp1, km1)
# R(r) + L(l1,l2) == R(r^1).L(l1^1,l2) | (kp1, km1)
# Chain elongation
R(r) + L(_1,_2^_,l=[_1,_2]) == R(r^1).L(_1^1,_2^_,l=[_1,_2]) | (kp2, km2)
# R(r) + L(l1,l2^_) == R(r^1).L(l1^1,l2^_) | (kp2, km2)
# Ring closure
R(r).L(l) == R(r^1).L(l^1) | (kp3, km3)
if __name__ == "__main__":
newseeds = []
for i, (sp, attr) in enumerate(attributegen()):
print i, sp, attr
newseeds.append(sp)
print ''
rules = rulegen()
for i, rr in enumerate(rules):
print i, rr
print ''
seeds, reactions = generate_reactions(
newseeds, rules, max_stoich={"R": 5, "L": 5})
for i, seed in enumerate(seeds):
print i, seed
# print ''
# for i, reaction in enumerate(reactions):
# print i, reaction
# setOption("SpeciesLabel","HNauty")
# begin model
# begin parameters
# kp1 1
# km1 1
# kp2 1
# km2 1
# kp3 1
# km3 1
# R0 3e5
# L0 3e5
# end parameters
#
# begin seed species
# R(r,r) R0
# L(l,l) L0
# end seed species
#
# begin reaction rules
# # Ligand addition
# R(r) + L(l,l) <-> R(r!1).L(l!1,l) kp1,km1
#
# # Chain elongation
# R(r) + L(l,l!+) <-> R(r!1).L(l!1,l!+) kp2,km2
#
# # Ring closure
# R(r).L(l) <-> R(r!1).L(l!1) kp3,km3
# end reaction rules
# end model
#
# ## actions ##
# generate_network({overwrite=>1,max_stoich=>{R=>5,L=>5}})
| gpl-2.0 | 4,637,524,014,636,223,000 | 22.746667 | 80 | 0.528916 | false | 2.286264 | false | false | false |
phihag/jippy | jippy/_jvm_intf.py | 1 | 56525 | import ctypes
_jboolean = ctypes.c_ubyte
_jbyte = ctypes.c_byte  # signed 8-bit, matching jni.h
_jchar = ctypes.c_ushort  # unsigned 16-bit UTF-16 code unit, matching jni.h
_jshort = ctypes.c_int16
_jint = ctypes.c_int32
_jlong = ctypes.c_int64
_jfloat = ctypes.c_float
_jdouble = ctypes.c_double
_jsize = _jint
class _jobject_struct(ctypes.Structure):
    _fields_ = []
_jobject = ctypes.POINTER(_jobject_struct)
_jclass = _jobject
_jthrowable = _jobject
_jstring = _jobject
_jarray = _jobject
_jobjectArray = _jarray
_jbooleanArray = _jarray
_jbyteArray = _jarray
_jcharArray = _jarray
_jshortArray = _jarray
_jintArray = _jarray
_jlongArray = _jarray
_jfloatArray = _jarray
_jdoubleArray = _jarray
_jobjectArray = _jarray
_jweak = _jobject
class _jvalue(ctypes.Union):
_fields_ = [
('z', _jboolean),
('b', _jbyte),
('c', _jchar),
('s', _jshort),
('i', _jint),
('j', _jlong),
('f', _jfloat),
('d', _jdouble),
('l', _jobject),
]
class _jmethodID_struct(ctypes.Structure):
_fields_ = []
_jmethodID = ctypes.POINTER(_jmethodID_struct)
class _jfieldID_struct(ctypes.Structure):
_fields_ = []
_jfieldID = ctypes.POINTER(_jfieldID_struct)
class _JNINativeMethod(ctypes.Structure):
    _fields_ = [
('name', ctypes.c_char_p, ),
('signature', ctypes.c_char_p),
('fnPtr', ctypes.c_void_p),
]
class _JavaVMOption(ctypes.Structure):
    _fields_ = [
('optionString', ctypes.c_char_p),
('extraInfo', ctypes.c_void_p),
]
class _JavaVMInitArgs(ctypes.Structure):
    _fields_ = [
('version', _jint),
('nOptions', _jint),
('options', ctypes.POINTER(_JavaVMOption)),
('ignoreUnrecognized', _jboolean)
]
class _JavaVM(ctypes.Structure):
    _fields_ = [
('functions', ctypes.c_void_p),
# really a ctypes.POINTER(_JNIInvokeInterface)
]
class _JNIInvokeInterface(ctypes.Structure):
    _fields_ = [
('reserved0', ctypes.c_void_p),
('reserved1', ctypes.c_void_p),
('reserved2', ctypes.c_void_p),
('DestroyJavaVM',
ctypes.POINTER(ctypes.CFUNCTYPE(
_jint,
ctypes.POINTER(_JavaVM) # JavaVM* vm
))
),
('AttachCurrentThread',
ctypes.POINTER(ctypes.CFUNCTYPE(
_jint,
ctypes.POINTER(_JavaVM), # JavaVM* vm
ctypes.POINTER(ctypes.c_void_p), # void** penv
ctypes.c_void_p, # void* args
))
),
('DetachCurrentThread',
ctypes.POINTER(ctypes.CFUNCTYPE(
_jint,
ctypes.POINTER(_JavaVM), # JavaVM* vm
))
),
('GetEnv',
ctypes.POINTER(ctypes.CFUNCTYPE(
_jint,
ctypes.POINTER(_JavaVM), # JavaVM* vm
ctypes.POINTER(ctypes.c_void_p), # void** penv
_jint, # jint version
))
),
('AttachCurrentThreadAsDaemon',
ctypes.POINTER(ctypes.CFUNCTYPE(
_jint,
ctypes.POINTER(_JavaVM), # JavaVM* vm
ctypes.POINTER(ctypes.c_void_p), # void** penv
ctypes.c_void_p, # void* args
))
),
]
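# A sketch for reference (not one of this module's declarations): in the
# JNI spec each table slot is a bare function pointer, which in ctypes is
# a CFUNCTYPE whose first argument is the return type. A fully typed
# FindClass slot would look like:
#
#     ('FindClass', ctypes.CFUNCTYPE(
#         _jclass,                     # returns jclass
#         ctypes.POINTER(_JNIEnv),     # JNIEnv* env
#         ctypes.c_char_p,             # const char* name
#     )),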
class _JNIEnv(ctypes.Structure):
    _fields_ = [
('functions', ctypes.c_void_p),
# really a ctypes.POINTER(_JNINativeInterface)
]
class _JNINativeInterface(ctypes.Structure):
    _fields_ = [
('reserved0', ctypes.c_void_p),
('reserved1', ctypes.c_void_p),
('reserved2', ctypes.c_void_p),
('reserved3', ctypes.c_void_p),
('foo',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.c_int, # a
ctypes.POINTER(ctypes.c_int), # b
))
),
('GetVersion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
))
),
('DefineClass',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
ctypes.POINTER(ctypes.c_char), # name
_jobject, # loader
ctypes.POINTER(_jbyte), # buf
_jsize, # len
))
),
('FindClass',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
ctypes.POINTER(ctypes.c_char), # name
))
),
('FromReflectedMethod',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # method
))
),
('FromReflectedField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # field
))
),
('ToReflectedMethod',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # cls
_jmethodID, # methodID
_jboolean, # isStatic
))
),
('GetSuperclass',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # sub
))
),
('IsAssignableFrom',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # sub
_jclass, # sup
))
),
('ToReflectedField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # cls
_jfieldID, # fieldID
_jboolean, # isStatic
))
),
('Throw',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jthrowable, # obj
))
),
('ThrowNew',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
ctypes.POINTER(ctypes.c_char), # msg
))
),
('ExceptionOccurred',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
))
),
('ExceptionDescribe',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
))
),
('ExceptionClear',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
))
),
('FatalError',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
ctypes.POINTER(ctypes.c_char), # msg
))
),
('PushLocalFrame',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jint, # capacity
))
),
('PopLocalFrame',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # result
))
),
('NewGlobalRef',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # lobj
))
),
('DeleteGlobalRef',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # gref
))
),
('DeleteLocalRef',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
))
),
('IsSameObject',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj1
_jobject, # obj2
))
),
('NewLocalRef',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # ref
))
),
('EnsureLocalCapacity',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jint, # capacity
))
),
('AllocObject',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
))
),
# NewObject skipped because of varargs
# NewObjectV skipped because of varargs
('NewObjectA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
('GetObjectClass',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
))
),
('IsInstanceOf',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jclass, # clazz
))
),
('GetMethodID',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
ctypes.POINTER(ctypes.c_char), # name
ctypes.POINTER(ctypes.c_char), # sig
))
),
# CallObjectMethod skipped because of varargs
# CallObjectMethodV skipped because of varargs
('CallObjectMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallBooleanMethod skipped because of varargs
# CallBooleanMethodV skipped because of varargs
('CallBooleanMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallByteMethod skipped because of varargs
# CallByteMethodV skipped because of varargs
('CallByteMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallCharMethod skipped because of varargs
# CallCharMethodV skipped because of varargs
('CallCharMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallShortMethod skipped because of varargs
# CallShortMethodV skipped because of varargs
('CallShortMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallIntMethod skipped because of varargs
# CallIntMethodV skipped because of varargs
('CallIntMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallLongMethod skipped because of varargs
# CallLongMethodV skipped because of varargs
('CallLongMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallFloatMethod skipped because of varargs
# CallFloatMethodV skipped because of varargs
('CallFloatMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallDoubleMethod skipped because of varargs
# CallDoubleMethodV skipped because of varargs
('CallDoubleMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallVoidMethod skipped because of varargs
# CallVoidMethodV skipped because of varargs
('CallVoidMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallNonvirtualObjectMethod skipped because of varargs
# CallNonvirtualObjectMethodV skipped because of varargs
('CallNonvirtualObjectMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallNonvirtualBooleanMethod skipped because of varargs
# CallNonvirtualBooleanMethodV skipped because of varargs
('CallNonvirtualBooleanMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallNonvirtualByteMethod skipped because of varargs
# CallNonvirtualByteMethodV skipped because of varargs
('CallNonvirtualByteMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallNonvirtualCharMethod skipped because of varargs
# CallNonvirtualCharMethodV skipped because of varargs
('CallNonvirtualCharMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallNonvirtualShortMethod skipped because of varargs
# CallNonvirtualShortMethodV skipped because of varargs
('CallNonvirtualShortMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallNonvirtualIntMethod skipped because of varargs
# CallNonvirtualIntMethodV skipped because of varargs
('CallNonvirtualIntMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallNonvirtualLongMethod skipped because of varargs
# CallNonvirtualLongMethodV skipped because of varargs
('CallNonvirtualLongMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallNonvirtualFloatMethod skipped because of varargs
# CallNonvirtualFloatMethodV skipped because of varargs
('CallNonvirtualFloatMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallNonvirtualDoubleMethod skipped because of varargs
# CallNonvirtualDoubleMethodV skipped because of varargs
('CallNonvirtualDoubleMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallNonvirtualVoidMethod skipped because of varargs
# CallNonvirtualVoidMethodV skipped because of varargs
('CallNonvirtualVoidMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
('GetFieldID',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
ctypes.POINTER(ctypes.c_char), # name
ctypes.POINTER(ctypes.c_char), # sig
))
),
('GetObjectField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
))
),
('GetBooleanField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
))
),
('GetByteField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
))
),
('GetCharField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
))
),
('GetShortField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
))
),
('GetIntField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
))
),
('GetLongField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
))
),
('GetFloatField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
))
),
('GetDoubleField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
))
),
('SetObjectField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
_jobject, # val
))
),
('SetBooleanField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
_jboolean, # val
))
),
('SetByteField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
_jbyte, # val
))
),
('SetCharField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
_jchar, # val
))
),
('SetShortField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
_jshort, # val
))
),
('SetIntField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
_jint, # val
))
),
('SetLongField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
_jlong, # val
))
),
('SetFloatField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
_jfloat, # val
))
),
('SetDoubleField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
_jfieldID, # fieldID
_jdouble, # val
))
),
('GetStaticMethodID',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
ctypes.POINTER(ctypes.c_char), # name
ctypes.POINTER(ctypes.c_char), # sig
))
),
# CallStaticObjectMethod skipped because of varargs
# CallStaticObjectMethodV skipped because of varargs
('CallStaticObjectMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallStaticBooleanMethod skipped because of varargs
# CallStaticBooleanMethodV skipped because of varargs
('CallStaticBooleanMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallStaticByteMethod skipped because of varargs
# CallStaticByteMethodV skipped because of varargs
('CallStaticByteMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallStaticCharMethod skipped because of varargs
# CallStaticCharMethodV skipped because of varargs
('CallStaticCharMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallStaticShortMethod skipped because of varargs
# CallStaticShortMethodV skipped because of varargs
('CallStaticShortMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallStaticIntMethod skipped because of varargs
# CallStaticIntMethodV skipped because of varargs
('CallStaticIntMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallStaticLongMethod skipped because of varargs
# CallStaticLongMethodV skipped because of varargs
('CallStaticLongMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallStaticFloatMethod skipped because of varargs
# CallStaticFloatMethodV skipped because of varargs
('CallStaticFloatMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallStaticDoubleMethod skipped because of varargs
# CallStaticDoubleMethodV skipped because of varargs
('CallStaticDoubleMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
# CallStaticVoidMethod skipped because of varargs
# CallStaticVoidMethodV skipped because of varargs
('CallStaticVoidMethodA',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # cls
_jmethodID, # methodID
ctypes.POINTER(_jvalue), # args
))
),
('GetStaticFieldID',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
ctypes.POINTER(ctypes.c_char), # name
ctypes.POINTER(ctypes.c_char), # sig
))
),
('GetStaticObjectField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
))
),
('GetStaticBooleanField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
))
),
('GetStaticByteField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
))
),
('GetStaticCharField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
))
),
('GetStaticShortField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
))
),
('GetStaticIntField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
))
),
('GetStaticLongField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
))
),
('GetStaticFloatField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
))
),
('GetStaticDoubleField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
))
),
('SetStaticObjectField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
_jobject, # value
))
),
('SetStaticBooleanField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
_jboolean, # value
))
),
('SetStaticByteField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
_jbyte, # value
))
),
('SetStaticCharField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
_jchar, # value
))
),
('SetStaticShortField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
_jshort, # value
))
),
('SetStaticIntField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
_jint, # value
))
),
('SetStaticLongField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
_jlong, # value
))
),
('SetStaticFloatField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
_jfloat, # value
))
),
('SetStaticDoubleField',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
_jfieldID, # fieldID
_jdouble, # value
))
),
('NewString',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
ctypes.POINTER(_jchar), # unicode
_jsize, # len
))
),
('GetStringLength',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jstring, # str
))
),
('GetStringChars',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jstring, # str
ctypes.POINTER(_jboolean), # isCopy
))
),
('ReleaseStringChars',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jstring, # str
ctypes.POINTER(_jchar), # chars
))
),
('NewStringUTF',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
ctypes.POINTER(ctypes.c_char), # utf
))
),
('GetStringUTFLength',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jstring, # str
))
),
('GetStringUTFChars',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jstring, # str
ctypes.POINTER(_jboolean), # isCopy
))
),
('ReleaseStringUTFChars',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jstring, # str
ctypes.POINTER(ctypes.c_char), # chars
))
),
('GetArrayLength',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jarray, # array
))
),
('NewObjectArray',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jsize, # len
_jclass, # clazz
_jobject, # init
))
),
('GetObjectArrayElement',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobjectArray, # array
_jsize, # index
))
),
('SetObjectArrayElement',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobjectArray, # array
_jsize, # index
_jobject, # val
))
),
('NewBooleanArray',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jsize, # len
))
),
('NewByteArray',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jsize, # len
))
),
('NewCharArray',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jsize, # len
))
),
('NewShortArray',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jsize, # len
))
),
('NewIntArray',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jsize, # len
))
),
('NewLongArray',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jsize, # len
))
),
('NewFloatArray',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jsize, # len
))
),
('NewDoubleArray',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jsize, # len
))
),
('GetBooleanArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jbooleanArray, # array
ctypes.POINTER(_jboolean), # isCopy
))
),
('GetByteArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jbyteArray, # array
ctypes.POINTER(_jboolean), # isCopy
))
),
('GetCharArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jcharArray, # array
ctypes.POINTER(_jboolean), # isCopy
))
),
('GetShortArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jshortArray, # array
ctypes.POINTER(_jboolean), # isCopy
))
),
('GetIntArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jintArray, # array
ctypes.POINTER(_jboolean), # isCopy
))
),
('GetLongArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jlongArray, # array
ctypes.POINTER(_jboolean), # isCopy
))
),
('GetFloatArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jfloatArray, # array
ctypes.POINTER(_jboolean), # isCopy
))
),
('GetDoubleArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jdoubleArray, # array
ctypes.POINTER(_jboolean), # isCopy
))
),
('ReleaseBooleanArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jbooleanArray, # array
ctypes.POINTER(_jboolean), # elems
_jint, # mode
))
),
('ReleaseByteArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jbyteArray, # array
ctypes.POINTER(_jbyte), # elems
_jint, # mode
))
),
('ReleaseCharArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jcharArray, # array
ctypes.POINTER(_jchar), # elems
_jint, # mode
))
),
('ReleaseShortArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jshortArray, # array
ctypes.POINTER(_jshort), # elems
_jint, # mode
))
),
('ReleaseIntArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jintArray, # array
ctypes.POINTER(_jint), # elems
_jint, # mode
))
),
('ReleaseLongArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jlongArray, # array
ctypes.POINTER(_jlong), # elems
_jint, # mode
))
),
('ReleaseFloatArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jfloatArray, # array
ctypes.POINTER(_jfloat), # elems
_jint, # mode
))
),
('ReleaseDoubleArrayElements',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jdoubleArray, # array
ctypes.POINTER(_jdouble), # elems
_jint, # mode
))
),
('GetBooleanArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jbooleanArray, # array
_jsize, # start
_jsize, # l
ctypes.POINTER(_jboolean), # buf
))
),
('GetByteArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jbyteArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jbyte), # buf
))
),
('GetCharArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jcharArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jchar), # buf
))
),
('GetShortArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jshortArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jshort), # buf
))
),
('GetIntArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jintArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jint), # buf
))
),
('GetLongArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jlongArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jlong), # buf
))
),
('GetFloatArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jfloatArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jfloat), # buf
))
),
('GetDoubleArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jdoubleArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jdouble), # buf
))
),
('SetBooleanArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jbooleanArray, # array
_jsize, # start
_jsize, # l
ctypes.POINTER(_jboolean), # buf
))
),
('SetByteArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jbyteArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jbyte), # buf
))
),
('SetCharArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jcharArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jchar), # buf
))
),
('SetShortArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jshortArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jshort), # buf
))
),
('SetIntArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jintArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jint), # buf
))
),
('SetLongArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jlongArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jlong), # buf
))
),
('SetFloatArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jfloatArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jfloat), # buf
))
),
('SetDoubleArrayRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jdoubleArray, # array
_jsize, # start
_jsize, # len
ctypes.POINTER(_jdouble), # buf
))
),
('RegisterNatives',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
ctypes.POINTER(_JNINativeMethod), # methods
_jint, # nMethods
))
),
('UnregisterNatives',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jclass, # clazz
))
),
('MonitorEnter',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
))
),
('MonitorExit',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
))
),
('GetJavaVM',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
ctypes.POINTER(ctypes.POINTER(_JavaVM)), # vm
))
),
('GetStringRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jstring, # str
_jsize, # start
_jsize, # len
ctypes.POINTER(_jchar), # buf
))
),
('GetStringUTFRegion',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jstring, # str
_jsize, # start
_jsize, # len
ctypes.POINTER(ctypes.c_char), # buf
))
),
('GetPrimitiveArrayCritical',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jarray, # array
ctypes.POINTER(_jboolean), # isCopy
))
),
('ReleasePrimitiveArrayCritical',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jarray, # array
ctypes.c_void_p, # carray
_jint, # mode
))
),
('GetStringCritical',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jstring, # string
ctypes.POINTER(_jboolean), # isCopy
))
),
('ReleaseStringCritical',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jstring, # string
ctypes.POINTER(_jchar), # cstring
))
),
('NewWeakGlobalRef',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
))
),
('DeleteWeakGlobalRef',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jweak, # ref
))
),
('ExceptionCheck',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
))
),
('NewDirectByteBuffer',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
ctypes.c_void_p, # address
_jlong, # capacity
))
),
('GetDirectBufferAddress',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # buf
))
),
('GetDirectBufferCapacity',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # buf
))
),
('GetObjectRefType',
ctypes.POINTER(ctypes.CFUNCTYPE(
ctypes.POINTER(_JNIEnv), # env
_jobject, # obj
))
),
]
| apache-2.0 | 834,388,089,160,075,500 | 38.036602 | 65 | 0.387368 | false | 5.12466 | false | false | false |
theteam/django-theteamcommon | src/theteamcommon/middleware.py | 1 | 1267 | from django.conf import settings
from django.utils.http import urlquote
class ChangeContentType(object):
STATIC_CONTENT = [settings.MEDIA_URL,
settings.STATIC_URL,
settings.ADMIN_MEDIA_PREFIX,
]
FILE_ASSOCIATION = {'htc': 'text/x-component'}
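    # Rewrites the Content-Type for the static extensions above; e.g. a
    # '.htc' file under MEDIA_URL is served as 'text/x-component', which
    # Internet Explorer requires before it will run CSS behaviors.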
def is_supported(self, path):
for p in self.STATIC_CONTENT:
if path.startswith(p):
return True
def process_response(self, request, response):
path = urlquote(request.get_full_path())
        extension = path.rsplit('.', 1)[-1] if '.' in path else None
if self.is_supported(path) and extension in self.FILE_ASSOCIATION:
response['Content-Type'] = self.FILE_ASSOCIATION[extension]
return response
class StagingMarquee(object):
def process_response(self, request, response):
content = response.content
index = content.upper().find('</BODY>')
if index == -1:
return response
marquee = "<div style='color:red;position:absolute;top:0;font-weight:bold;font-size:20px;'>STAGING</div>"
response.content = content[:index] + marquee + content[index:]
return response
| mit | 9,081,709,442,860,570,000 | 33.243243 | 113 | 0.604578 | false | 4.154098 | false | false | false |
gaso/users_statistics | report/report_users_by_month.py | 1 | 3125 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from report import report_sxw
from osv import osv
class report_parser(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(report_parser, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'cr':cr,
'uid': uid,
'general_info': self.general_info,
'tab_year': self.tab_year
})
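        # Everything registered in localcontext above becomes available
        # directly inside the .mako template, e.g. ${general_info(...)}
        # and ${tab_year(...)}.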
def tab_year(self, obj):
cr = self.localcontext.get('cr')
uid = self.localcontext.get('uid')
users_active_obj = self.pool.get('users.active')
all_active_users_ids = users_active_obj.search(cr, uid,[])
all_active_users = users_active_obj.browse(cr, uid, all_active_users_ids)
tab_year = []
for active_user in all_active_users :
            if active_user.year not in tab_year :
                tab_year.append(active_user.year)
return tab_year
def general_info(self, obj):
cr = self.localcontext.get('cr')
uid = self.localcontext.get('uid')
context = self.localcontext
db = cr.dbname
obj_user = self.pool.get('res.users')
users_active_obj = self.pool.get('users.active')
res_lang_obj = self.pool.get('res.lang')
user = obj_user.browse(cr, uid, uid, context=context)
obj_company = obj_user.browse(cr, uid, uid, context=context).company_id
company_name = obj_company.partner_id.name
        format = '%Y-%m-%d'  # fallback when the user language is not found
        lang_ids = res_lang_obj.search(cr, uid, [('code','=',context.get('lang'))])
        if len(lang_ids) == 1 :
            lang = res_lang_obj.browse(cr, uid, lang_ids[0])
            format = lang.date_format
data = {
'company_name' : company_name,
'user' : user,
'database' : db,
'date_print' : time.strftime(format),
}
return data
report_sxw.report_sxw('report.users.active',
'users.active',
'addons/users_statistics/report/users_by_month.mako',
parser=report_parser) | gpl-2.0 | 1,735,793,882,997,414,100 | 34.522727 | 83 | 0.5648 | false | 3.96071 | false | false | false |
neutrons/web_reflectivity | web_reflectivity/fitting/catalog.py | 1 | 3476 | #pylint: disable=bare-except, invalid-name, too-many-nested-blocks, too-many-locals, too-many-branches
"""
Optional utilities to communicate with ONcat.
ONcat is an online data catalog used internally at ORNL.
@copyright: 2018 Oak Ridge National Laboratory
"""
import sys
import datetime
import logging
from django.conf import settings
try:
import pyoncat
HAVE_ONCAT = True
except:
HAVE_ONCAT = False
from fitting.models import CatalogCache
def decode_time(timestamp):
"""
Decode timestamp and return a datetime object
:param timestamp: timestamp to decode
"""
try:
        date_time_str = timestamp
        tz_location = timestamp.rfind('+')
        if tz_location < 0:
            tz_location = timestamp.rfind('-')
            if tz_location < 8:
                # a '-' this early belongs to the date itself, not a timezone offset
                tz_location = -1
        if tz_location > 0:
            date_time_str = timestamp[:tz_location]
        # Get rid of fractions of a second
        sec_location = date_time_str.rfind('.')
        if sec_location > 0:
            date_time_str = date_time_str[:sec_location]
        return datetime.datetime.strptime(date_time_str, "%Y-%m-%dT%H:%M:%S")
except:
logging.error("Could not parse timestamp '%s': %s", timestamp, sys.exc_value)
return None
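# Illustrative example:
#   decode_time('2018-01-01T12:34:56.789-05:00')
#   -> datetime.datetime(2018, 1, 1, 12, 34, 56)
# Fractions of a second and the timezone offset are stripped before parsing.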
def get_run_info(instrument, run_number):
"""
Legacy issue:
Until the facility information is stored in the DB so that we can
retrieve the facility from it, we'll have to use the application
configuration.
:param str instrument: instrument name
:param str run_number: run number
:param str facility: facility name (SNS or HFIR)
"""
facility = 'SNS'
if hasattr(settings, 'FACILITY_INFO'):
facility = settings.FACILITY_INFO.get(instrument, 'SNS')
return _get_run_info(instrument, run_number, facility)
def _get_run_info(instrument, run_number, facility='SNS'):
"""
Get ONCat info for the specified run
Notes: At the moment we do not catalog reduced data
:param str instrument: instrument short name
:param str run_number: run number
:param str facility: facility name (SNS or HFIR)
"""
run_info = {}
cached_entry = [] #CatalogCache.objects.filter(data_path="%s/%s" % (instrument, run_number))
if len(cached_entry) > 0:
return dict(title=cached_entry[0].title, proposal=cached_entry[0].proposal)
if not HAVE_ONCAT:
return run_info
try:
oncat = pyoncat.ONCat(
settings.CATALOG_URL,
# Here we're using the machine-to-machine "Client Credentials" flow,
# which requires a client ID and secret, but no *user* credentials.
flow = pyoncat.CLIENT_CREDENTIALS_FLOW,
client_id = settings.CATALOG_ID,
client_secret = settings.CATALOG_SECRET,
)
oncat.login()
datafiles = oncat.Datafile.list(
facility = facility,
instrument = instrument.upper(),
projection = ['experiment', 'location', 'metadata.entry.title'],
tags = ['type/raw'],
ranges_q = 'indexed.run_number:%s' % str(run_number)
)
if datafiles:
run_info['title'] = datafiles[0].metadata.get('entry', {}).get('title', None)
run_info['proposal'] = datafiles[0].experiment
run_info['location'] = datafiles[0].location
except:
logging.error("Communication with ONCat server failed: %s", sys.exc_value)
return run_info
| apache-2.0 | 675,756,985,683,754,000 | 34.469388 | 102 | 0.61939 | false | 3.95 | false | false | false |
ah744/ScaffCC_RKQC | llvm/test/Scripts/coff-dump.py | 10 | 16172 | #!/usr/bin/env python
#===-- coff-dump.py - COFF object file dump utility-------------------------===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
#
# COFF File Definition
#
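# The definition below is a declarative tree of tuples interpreted by the
# handle_* functions in the second half of this script. The node kinds are:
#   ('scalar', read-format, output-format)
#   ('enum',   read-format, output-format, {value: name} or (selector, map))
#   ('flags',  read-format, output-format, [(mask, name[, map]), ...])
#   ('struct', [(field-name, node), ...])
#   ('array',  start-index, count-expr, node)
#   ('byte-array', entry-size, byte-length-expr, node)
#   ('ptr',    offset-expr, node)  and  ('blob', length-expr)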
def string_table_entry (offset):
return ('ptr', '+ + PointerToSymbolTable * NumberOfSymbols 18 %s' % offset, ('scalar', 'cstr', '%s'))
def secname(value):
if value[0] == '/':
return string_table_entry(value[1:].rstrip('\0'))
else:
return '%s'
def symname(value):
parts = struct.unpack("<2L", value)
if parts[0] == 0:
return string_table_entry(parts[1])
else:
return '%s'
file = ('struct', [
('MachineType', ('enum', '<H', '0x%X', {
0x0: 'IMAGE_FILE_MACHINE_UNKNOWN',
0x1d3: 'IMAGE_FILE_MACHINE_AM33',
0x8664: 'IMAGE_FILE_MACHINE_AMD64',
0x1c0: 'IMAGE_FILE_MACHINE_ARM',
0xebc: 'IMAGE_FILE_MACHINE_EBC',
0x14c: 'IMAGE_FILE_MACHINE_I386',
0x200: 'IMAGE_FILE_MACHINE_IA64',
0x904: 'IMAGE_FILE_MACHINE_M32R',
0x266: 'IMAGE_FILE_MACHINE_MIPS16',
0x366: 'IMAGE_FILE_MACHINE_MIPSFPU',
0x466: 'IMAGE_FILE_MACHINE_MIPSFPU16',
0x1f0: 'IMAGE_FILE_MACHINE_POWERPC',
0x1f1: 'IMAGE_FILE_MACHINE_POWERPCFP',
0x166: 'IMAGE_FILE_MACHINE_R4000',
0x1a2: 'IMAGE_FILE_MACHINE_SH3',
0x1a3: 'IMAGE_FILE_MACHINE_SH3DSP',
0x1a6: 'IMAGE_FILE_MACHINE_SH4',
0x1a8: 'IMAGE_FILE_MACHINE_SH5',
0x1c2: 'IMAGE_FILE_MACHINE_THUMB',
0x169: 'IMAGE_FILE_MACHINE_WCEMIPSV2',
})),
('NumberOfSections', ('scalar', '<H', '%d')),
('TimeDateStamp', ('scalar', '<L', '%d')),
('PointerToSymbolTable', ('scalar', '<L', '0x%0X')),
('NumberOfSymbols', ('scalar', '<L', '%d')),
('SizeOfOptionalHeader', ('scalar', '<H', '%d')),
('Characteristics', ('flags', '<H', '0x%x', [
(0x0001, 'IMAGE_FILE_RELOCS_STRIPPED', ),
(0x0002, 'IMAGE_FILE_EXECUTABLE_IMAGE', ),
(0x0004, 'IMAGE_FILE_LINE_NUMS_STRIPPED', ),
(0x0008, 'IMAGE_FILE_LOCAL_SYMS_STRIPPED', ),
(0x0010, 'IMAGE_FILE_AGGRESSIVE_WS_TRIM', ),
(0x0020, 'IMAGE_FILE_LARGE_ADDRESS_AWARE', ),
(0x0080, 'IMAGE_FILE_BYTES_REVERSED_LO', ),
(0x0100, 'IMAGE_FILE_32BIT_MACHINE', ),
(0x0200, 'IMAGE_FILE_DEBUG_STRIPPED', ),
(0x0400, 'IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP', ),
(0x0800, 'IMAGE_FILE_NET_RUN_FROM_SWAP', ),
(0x1000, 'IMAGE_FILE_SYSTEM', ),
(0x2000, 'IMAGE_FILE_DLL', ),
(0x4000, 'IMAGE_FILE_UP_SYSTEM_ONLY', ),
(0x8000, 'IMAGE_FILE_BYTES_REVERSED_HI', ),
])),
('Sections', ('array', '1', 'NumberOfSections', ('struct', [
('Name', ('scalar', '<8s', secname)),
('VirtualSize', ('scalar', '<L', '%d' )),
('VirtualAddress', ('scalar', '<L', '%d' )),
('SizeOfRawData', ('scalar', '<L', '%d' )),
('PointerToRawData', ('scalar', '<L', '0x%X' )),
('PointerToRelocations', ('scalar', '<L', '0x%X' )),
('PointerToLineNumbers', ('scalar', '<L', '0x%X' )),
('NumberOfRelocations', ('scalar', '<H', '%d' )),
('NumberOfLineNumbers', ('scalar', '<H', '%d' )),
('Charateristics', ('flags', '<L', '0x%X', [
(0x00000008, 'IMAGE_SCN_TYPE_NO_PAD'),
(0x00000020, 'IMAGE_SCN_CNT_CODE'),
(0x00000040, 'IMAGE_SCN_CNT_INITIALIZED_DATA'),
(0x00000080, 'IMAGE_SCN_CNT_UNINITIALIZED_DATA'),
(0x00000100, 'IMAGE_SCN_LNK_OTHER'),
(0x00000200, 'IMAGE_SCN_LNK_INFO'),
(0x00000800, 'IMAGE_SCN_LNK_REMOVE'),
(0x00001000, 'IMAGE_SCN_LNK_COMDAT'),
(0x00008000, 'IMAGE_SCN_GPREL'),
(0x00020000, 'IMAGE_SCN_MEM_PURGEABLE'),
(0x00020000, 'IMAGE_SCN_MEM_16BIT'),
(0x00040000, 'IMAGE_SCN_MEM_LOCKED'),
(0x00080000, 'IMAGE_SCN_MEM_PRELOAD'),
(0x00F00000, 'IMAGE_SCN_ALIGN', {
0x00100000: 'IMAGE_SCN_ALIGN_1BYTES',
0x00200000: 'IMAGE_SCN_ALIGN_2BYTES',
0x00300000: 'IMAGE_SCN_ALIGN_4BYTES',
0x00400000: 'IMAGE_SCN_ALIGN_8BYTES',
0x00500000: 'IMAGE_SCN_ALIGN_16BYTES',
0x00600000: 'IMAGE_SCN_ALIGN_32BYTES',
0x00700000: 'IMAGE_SCN_ALIGN_64BYTES',
0x00800000: 'IMAGE_SCN_ALIGN_128BYTES',
0x00900000: 'IMAGE_SCN_ALIGN_256BYTES',
0x00A00000: 'IMAGE_SCN_ALIGN_512BYTES',
0x00B00000: 'IMAGE_SCN_ALIGN_1024BYTES',
0x00C00000: 'IMAGE_SCN_ALIGN_2048BYTES',
0x00D00000: 'IMAGE_SCN_ALIGN_4096BYTES',
0x00E00000: 'IMAGE_SCN_ALIGN_8192BYTES',
}),
(0x01000000, 'IMAGE_SCN_LNK_NRELOC_OVFL'),
(0x02000000, 'IMAGE_SCN_MEM_DISCARDABLE'),
(0x04000000, 'IMAGE_SCN_MEM_NOT_CACHED'),
(0x08000000, 'IMAGE_SCN_MEM_NOT_PAGED'),
(0x10000000, 'IMAGE_SCN_MEM_SHARED'),
(0x20000000, 'IMAGE_SCN_MEM_EXECUTE'),
(0x40000000, 'IMAGE_SCN_MEM_READ'),
(0x80000000, 'IMAGE_SCN_MEM_WRITE'),
])),
('SectionData', ('ptr', 'PointerToRawData', ('blob', 'SizeOfRawData'))),
('Relocations', ('ptr', 'PointerToRelocations', ('array', '0', 'NumberOfRelocations', ('struct', [
('VirtualAddress', ('scalar', '<L', '0x%X')),
('SymbolTableIndex', ('scalar', '<L', '%d' )),
('Type', ('enum', '<H', '%d', ('MachineType', {
0x14c: {
0x0000: 'IMAGE_REL_I386_ABSOLUTE',
0x0001: 'IMAGE_REL_I386_DIR16',
0x0002: 'IMAGE_REL_I386_REL16',
0x0006: 'IMAGE_REL_I386_DIR32',
0x0007: 'IMAGE_REL_I386_DIR32NB',
0x0009: 'IMAGE_REL_I386_SEG12',
0x000A: 'IMAGE_REL_I386_SECTION',
0x000B: 'IMAGE_REL_I386_SECREL',
0x000C: 'IMAGE_REL_I386_TOKEN',
0x000D: 'IMAGE_REL_I386_SECREL7',
0x0014: 'IMAGE_REL_I386_REL32',
},
0x8664: {
0x0000: 'IMAGE_REL_AMD64_ABSOLUTE',
0x0001: 'IMAGE_REL_AMD64_ADDR64',
0x0002: 'IMAGE_REL_AMD64_ADDR32',
0x0003: 'IMAGE_REL_AMD64_ADDR32NB',
0x0004: 'IMAGE_REL_AMD64_REL32',
0x0005: 'IMAGE_REL_AMD64_REL32_1',
0x0006: 'IMAGE_REL_AMD64_REL32_2',
0x0007: 'IMAGE_REL_AMD64_REL32_3',
0x0008: 'IMAGE_REL_AMD64_REL32_4',
0x0009: 'IMAGE_REL_AMD64_REL32_5',
0x000A: 'IMAGE_REL_AMD64_SECTION',
0x000B: 'IMAGE_REL_AMD64_SECREL',
0x000C: 'IMAGE_REL_AMD64_SECREL7',
0x000D: 'IMAGE_REL_AMD64_TOKEN',
0x000E: 'IMAGE_REL_AMD64_SREL32',
0x000F: 'IMAGE_REL_AMD64_PAIR',
0x0010: 'IMAGE_REL_AMD64_SSPAN32',
},
}))),
('SymbolName', ('ptr', '+ PointerToSymbolTable * SymbolTableIndex 18', ('scalar', '<8s', symname)))
])))),
]))),
('Symbols', ('ptr', 'PointerToSymbolTable', ('byte-array', '18', '* NumberOfSymbols 18', ('struct', [
('Name', ('scalar', '<8s', symname)),
('Value', ('scalar', '<L', '%d' )),
('SectionNumber', ('scalar', '<H', '%d' )),
('_Type', ('scalar', '<H', None )),
('SimpleType', ('enum', '& _Type 15', '%d', {
0: 'IMAGE_SYM_TYPE_NULL',
1: 'IMAGE_SYM_TYPE_VOID',
2: 'IMAGE_SYM_TYPE_CHAR',
3: 'IMAGE_SYM_TYPE_SHORT',
4: 'IMAGE_SYM_TYPE_INT',
5: 'IMAGE_SYM_TYPE_LONG',
6: 'IMAGE_SYM_TYPE_FLOAT',
7: 'IMAGE_SYM_TYPE_DOUBLE',
8: 'IMAGE_SYM_TYPE_STRUCT',
9: 'IMAGE_SYM_TYPE_UNION',
10: 'IMAGE_SYM_TYPE_ENUM',
11: 'IMAGE_SYM_TYPE_MOE',
12: 'IMAGE_SYM_TYPE_BYTE',
13: 'IMAGE_SYM_TYPE_WORD',
14: 'IMAGE_SYM_TYPE_UINT',
15: 'IMAGE_SYM_TYPE_DWORD',
})), # (Type & 0xF0) >> 4
('ComplexType', ('enum', '>> & _Type 240 4', '%d', {
0: 'IMAGE_SYM_DTYPE_NULL',
1: 'IMAGE_SYM_DTYPE_POINTER',
2: 'IMAGE_SYM_DTYPE_FUNCTION',
3: 'IMAGE_SYM_DTYPE_ARRAY',
})),
('StorageClass', ('enum', '<B', '%d', {
      0xFF: 'IMAGE_SYM_CLASS_END_OF_FUNCTION', # -1, read here as an unsigned byte
0: 'IMAGE_SYM_CLASS_NULL',
1: 'IMAGE_SYM_CLASS_AUTOMATIC',
2: 'IMAGE_SYM_CLASS_EXTERNAL',
3: 'IMAGE_SYM_CLASS_STATIC',
4: 'IMAGE_SYM_CLASS_REGISTER',
5: 'IMAGE_SYM_CLASS_EXTERNAL_DEF',
6: 'IMAGE_SYM_CLASS_LABEL',
7: 'IMAGE_SYM_CLASS_UNDEFINED_LABEL',
8: 'IMAGE_SYM_CLASS_MEMBER_OF_STRUCT',
9: 'IMAGE_SYM_CLASS_ARGUMENT',
10: 'IMAGE_SYM_CLASS_STRUCT_TAG',
11: 'IMAGE_SYM_CLASS_MEMBER_OF_UNION',
12: 'IMAGE_SYM_CLASS_UNION_TAG',
13: 'IMAGE_SYM_CLASS_TYPE_DEFINITION',
14: 'IMAGE_SYM_CLASS_UNDEFINED_STATIC',
15: 'IMAGE_SYM_CLASS_ENUM_TAG',
16: 'IMAGE_SYM_CLASS_MEMBER_OF_ENUM',
17: 'IMAGE_SYM_CLASS_REGISTER_PARAM',
18: 'IMAGE_SYM_CLASS_BIT_FIELD',
100: 'IMAGE_SYM_CLASS_BLOCK',
101: 'IMAGE_SYM_CLASS_FUNCTION',
102: 'IMAGE_SYM_CLASS_END_OF_STRUCT',
103: 'IMAGE_SYM_CLASS_FILE',
104: 'IMAGE_SYM_CLASS_SECTION',
105: 'IMAGE_SYM_CLASS_WEAK_EXTERNAL',
107: 'IMAGE_SYM_CLASS_CLR_TOKEN',
})),
('NumberOfAuxSymbols', ('scalar', '<B', '%d' )),
('AuxillaryData', ('blob', '* NumberOfAuxSymbols 18')),
])))),
])
#
# Definition Interpreter
#
import sys, types, struct, re
Input = None
Stack = []
Fields = {}
Indent = 0
NewLine = True
def indent():
global Indent
Indent += 1
def dedent():
global Indent
Indent -= 1
def write(input):
global NewLine
output = ""
for char in input:
if NewLine:
output += Indent * ' '
NewLine = False
output += char
if char == '\n':
NewLine = True
sys.stdout.write(output)
def read(format):
return struct.unpack(format, Input.read(struct.calcsize(format)))
def read_cstr():
output = ""
while True:
char = Input.read(1)
if len(char) == 0:
raise RuntimeError ("EOF while reading cstr")
if char == '\0':
break
output += char
return output
def push_pos(seek_to = None):
Stack [0:0] = [Input.tell()]
if seek_to:
Input.seek(seek_to)
def pop_pos():
assert(len(Stack) > 0)
Input.seek(Stack[0])
del Stack[0]
def print_binary_data(size):
value = ""
while size > 0:
if size >= 16:
data = Input.read(16)
size -= 16
else:
data = Input.read(size)
size = 0
value += data
bytes = ""
text = ""
for index in xrange(16):
if index < len(data):
if index == 8:
bytes += "- "
ch = ord(data[index])
bytes += "%02X " % ch
if ch >= 0x20 and ch <= 0x7F:
text += data[index]
else:
text += "."
else:
if index == 8:
bytes += " "
bytes += " "
write("%s|%s|\n" % (bytes, text))
return value
idlit = re.compile("[a-zA-Z_][a-zA-Z0-9_-]*")
numlit = re.compile("[0-9]+")
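# Expressions in the definition tree use prefix (Polish) notation over
# previously read field names, e.g. '+ PointerToSymbolTable * SymbolTableIndex 18'
# evaluates to PointerToSymbolTable + (SymbolTableIndex * 18); a token that
# looks like a struct format (e.g. '<H') reads its value from the input file.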
def read_value(expr):
input = iter(expr.split())
def eval():
token = input.next()
if expr == 'cstr':
return read_cstr()
if expr == 'true':
return True
if expr == 'false':
return False
if token == '+':
return eval() + eval()
if token == '-':
return eval() - eval()
if token == '*':
return eval() * eval()
if token == '/':
return eval() / eval()
if token == '&':
return eval() & eval()
if token == '|':
return eval() | eval()
if token == '>>':
return eval() >> eval()
if token == '<<':
return eval() << eval()
if len(token) > 1 and token[0] in ('=', '@', '<', '!', '>'):
val = read(expr)
assert(len(val) == 1)
return val[0]
if idlit.match(token):
return Fields[token]
if numlit.match(token):
return int(token)
raise RuntimeError("unexpected token %s" % repr(token))
value = eval()
try:
input.next()
except StopIteration:
return value
raise RuntimeError("unexpected input at end of expression")
def write_value(format,value):
format_type = type(format)
if format_type is types.StringType:
write(format % value)
elif format_type is types.FunctionType:
write_value(format(value), value)
elif format_type is types.TupleType:
Fields['this'] = value
handle_element(format)
elif format_type is types.NoneType:
pass
else:
raise RuntimeError("unexpected type: %s" % repr(format_type))
def handle_scalar(entry):
iformat = entry[1]
oformat = entry[2]
value = read_value(iformat)
write_value(oformat, value)
return value
def handle_enum(entry):
iformat = entry[1]
oformat = entry[2]
definitions = entry[3]
value = read_value(iformat)
if type(definitions) is types.TupleType:
selector = read_value(definitions[0])
definitions = definitions[1][selector]
if value in definitions:
description = definitions[value]
else:
description = "unknown"
write("%s (" % description)
write_value(oformat, value)
write(")")
return value
def handle_flags(entry):
iformat = entry[1]
oformat = entry[2]
definitions = entry[3]
value = read_value(iformat)
write_value(oformat, value)
indent()
for entry in definitions:
mask = entry[0]
name = entry[1]
if len (entry) == 3:
map = entry[2]
selection = value & mask
if selection in map:
write("\n%s" % map[selection])
else:
write("\n%s <%d>" % (name, selection))
elif len(entry) == 2:
if value & mask != 0:
write("\n%s" % name)
dedent()
return value
def handle_struct(entry):
global Fields
members = entry[1]
newFields = {}
write("{\n");
indent()
for member in members:
name = member[0]
type = member[1]
if name[0] != "_":
write("%s = " % name.ljust(24))
value = handle_element(type)
if name[0] != "_":
write("\n")
Fields[name] = value
newFields[name] = value
dedent()
write("}")
return newFields
def handle_array(entry):
start_index = entry[1]
length = entry[2]
element = entry[3]
newItems = []
write("[\n")
indent()
start_index = read_value(start_index)
value = read_value(length)
for index in xrange(value):
write("%d = " % (index + start_index))
value = handle_element(element)
write("\n")
newItems.append(value)
dedent()
write("]")
return newItems
def handle_byte_array(entry):
ent_size = entry[1]
length = entry[2]
element = entry[3]
newItems = []
write("[\n")
indent()
item_size = read_value(ent_size)
value = read_value(length)
end_of_array = Input.tell() + value
prev_loc = Input.tell()
index = 0
while Input.tell() < end_of_array:
write("%d = " % index)
value = handle_element(element)
write("\n")
newItems.append(value)
index += (Input.tell() - prev_loc) / item_size
prev_loc = Input.tell()
dedent()
write("]")
return newItems
def handle_ptr(entry):
offset = entry[1]
element = entry[2]
value = None
offset = read_value(offset)
if offset != 0:
push_pos(offset)
value = handle_element(element)
pop_pos()
else:
write("None")
return value
def handle_blob(entry):
length = entry[1]
write("\n")
indent()
value = print_binary_data(read_value(length))
dedent()
return value
def handle_element(entry):
handlers = {
'struct': handle_struct,
'scalar': handle_scalar,
'enum': handle_enum,
'flags': handle_flags,
'ptr': handle_ptr,
'blob': handle_blob,
'array': handle_array,
'byte-array': handle_byte_array,
}
if not entry[0] in handlers:
raise RuntimeError ("unexpected type '%s'" % str (entry[0]))
return handlers[entry[0]](entry)
if len(sys.argv) <= 1 or sys.argv[1] == '-':
import StringIO
Input = StringIO.StringIO(sys.stdin.read())
else:
Input = open (sys.argv[1], "rb")
try:
handle_element(file)
finally:
Input.close()
Input = None
| bsd-2-clause | 8,050,534,616,835,100,000 | 26.410169 | 112 | 0.554353 | false | 2.99426 | false | false | false |
michaelrup/scancode-toolkit | src/textcode/strings.py | 5 | 8700 | #
# Copyright (c) 2016 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import string
import re
from commoncode.text import toascii
"""
Extract raw ASCII strings from (possibly) binary strings.
Both plain ASCII and UTF-16-LE-encoded (aka. wide) strings are extracted.
The later is found typically in some Windows PEs.
This is more or less similar to what GNU Binutils strings does.
Does not recognize and extract non-ASCII characters
Some alternative and references:
https://github.com/fireeye/flare-floss (also included)
http://stackoverflow.com/questions/10637055/how-do-i-extract-unicode-character-sequences-from-an-mz-executable-file
http://stackoverflow.com/questions/1324067/how-do-i-get-str-translate-to-work-with-unicode-strings
http://stackoverflow.com/questions/11066400/remove-punctuation-from-unicode-formatted-strings/11066687#11066687
https://github.com/TakahiroHaruyama/openioc_scan/blob/d7e8c5962f77f55f9a5d34dbfd0799f8c57eff7f/openioc_scan.py#L184
"""
# at least three characters are needed to consider some blob as a good string
MIN_LEN = 3
def strings_from_file(location, buff_size=1024 * 1024, ascii=False, clean=True, min_len=MIN_LEN):
"""
Yield unicode strings made only of ASCII characters found in file at location.
Process the file in chunks (to limit memory usage). If ascii is True, strings
are converted to plain ASCII "str or byte" strings instead of unicode.
"""
with open(location, 'rb') as f:
while 1:
buf = f.read(buff_size)
if not buf:
break
for s in strings_from_string(buf, clean=clean, min_len=min_len):
if ascii:
s = toascii(s)
s = s.strip()
if not s or len(s) < min_len:
continue
yield s
# Extracted text is digits, letters, punctuation and white spaces
punctuation = re.escape(string.punctuation)
whitespaces = ' \t\n\r'
printable = 'A-Za-z0-9' + whitespaces + punctuation
null_byte = '\x00'
ascii_strings = re.compile(
# plain ASCII is a sequence of printable of a minimum length
'('
+ '[' + printable + ']'
+ '{' + str(MIN_LEN) + ',}'
+ ')'
# or utf-16-le-encoded ASCII is a sequence of ASCII+null byte
+ '|'
+ '('
+ '(?:' + '[' + printable + ']' + null_byte + ')'
+ '{' + str(MIN_LEN) + ',}'
+ ')'
).finditer
def strings_from_string(binary_string, clean=False, min_len=0):
"""
Yield strings extracted from a (possibly binary) string. The strings are ASCII
printable characters only. If clean is True, also clean and filter short and
repeated strings.
Note: we do not keep the offset of where a string was found (e.g. match.start).
"""
for match in ascii_strings(binary_string):
s = decode(match.group())
if s:
if clean:
for ss in clean_string(s, min_len=min_len):
yield ss
else:
yield s
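# Illustrative example: both plain and UTF-16-LE-encoded runs are extracted,
# so strings_from_string('abc\x00\xff\xfeh\x00i\x00!\x00junk') yields
# u'abc', u'hi!' and u'junk' with the defaults (clean=False, min_len=0).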
def string_from_string(binary_string, clean=False, min_len=0):
"""
    Return a single unicode string extracted from a (possibly binary) string,
removing all non printable characters.
"""
return u' '.join(strings_from_string(binary_string, clean, min_len))
def decode(s):
"""
Return a decoded unicode string from s or None if the string cannot be decoded.
"""
if '\x00' in s:
try:
return s.decode('utf-16-le')
except UnicodeDecodeError:
pass
else:
return s.decode('ascii')
remove_junk = re.compile('[' + punctuation + whitespaces + ']').sub
def clean_string(s, min_len=MIN_LEN,
junk=string.punctuation + string.digits + string.whitespace):
"""
Yield cleaned strings from string s if it passes some validity tests:
* not made of white spaces
* with a minimum length
    * not made of a single repeated character
    * not made only of digits, punctuation and whitespace
"""
s = s.strip()
def valid(st):
st = remove_junk('', st)
return (st and len(st) >= min_len
# ignore character repeats, e.g need more than two unique characters
and len(set(st.lower())) > 1
# ignore string made only of digit or punctuation
and not all(c in junk for c in st))
if valid(s):
yield s.strip()
#####################################################################################
# TODO: Strings classification
# Classify strings, detect junk, detect paths, symbols, demangle symbols, unescape
# http://code.activestate.com/recipes/466293-efficient-character-escapes-decoding/?in=user-2382677
def is_file(s):
"""
Return True if s looks like a file name.
    Example: dsdsd.dll
"""
filename = re.compile('^[\w_\-]+\.\w{1,4}$', re.IGNORECASE).match
return filename(s)
def is_shared_object(s):
"""
Return True if s looks like a shared object file.
Example: librt.so.1
"""
    so = re.compile('^[\w_\-]+\.so(\.[0-9]+)+$', re.IGNORECASE).match
return so(s)
def is_posix_path(s):
"""
Return True if s looks like a posix path.
Example: /usr/lib/librt.so.1 or /usr/lib
"""
# TODO: implement me
posix = re.compile('^/[\w_\-].*$', re.IGNORECASE).match
posix(s)
return False
def is_relative_path(s):
"""
Return True if s looks like a relative posix path.
Example: usr/lib/librt.so.1 or ../usr/lib
"""
    relative = re.compile('^(?:\.\./)*[\w_\-]+/.*$', re.IGNORECASE).match
return relative(s)
def is_win_path(s):
"""
Return True if s looks like a win path.
Example: c:\usr\lib\librt.so.1.
"""
    winpath = re.compile(r'^[a-z]:\\.*$', re.IGNORECASE).match
return winpath(s)
def is_c_source(s):
"""
Return True if s looks like a C source path.
Example: this.c
FIXME: should get actual algo from contenttype.
"""
return s.endswith(('.c', '.cpp', '.hpp', '.h'))
def is_java_source(s):
"""
Return True if s looks like a Java source path.
Example: this.java
FIXME: should get actual algo from contenttype.
"""
return s.endswith(('.java', '.jsp', '.aj',))
def is_glibc_ref(s):
"""
Return True if s looks like a reference to GLIBC as typically found in
Elfs.
"""
return '@@GLIBC' in s
def is_java_ref(s):
"""
Return True if s looks like a reference to a java class or package in a
class file.
"""
jref = re.compile('^.*$', re.IGNORECASE).match
# TODO: implement me
jref(s)
return False
def is_win_guid(s):
"""
Return True if s looks like a windows GUID/APPID/CLSID.
"""
guid = re.compile('"\{[A-Z0-9]{8}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{12}\}"', re.IGNORECASE).match
# TODO: implement me
guid(s)
return False
class BinaryStringsClassifier(object):
"""
Classify extracted strings as good or bad/junk.
The types of strings that are recognized include:
file
file_path
junk
text
"""
# TODO: Implement me
if __name__ == '__main__':
# also usable a simple command line script
import sys
location = sys.argv[1]
for s in strings_from_file(location):
print(s)
| apache-2.0 | -2,972,077,459,978,382,300 | 30.407942 | 115 | 0.635172 | false | 3.62349 | false | false | false |
KonstantinShemyak/python-javatools | javatools/crypto.py | 2 | 6109 | # This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see
# <http://www.gnu.org/licenses/>.
"""
Cryptography-related functions for handling JAR signature block files.
:author: Konstantin Shemyak <konstantin@shemyak.com>
:license: LGPL v.3
"""
from M2Crypto import SMIME, X509, BIO, RSA, DSA, EC, m2
class CannotFindKeyTypeError(Exception):
"""
Failed to determine the type of the private key.
"""
pass
class SignatureBlockVerificationError(Exception):
"""
The Signature Block File verification failed.
"""
pass
def private_key_type(key_file):
"""
Determines type of the private key: RSA, DSA, EC.
:param key_file: file path
:type key_file: str
:return: one of "RSA", "DSA" or "EC"
:except CannotFindKeyTypeError
"""
keytypes = (("RSA", RSA), ("DSA", DSA), ("EC", EC))
for key, ktype in keytypes:
try:
ktype.load_key(key_file)
except (RSA.RSAError, DSA.DSAError, ValueError):
continue
else:
return key
else:
raise CannotFindKeyTypeError()
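# For example (the file name is an assumption): private_key_type('signer-key.pem')
# returns 'RSA' when the file holds an RSA private key in PEM format.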
def create_signature_block(openssl_digest, certificate, private_key,
extra_certs, data):
"""
Produces a signature block for the data.
Reference
---------
http://docs.oracle.com/javase/7/docs/technotes/guides/jar/jar.html#Digital_Signatures
Note: Oracle does not specify the content of the "signature
file block", friendly saying that "These are binary files
not intended to be interpreted by humans".
    :param openssl_digest: algorithm known to OpenSSL used to digest the data
    :type openssl_digest: str
    :param certificate: filename of the certificate file (PEM format)
    :type certificate: str
    :param private_key: filename of private key used to sign (PEM format)
:type private_key: str
:param extra_certs: additional certificates to embed into the signature (PEM format)
:type extra_certs: array of filenames
:param data: the content to be signed
:type data: bytes
:returns: content of the signature block file as produced by jarsigner
:rtype: bytes
""" # noqa
smime = SMIME.SMIME()
with BIO.openfile(private_key) as k, BIO.openfile(certificate) as c:
smime.load_key_bio(k, c)
if extra_certs is not None:
# Could we use just X509.new_stack_from_der() instead?
stack = X509.X509_Stack()
for cert in extra_certs:
stack.push(X509.load_cert(cert))
smime.set_x509_stack(stack)
pkcs7 = smime.sign(BIO.MemoryBuffer(data),
algo=openssl_digest,
flags=(SMIME.PKCS7_BINARY |
SMIME.PKCS7_DETACHED |
SMIME.PKCS7_NOATTR))
tmp = BIO.MemoryBuffer()
pkcs7.write_der(tmp)
return tmp.read()
def ignore_missing_email_protection_eku_cb(ok, ctx):
"""
For verifying PKCS7 signature, m2Crypto uses OpenSSL's PKCS7_verify().
The latter requires that ExtendedKeyUsage extension, if present,
contains 'emailProtection' OID. (Is it because S/MIME is/was the
primary use case for PKCS7?)
We do not want to fail the verification in this case. At present,
    M2Crypto lacks the ability to remove or modify an existing
extension. Let's assign a custom verification callback.
"""
# The error we want to ignore is indicated by X509_V_ERR_INVALID_PURPOSE.
err = ctx.get_error()
if err != m2.X509_V_ERR_INVALID_PURPOSE:
return ok
    # PKCS7_verify() has this requirement only for the signing certificate.
# Do not modify the behavior for certificates upper in the chain.
if ctx.get_error_depth() > 0:
return ok
# There is another cause of ERR_INVALID_PURPOSE: incompatible keyUsage.
# Do not modify the default behavior in this case.
cert = ctx.get_current_cert()
try:
key_usage = cert.get_ext('keyUsage').get_value()
if 'digitalSignature' not in key_usage \
and 'nonRepudiation' not in key_usage:
return ok
except LookupError:
pass
# Here, keyUsage is either absent, or contains the needed bit(s).
# So ERR_INVALID_PURPOSE is caused by EKU not containing 'emailProtection'.
# Ignore this error.
return 1
def verify_signature_block(certificate_file, content, signature):
"""
Verifies the 'signature' over the 'content', trusting the
'certificate'.
:param certificate_file: the trusted certificate (PEM format)
:type certificate_file: str
:param content: The signature should match this content
:type content: str
:param signature: data (DER format) subject to check
:type signature: str
:return None if the signature validates.
:exception SignatureBlockVerificationError
"""
sig_bio = BIO.MemoryBuffer(signature)
pkcs7 = SMIME.PKCS7(m2.pkcs7_read_bio_der(sig_bio._ptr()), 1)
signers_cert_stack = pkcs7.get0_signers(X509.X509_Stack())
trusted_cert_store = X509.X509_Store()
trusted_cert_store.set_verify_cb(ignore_missing_email_protection_eku_cb)
trusted_cert_store.load_info(certificate_file)
smime = SMIME.SMIME()
smime.set_x509_stack(signers_cert_stack)
smime.set_x509_store(trusted_cert_store)
data_bio = BIO.MemoryBuffer(content)
try:
smime.verify(pkcs7, data_bio)
except SMIME.PKCS7_Error as message:
raise SignatureBlockVerificationError(message)
else:
return None
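# A minimal usage sketch (the file names are assumptions, not part of the
# library):
#
#   data = open('META-INF/MANIFEST.MF', 'rb').read()
#   sig = create_signature_block('sha256', 'signer.pem', 'signer-key.pem',
#                                None, data)
#   verify_signature_block('signer.pem', data, sig)   # None when it verifies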
#
# The end.
| lgpl-3.0 | 540,998,306,147,703,550 | 31.668449 | 89 | 0.671141 | false | 3.770988 | false | false | false |
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractKobatoChanDaiSukiScan.py | 1 | 9274 | def extractKobatoChanDaiSukiScan(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'Lookism' in item['tags']:
return None
if 'webtoon' in item['tags']:
return None
if '*Announcements*' in item['tags']:
return None
if '*STAFF ONLY*' in item['tags']:
return None
tagmap = [
("Can't Stop Craving Potions Again and Again", "Can't Stop Craving Potions Again and Again", 'translated'),
("Can't Stop Craving Potions", "Can't Stop Craving Potions", 'translated'),
("Royal Roader on My Own", "Royal Roader on My Own", 'translated'),
('A Bird That Drinks Tears', 'A Bird That Drinks Tears', 'translated'),
('All Things Wrong', 'Doing All Things Wrong And Somehow Becoming The Best In The Game', 'translated'),
('Cheat Skill: Sleep Learning', 'Cheat Skill: Sleep Learning', 'translated'),
('Coder Lee YongHo', 'Coder Lee YongHo', 'translated'),
('FFF-Class Trashero', 'FFF-Class Trashero', 'translated'),
('Dragon Poor', 'Dragon Poor', 'translated'),
('Everyone Else is a Returnee', 'Everyone Else is a Returnee', 'translated'),
('God of Cooking', 'God of Cooking', 'translated'),
('God of Crime', 'God of Crime', 'translated'),
('God of Music', 'God of Music', 'translated'),
('God of Thunder', 'God of Thunder', 'translated'),
('God-level Bodyguard in the City', 'God-level Bodyguard in the City', 'translated'),
('Green Skin', 'Green Skin', 'translated'),
('I am the monarch', 'I am the Monarch', 'translated'),
('Kenkyo kenjitsu o motto ni ikite orimasu!', 'Kenkyo, Kenjitsu o Motto ni Ikite Orimasu!', 'translated'),
('Life of the Damned', 'Life of the Damned', 'translated'),
('Forest of Funerals', 'Forest of Funerals', 'translated'),
('Link the Orc', 'Link the Orc', 'translated'),
('maou no hajimekata', 'Maou no Hajimekata', 'translated'),
('Miracle Drawing!', 'Miracle Drawing!', 'translated'),
('Omni Genius', 'Omni Genius', 'translated'),
('Omocha no Kyousou-sama', 'Omocha no Kyousou-sama', 'translated'),
('One Man Army', 'One Man Army', 'translated'),
('Reincarnator', 'Reincarnator', 'translated'),
('Rise Strongest Warrior', 'Rise Strongest Warrior', 'translated'),
('Solo Clear', 'Solo Clear', 'translated'),
('Survival World RPG', 'Survival World RPG', 'translated'),
('Ten Thousand Heaven Controlling Sword', 'Ten Thousand Heaven Controlling Sword', 'translated'),
('The Bird That Drinks Tears', 'The Bird That Drinks Tears', 'translated'),
('The Sorcerer Laughs in the Mirror', 'The Sorcerer Laughs in the Mirror', 'translated'),
('The Stone of Days', 'The Stone of Days', 'translated'),
('The Strongest System', 'The Strongest System', 'translated'),
('Wagahai no Kare wa Baka de aru', 'Wagahai no Kare wa Baka de aru', 'translated'),
('When The Star Flutters', 'When The Star Flutters', 'translated'),
('Magician of Insa-Dong', 'Magician of Insa-Dong', 'translated'),
("Hero", "Hero", 'oel'),
("Immortal Ascension Tower", "Immortal Ascension Tower", 'oel'),
("The Overlord's Elite is now a Human?!", "The Overlord's Elite is now a Human?!", 'oel'),
("Titan's Throne", "Titan's Throne", 'oel'),
('Conquest', 'Conquest', 'oel'),
('The Empyrean Nethervoid', 'The Empyrean Nethervoid', 'oel'),
]
for tag, sname, tl_type in tagmap:
if tag in item['tags']:
return buildReleaseMessageWithType(item, sname, vol, chp, frag=frag, tl_type=tl_type)
titlemap = [
('fujimaru wrote a new post, FFF-Class Trashero - Chapter', 'FFF-Class Trashero', 'translated'),
('kobatochandaisuki wrote a new post, I Am the Monarch - Chapter', 'I Am the Monarch', 'translated'),
('Engebu wrote a new post, I Am the Monarch - Chapter', 'I Am the Monarch', 'translated'),
('Calvis wrote a new post, Dragon Poor - Chapter', 'Dragon Poor', 'translated'),
('Calvis wrote a new post, Green Skin - Chapter', 'Green Skin', 'translated'),
('Calvis wrote a new post, Rise, Strongest Warrior - Chapter', 'Rise, Strongest Warrior', 'translated'),
('Calvis wrote a new post, The Stone of Days - ', 'The Stone of Days', 'translated'),
('Calvis wrote a new post, The Stone of Days - Chapter', 'The Stone of Days', 'translated'),
('csvtranslator wrote a new post, I Am the Monarch - Chapter', 'I Am the Monarch', 'translated'),
('Koukouseidesu wrote a new post, Everyone Else is a Returnee - Chapter', 'Everyone Else is a Returnee', 'translated'),
('kuhaku wrote a new post, Solo Clear - Chapter ', 'Solo Clear', 'translated'),
('miraclerifle wrote a new post, God of Cooking - Chapter', 'God of Cooking', 'translated'),
('miraclerifle wrote a new post, Royal Roader on My Own - Chapter', 'Royal Roader on My Own', 'translated'),
('pyrenose wrote a new post, Rise, Strongest Warrior - Chapter', 'Rise, Strongest Warrior', 'translated'),
('Saquacon wrote a new post, All Things Wrong - Chapter', 'Doing All Things Wrong And Somehow Becoming The Best In The Game', 'translated'),
]
for titlecomponent, name, tl_type in titlemap:
if titlecomponent.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | bsd-3-clause | 4,132,580,538,126,302,000 | 96.631579 | 164 | 0.383761 | false | 4.611636 | false | false | false |
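
# Illustrative call (the item shape and helper behavior are assumed from the
# surrounding codebase, not verified here):
#
#     item = {'title': 'I Am the Monarch - Chapter 12',
#             'tags': ['I am the monarch']}
#     extractKobatoChanDaiSukiScan(item)
#     # -> release message built via buildReleaseMessageWithType(), or
#     #    None/False when the item is filtered out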
cwacek/python-jsonschema-objects | python_jsonschema_objects/util.py | 1 | 6181 | import six
import copy
import json
class lazy_format(object):
__slots__ = ("fmt", "args", "kwargs")
def __init__(self, fmt, *args, **kwargs):
self.fmt = fmt
self.args = args
self.kwargs = kwargs
def __str__(self):
return self.fmt.format(*self.args, **self.kwargs)
def safe_issubclass(x, y):
"""Safe version of issubclass() that will not throw TypeErrors.
Invoking issubclass('object', some-abc.meta instances) will result
in the underlying implementation throwing TypeError's from trying to
memoize the result- 'object' isn't a usable weakref target at that level.
Unfortunately this gets exposed all the way up to our code; thus a
'safe' version of the function."""
try:
return issubclass(x, y)
except TypeError:
return False
def coerce_for_expansion(mapping):
"""Given a value, make sure it is usable for f(**val) expansion.
    In py2.7, the value must be a dictionary; thus an as_dict() method
will be invoked if available. In py3k, the raw mapping is returned
unmodified."""
if six.PY2 and hasattr(mapping, "as_dict"):
return mapping.as_dict()
return mapping
class ProtocolJSONEncoder(json.JSONEncoder):
def default(self, obj):
from python_jsonschema_objects import classbuilder
from python_jsonschema_objects import wrapper_types
if isinstance(obj, classbuilder.LiteralValue):
return obj._value
if isinstance(obj, wrapper_types.ArrayWrapper):
return obj.for_json()
if isinstance(obj, classbuilder.ProtocolBase):
props = {}
for raw, trans in six.iteritems(obj.__prop_names__):
props[raw] = getattr(obj, trans)
if props[raw] is None:
del props[raw]
for raw, data in six.iteritems(obj._extended_properties):
props[raw] = data
if props[raw] is None:
del props[raw]
return props
else:
return json.JSONEncoder.default(self, obj)
def propmerge(into, data_from):
""" Merge JSON schema requirements into a dictionary """
newprops = copy.deepcopy(into)
for prop, propval in six.iteritems(data_from):
if prop not in newprops:
newprops[prop] = propval
continue
new_sp = newprops[prop]
for subprop, spval in six.iteritems(propval):
if subprop not in new_sp:
new_sp[subprop] = spval
elif subprop == "enum":
new_sp[subprop] = set(spval) & set(new_sp[subprop])
elif subprop == "type":
if spval != new_sp[subprop]:
raise TypeError("Type cannot conflict in allOf'")
elif subprop in ("minLength", "minimum"):
new_sp[subprop] = new_sp[subprop] if new_sp[subprop] > spval else spval
elif subprop in ("maxLength", "maximum"):
new_sp[subprop] = new_sp[subprop] if new_sp[subprop] < spval else spval
elif subprop == "multipleOf":
if new_sp[subprop] % spval == 0:
new_sp[subprop] = spval
else:
raise AttributeError("Cannot set conflicting multipleOf values")
else:
new_sp[subprop] = spval
newprops[prop] = new_sp
return newprops
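
# Usage sketch (illustrative values, not from this codebase): merging two
# JSON schema property maps.
#
#     base = {'name': {'type': 'string', 'minLength': 2}}
#     extra = {'name': {'minLength': 5}, 'age': {'type': 'integer'}}
#     merged = propmerge(base, extra)
#     # merged['name']['minLength'] == 5  (the stricter bound wins)
#     # merged['age'] == {'type': 'integer'}  (new properties pass through)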
def resolve_ref_uri(base, ref):
if ref[0] == "#":
# Local ref
uri = base.rsplit("#", 1)[0] + ref
else:
uri = ref
return uri
"""namespace module"""
__all__ = ("Namespace", "as_namespace")
from collections.abc import Mapping, Sequence
class _Dummy:
pass
CLASS_ATTRS = dir(_Dummy)
NEWCLASS_ATTRS = dir(object)
del _Dummy
class Namespace(dict):
"""A dict subclass that exposes its items as attributes.
Warning: Namespace instances do not have direct access to the
dict methods.
"""
def __init__(self, obj={}):
dict.__init__(self, obj)
def __dir__(self):
return list(self)
def __repr__(self):
return "%s(%s)" % (type(self).__name__, super(dict, self).__repr__())
def __getattribute__(self, name):
try:
return self[name]
except KeyError:
msg = "'%s' object has no attribute '%s'"
raise AttributeError(msg % (type(self).__name__, name))
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
del self[name]
# ------------------------
# "copy constructors"
@classmethod
def from_object(cls, obj, names=None):
if names is None:
names = dir(obj)
ns = {name: getattr(obj, name) for name in names}
return cls(ns)
@classmethod
def from_mapping(cls, ns, names=None):
if names:
ns = {name: ns[name] for name in names}
return cls(ns)
@classmethod
def from_sequence(cls, seq, names=None):
if names:
seq = {name: val for name, val in seq if name in names}
return cls(seq)
# ------------------------
# static methods
@staticmethod
def hasattr(ns, name):
try:
object.__getattribute__(ns, name)
except AttributeError:
return False
return True
@staticmethod
def getattr(ns, name):
return object.__getattribute__(ns, name)
@staticmethod
def setattr(ns, name, value):
return object.__setattr__(ns, name, value)
@staticmethod
def delattr(ns, name):
return object.__delattr__(ns, name)
def as_namespace(obj, names=None):
# functions
if isinstance(obj, type(as_namespace)):
obj = obj()
# special cases
if isinstance(obj, type):
names = (name for name in dir(obj) if name not in CLASS_ATTRS)
return Namespace.from_object(obj, names)
if isinstance(obj, Mapping):
return Namespace.from_mapping(obj, names)
if isinstance(obj, Sequence):
return Namespace.from_sequence(obj, names)
# default
return Namespace.from_object(obj, names)
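
# Minimal usage sketch (illustrative, not part of the original module):
#
#     ns = Namespace({'host': 'localhost', 'port': 8080})
#     ns.port            # -> 8080; items are exposed as attributes
#     ns.debug = True    # attribute writes update the underlying dict
#     as_namespace({'a': 1}).a   # -> 1, via the Mapping branch above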
| mit | -3,246,414,182,600,502,000 | 26.59375 | 87 | 0.576282 | false | 4.079868 | false | false | false |
marmyshev/transitions | openlp/core/lib/theme.py | 1 | 23608 | # -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
Provide the theme XML and handling functions for OpenLP v2 themes.
"""
import os
import re
import logging
from xml.dom.minidom import Document
from lxml import etree, objectify
from openlp.core.lib import str_to_bool, ScreenList
log = logging.getLogger(__name__)
BLANK_THEME_XML = \
'''<?xml version="1.0" encoding="utf-8"?>
<theme version="1.0">
<name> </name>
<background type="image">
<filename></filename>
<borderColor>#000000</borderColor>
</background>
<background type="gradient">
<startColor>#000000</startColor>
<endColor>#000000</endColor>
<direction>vertical</direction>
</background>
<background type="solid">
<color>#000000</color>
</background>
<font type="main">
<name>Arial</name>
<color>#FFFFFF</color>
<size>40</size>
<bold>False</bold>
<italics>False</italics>
<line_adjustment>0</line_adjustment>
<shadow shadowColor="#000000" shadowSize="5">True</shadow>
<outline outlineColor="#000000" outlineSize="2">False</outline>
<location override="False" x="10" y="10" width="1004" height="690"/>
</font>
<font type="footer">
<name>Arial</name>
<color>#FFFFFF</color>
<size>12</size>
<bold>False</bold>
<italics>False</italics>
<line_adjustment>0</line_adjustment>
<shadow shadowColor="#000000" shadowSize="5">True</shadow>
<outline outlineColor="#000000" outlineSize="2">False</outline>
<location override="False" x="10" y="690" width="1004" height="78"/>
</font>
<display>
<horizontalAlign>0</horizontalAlign>
<verticalAlign>0</verticalAlign>
<slideTransition>False</slideTransition>
</display>
</theme>
'''
class ThemeLevel(object):
"""
Provides an enumeration for the level a theme applies to
"""
Global = 1
Service = 2
Song = 3
class BackgroundType(object):
"""
Type enumeration for backgrounds.
"""
Solid = 0
Gradient = 1
Image = 2
Transparent = 3
@staticmethod
def to_string(background_type):
"""
Return a string representation of a background type.
"""
if background_type == BackgroundType.Solid:
return u'solid'
elif background_type == BackgroundType.Gradient:
return u'gradient'
elif background_type == BackgroundType.Image:
return u'image'
elif background_type == BackgroundType.Transparent:
return u'transparent'
@staticmethod
def from_string(type_string):
"""
Return a background type for the given string.
"""
if type_string == u'solid':
return BackgroundType.Solid
elif type_string == u'gradient':
return BackgroundType.Gradient
elif type_string == u'image':
return BackgroundType.Image
elif type_string == u'transparent':
return BackgroundType.Transparent
class BackgroundGradientType(object):
"""
Type enumeration for background gradients.
"""
Horizontal = 0
Vertical = 1
Circular = 2
LeftTop = 3
LeftBottom = 4
@staticmethod
def to_string(gradient_type):
"""
Return a string representation of a background gradient type.
"""
if gradient_type == BackgroundGradientType.Horizontal:
return u'horizontal'
elif gradient_type == BackgroundGradientType.Vertical:
return u'vertical'
elif gradient_type == BackgroundGradientType.Circular:
return u'circular'
elif gradient_type == BackgroundGradientType.LeftTop:
return u'leftTop'
elif gradient_type == BackgroundGradientType.LeftBottom:
return u'leftBottom'
@staticmethod
def from_string(type_string):
"""
Return a background gradient type for the given string.
"""
if type_string == u'horizontal':
return BackgroundGradientType.Horizontal
elif type_string == u'vertical':
return BackgroundGradientType.Vertical
elif type_string == u'circular':
return BackgroundGradientType.Circular
elif type_string == u'leftTop':
return BackgroundGradientType.LeftTop
elif type_string == u'leftBottom':
return BackgroundGradientType.LeftBottom
class HorizontalType(object):
"""
Type enumeration for horizontal alignment.
"""
Left = 0
Right = 1
Center = 2
Justify = 3
Names = [u'left', u'right', u'center', u'justify']
class VerticalType(object):
"""
Type enumeration for vertical alignment.
"""
Top = 0
Middle = 1
Bottom = 2
Names = [u'top', u'middle', u'bottom']
BOOLEAN_LIST = [u'bold', u'italics', u'override', u'outline', u'shadow',
u'slide_transition']
INTEGER_LIST = [u'size', u'line_adjustment', u'x', u'height', u'y',
u'width', u'shadow_size', u'outline_size', u'horizontal_align',
u'vertical_align', u'wrap_style']
class ThemeXML(object):
"""
A class to encapsulate the Theme XML.
"""
FIRST_CAMEL_REGEX = re.compile(u'(.)([A-Z][a-z]+)')
SECOND_CAMEL_REGEX = re.compile(u'([a-z0-9])([A-Z])')
def __init__(self):
"""
Initialise the theme object.
"""
# Create the minidom document
self.theme_xml = Document()
self.parse_xml(BLANK_THEME_XML)
def extend_image_filename(self, path):
"""
Add the path name to the image name so the background can be rendered.
``path``
The path name to be added.
"""
if self.background_type == u'image':
if self.background_filename and path:
self.theme_name = self.theme_name.strip()
self.background_filename = self.background_filename.strip()
self.background_filename = os.path.join(path, self.theme_name,
self.background_filename)
def _new_document(self, name):
"""
Create a new theme XML document.
"""
self.theme_xml = Document()
self.theme = self.theme_xml.createElement(u'theme')
self.theme_xml.appendChild(self.theme)
self.theme.setAttribute(u'version', u'2.0')
self.name = self.theme_xml.createElement(u'name')
text_node = self.theme_xml.createTextNode(name)
self.name.appendChild(text_node)
self.theme.appendChild(self.name)
def add_background_transparent(self):
"""
Add a transparent background.
"""
background = self.theme_xml.createElement(u'background')
background.setAttribute(u'type', u'transparent')
self.theme.appendChild(background)
def add_background_solid(self, bkcolor):
"""
Add a Solid background.
``bkcolor``
The color of the background.
"""
background = self.theme_xml.createElement(u'background')
background.setAttribute(u'type', u'solid')
self.theme.appendChild(background)
self.child_element(background, u'color', unicode(bkcolor))
def add_background_gradient(self, startcolor, endcolor, direction):
"""
Add a gradient background.
``startcolor``
The gradient's starting colour.
``endcolor``
The gradient's ending colour.
``direction``
The direction of the gradient.
"""
background = self.theme_xml.createElement(u'background')
background.setAttribute(u'type', u'gradient')
self.theme.appendChild(background)
# Create startColor element
self.child_element(background, u'startColor', unicode(startcolor))
# Create endColor element
self.child_element(background, u'endColor', unicode(endcolor))
# Create direction element
self.child_element(background, u'direction', unicode(direction))
def add_background_image(self, filename, borderColor):
"""
Add a image background.
``filename``
The file name of the image.
"""
background = self.theme_xml.createElement(u'background')
background.setAttribute(u'type', u'image')
self.theme.appendChild(background)
# Create Filename element
self.child_element(background, u'filename', filename)
# Create endColor element
self.child_element(background, u'borderColor', unicode(borderColor))
def add_font(self, name, color, size, override, fonttype=u'main',
bold=u'False', italics=u'False', line_adjustment=0,
xpos=0, ypos=0, width=0, height=0, outline=u'False',
outline_color=u'#ffffff', outline_pixel=2, shadow=u'False',
shadow_color=u'#ffffff', shadow_pixel=5):
"""
Add a Font.
``name``
The name of the font.
``color``
The colour of the font.
``size``
The size of the font.
``override``
Whether or not to override the default positioning of the theme.
``fonttype``
The type of font, ``main`` or ``footer``. Defaults to ``main``.
        ``bold``
            Whether the font is bold. Defaults to ``False``.
        ``italics``
            Whether the font renders in italics. Defaults to ``False``.
``xpos``
The X position of the text block.
``ypos``
The Y position of the text block.
``width``
The width of the text block.
``height``
The height of the text block.
``outline``
Whether or not to show an outline.
``outline_color``
The colour of the outline.
``outline_size``
            The size of the outline.
``shadow``
Whether or not to show a shadow.
``shadow_color``
The colour of the shadow.
``shadow_size``
            The size of the shadow.
"""
background = self.theme_xml.createElement(u'font')
background.setAttribute(u'type', fonttype)
self.theme.appendChild(background)
# Create Font name element
self.child_element(background, u'name', name)
# Create Font color element
self.child_element(background, u'color', unicode(color))
# Create Proportion name element
self.child_element(background, u'size', unicode(size))
# Create weight name element
self.child_element(background, u'bold', unicode(bold))
# Create italics name element
self.child_element(background, u'italics', unicode(italics))
# Create indentation name element
self.child_element(background, u'line_adjustment', unicode(line_adjustment))
# Create Location element
element = self.theme_xml.createElement(u'location')
element.setAttribute(u'override', unicode(override))
element.setAttribute(u'x', unicode(xpos))
element.setAttribute(u'y', unicode(ypos))
element.setAttribute(u'width', unicode(width))
element.setAttribute(u'height', unicode(height))
background.appendChild(element)
# Shadow
element = self.theme_xml.createElement(u'shadow')
element.setAttribute(u'shadowColor', unicode(shadow_color))
element.setAttribute(u'shadowSize', unicode(shadow_pixel))
value = self.theme_xml.createTextNode(unicode(shadow))
element.appendChild(value)
background.appendChild(element)
# Outline
element = self.theme_xml.createElement(u'outline')
element.setAttribute(u'outlineColor', unicode(outline_color))
element.setAttribute(u'outlineSize', unicode(outline_pixel))
value = self.theme_xml.createTextNode(unicode(outline))
element.appendChild(value)
background.appendChild(element)
def add_display(self, horizontal, vertical, transition):
"""
Add a Display options.
``horizontal``
The horizontal alignment of the text.
``vertical``
The vertical alignment of the text.
``transition``
Whether the slide transition is active.
"""
background = self.theme_xml.createElement(u'display')
self.theme.appendChild(background)
# Horizontal alignment
element = self.theme_xml.createElement(u'horizontalAlign')
value = self.theme_xml.createTextNode(unicode(horizontal))
element.appendChild(value)
background.appendChild(element)
# Vertical alignment
element = self.theme_xml.createElement(u'verticalAlign')
value = self.theme_xml.createTextNode(unicode(vertical))
element.appendChild(value)
background.appendChild(element)
# Slide Transition
element = self.theme_xml.createElement(u'slideTransition')
value = self.theme_xml.createTextNode(unicode(transition))
element.appendChild(value)
background.appendChild(element)
def child_element(self, element, tag, value):
"""
Generic child element creator.
"""
child = self.theme_xml.createElement(tag)
child.appendChild(self.theme_xml.createTextNode(value))
element.appendChild(child)
return child
def set_default_header_footer(self):
"""
Set the header and footer size into the current primary screen.
10 px on each side is removed to allow for a border.
"""
current_screen = ScreenList().current
self.font_main_y = 0
self.font_main_width = current_screen[u'size'].width() - 20
self.font_main_height = current_screen[u'size'].height() * 9 / 10
self.font_footer_width = current_screen[u'size'].width() - 20
self.font_footer_y = current_screen[u'size'].height() * 9 / 10
self.font_footer_height = current_screen[u'size'].height() / 10
def dump_xml(self):
"""
        Return the theme XML as a pretty-printed string, used for debugging.
"""
return self.theme_xml.toprettyxml(indent=u' ')
def extract_xml(self):
"""
Print out the XML string.
"""
self._build_xml_from_attrs()
return self.theme_xml.toxml(u'utf-8').decode(u'utf-8')
def extract_formatted_xml(self):
"""
Pull out the XML string formatted for human consumption
"""
self._build_xml_from_attrs()
return self.theme_xml.toprettyxml(indent=u' ', newl=u'\n', encoding=u'utf-8')
def parse(self, xml):
"""
Read in an XML string and parse it.
``xml``
The XML string to parse.
"""
self.parse_xml(unicode(xml))
def parse_xml(self, xml):
"""
Parse an XML string.
``xml``
The XML string to parse.
"""
# remove encoding string
line = xml.find(u'?>')
if line:
xml = xml[line + 2:]
try:
theme_xml = objectify.fromstring(xml)
except etree.XMLSyntaxError:
log.exception(u'Invalid xml %s', xml)
return
xml_iter = theme_xml.getiterator()
for element in xml_iter:
master = u''
if element.tag == u'background':
if element.attrib:
for attr in element.attrib:
self._create_attr(element.tag, attr, element.attrib[attr])
parent = element.getparent()
if parent is not None:
if parent.tag == u'font':
master = parent.tag + u'_' + parent.attrib[u'type']
# set up Outline and Shadow Tags and move to font_main
if parent.tag == u'display':
if element.tag.startswith(u'shadow') or element.tag.startswith(u'outline'):
self._create_attr(u'font_main', element.tag, element.text)
master = parent.tag
if parent.tag == u'background':
master = parent.tag
if master:
self._create_attr(master, element.tag, element.text)
if element.attrib:
for attr in element.attrib:
base_element = attr
# correction for the shadow and outline tags
if element.tag == u'shadow' or element.tag == u'outline':
if not attr.startswith(element.tag):
base_element = element.tag + u'_' + attr
self._create_attr(master, base_element, element.attrib[attr])
else:
if element.tag == u'name':
self._create_attr(u'theme', element.tag, element.text)
def _translate_tags(self, master, element, value):
"""
        Clean up the XML by removing and renaming tags
"""
master = master.strip().lstrip()
element = element.strip().lstrip()
value = unicode(value).strip().lstrip()
if master == u'display':
if element == u'wrapStyle':
return True, None, None, None
if element.startswith(u'shadow') or element.startswith(u'outline'):
master = u'font_main'
# fix bold font
if element == u'weight':
element = u'bold'
if value == u'Normal':
value = False
else:
value = True
if element == u'proportion':
element = u'size'
return False, master, element, value
def _create_attr(self, master, element, value):
"""
Create the attributes with the correct data types and name format
"""
reject, master, element, value = self._translate_tags(master, element, value)
if reject:
return
field = self._de_hump(element)
tag = master + u'_' + field
if field in BOOLEAN_LIST:
setattr(self, tag, str_to_bool(value))
elif field in INTEGER_LIST:
setattr(self, tag, int(value))
else:
# make string value unicode
if not isinstance(value, unicode):
value = unicode(str(value), u'utf-8')
# None means an empty string so lets have one.
if value == u'None':
value = u''
setattr(self, tag, unicode(value).strip().lstrip())
def __str__(self):
"""
Return a string representation of this object.
"""
theme_strings = []
for key in dir(self):
if key[0:1] != u'_':
theme_strings.append(u'%30s: %s' % (key, getattr(self, key)))
return u'\n'.join(theme_strings)
def _de_hump(self, name):
"""
Change Camel Case string to python string
"""
sub_name = ThemeXML.FIRST_CAMEL_REGEX.sub(r'\1_\2', name)
return ThemeXML.SECOND_CAMEL_REGEX.sub(r'\1_\2', sub_name).lower()
def _build_xml_from_attrs(self):
"""
        Build the XML from the variables in the object
"""
self._new_document(self.theme_name)
if self.background_type == BackgroundType.to_string(BackgroundType.Solid):
self.add_background_solid(self.background_color)
elif self.background_type == BackgroundType.to_string(BackgroundType.Gradient):
self.add_background_gradient(
self.background_start_color,
self.background_end_color,
self.background_direction
)
elif self.background_type == BackgroundType.to_string(BackgroundType.Image):
filename = os.path.split(self.background_filename)[1]
self.add_background_image(filename, self.background_border_color)
elif self.background_type == BackgroundType.to_string(BackgroundType.Transparent):
self.add_background_transparent()
self.add_font(
self.font_main_name,
self.font_main_color,
self.font_main_size,
self.font_main_override, u'main',
self.font_main_bold,
self.font_main_italics,
self.font_main_line_adjustment,
self.font_main_x,
self.font_main_y,
self.font_main_width,
self.font_main_height,
self.font_main_outline,
self.font_main_outline_color,
self.font_main_outline_size,
self.font_main_shadow,
self.font_main_shadow_color,
self.font_main_shadow_size
)
self.add_font(
self.font_footer_name,
self.font_footer_color,
self.font_footer_size,
self.font_footer_override, u'footer',
self.font_footer_bold,
self.font_footer_italics,
0, # line adjustment
self.font_footer_x,
self.font_footer_y,
self.font_footer_width,
self.font_footer_height,
self.font_footer_outline,
self.font_footer_outline_color,
self.font_footer_outline_size,
self.font_footer_shadow,
self.font_footer_shadow_color,
self.font_footer_shadow_size
)
self.add_display(
self.display_horizontal_align,
self.display_vertical_align,
self.display_slide_transition
)
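
# Round-trip sketch (illustrative; assumes the blank theme attributes parsed
# in __init__ are sufficient for a rebuild):
#
#     theme = ThemeXML()              # parses BLANK_THEME_XML on construction
#     theme.theme_name = u'Demo'
#     xml = theme.extract_xml()       # rebuilds XML from the parsed attributes
#     theme2 = ThemeXML()
#     theme2.parse(xml)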
| gpl-2.0 | -182,272,033,019,598,620 | 34.65861 | 95 | 0.574346 | false | 4.183977 | false | false | false |
40323230/Pyslvs-PyQt5 | pyslvs_ui/entities/relocate_point_ui.py | 1 | 16987 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'pyslvs_ui/entities/relocate_point.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from qtpy import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(366, 468)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("icons:calculator.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Dialog.setWindowIcon(icon)
Dialog.setModal(True)
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName("verticalLayout")
self.tab_widget = QtWidgets.QTabWidget(Dialog)
self.tab_widget.setObjectName("tab_widget")
self.plap_tab = QtWidgets.QWidget()
self.plap_tab.setObjectName("plap_tab")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.plap_tab)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.panel_layout = QtWidgets.QHBoxLayout()
self.panel_layout.setObjectName("panel_layout")
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.plap_p1_label = QtWidgets.QLabel(self.plap_tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.plap_p1_label.sizePolicy().hasHeightForWidth())
self.plap_p1_label.setSizePolicy(sizePolicy)
self.plap_p1_label.setObjectName("plap_p1_label")
self.horizontalLayout_2.addWidget(self.plap_p1_label)
self.plap_p1_box = QtWidgets.QComboBox(self.plap_tab)
self.plap_p1_box.setObjectName("plap_p1_box")
self.horizontalLayout_2.addWidget(self.plap_p1_box)
self.verticalLayout_3.addLayout(self.horizontalLayout_2)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.plap_p1x_label = QtWidgets.QLabel(self.plap_tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.plap_p1x_label.sizePolicy().hasHeightForWidth())
self.plap_p1x_label.setSizePolicy(sizePolicy)
self.plap_p1x_label.setObjectName("plap_p1x_label")
self.horizontalLayout.addWidget(self.plap_p1x_label)
self.plap_p1x_box = QtWidgets.QDoubleSpinBox(self.plap_tab)
self.plap_p1x_box.setDecimals(4)
self.plap_p1x_box.setMinimum(-9999.99)
self.plap_p1x_box.setMaximum(9999.99)
self.plap_p1x_box.setObjectName("plap_p1x_box")
self.horizontalLayout.addWidget(self.plap_p1x_box)
self.plap_p1y_label = QtWidgets.QLabel(self.plap_tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.plap_p1y_label.sizePolicy().hasHeightForWidth())
self.plap_p1y_label.setSizePolicy(sizePolicy)
self.plap_p1y_label.setObjectName("plap_p1y_label")
self.horizontalLayout.addWidget(self.plap_p1y_label)
self.plap_p1y_box = QtWidgets.QDoubleSpinBox(self.plap_tab)
self.plap_p1y_box.setDecimals(4)
self.plap_p1y_box.setMinimum(-9999.99)
self.plap_p1y_box.setMaximum(9999.99)
self.plap_p1y_box.setObjectName("plap_p1y_box")
self.horizontalLayout.addWidget(self.plap_p1y_box)
self.verticalLayout_3.addLayout(self.horizontalLayout)
self.formLayout = QtWidgets.QFormLayout()
self.formLayout.setObjectName("formLayout")
self.plap_angle_label = QtWidgets.QLabel(self.plap_tab)
self.plap_angle_label.setObjectName("plap_angle_label")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.plap_angle_label)
self.plap_angle_box = QtWidgets.QDoubleSpinBox(self.plap_tab)
self.plap_angle_box.setDecimals(4)
self.plap_angle_box.setMaximum(360.0)
self.plap_angle_box.setObjectName("plap_angle_box")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.plap_angle_box)
self.plap_distance_label = QtWidgets.QLabel(self.plap_tab)
self.plap_distance_label.setObjectName("plap_distance_label")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.plap_distance_label)
self.plap_distance_box = QtWidgets.QDoubleSpinBox(self.plap_tab)
self.plap_distance_box.setDecimals(4)
self.plap_distance_box.setMaximum(9999.99)
self.plap_distance_box.setObjectName("plap_distance_box")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.plap_distance_box)
self.verticalLayout_3.addLayout(self.formLayout)
self.panel_layout.addLayout(self.verticalLayout_3)
self.verticalLayout_4.addLayout(self.panel_layout)
spacerItem = QtWidgets.QSpacerItem(20, 126, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_4.addItem(spacerItem)
self.tab_widget.addTab(self.plap_tab, "")
self.pllp_tab = QtWidgets.QWidget()
self.pllp_tab.setObjectName("pllp_tab")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.pllp_tab)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.widget = QtWidgets.QWidget(self.pllp_tab)
self.widget.setObjectName("widget")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.widget)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.pllp_p1_label = QtWidgets.QRadioButton(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pllp_p1_label.sizePolicy().hasHeightForWidth())
self.pllp_p1_label.setSizePolicy(sizePolicy)
self.pllp_p1_label.setChecked(True)
self.pllp_p1_label.setObjectName("pllp_p1_label")
self.horizontalLayout_4.addWidget(self.pllp_p1_label)
self.pllp_p1_box = QtWidgets.QComboBox(self.widget)
self.pllp_p1_box.setObjectName("pllp_p1_box")
self.horizontalLayout_4.addWidget(self.pllp_p1_box)
self.verticalLayout_5.addLayout(self.horizontalLayout_4)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.pllp_p1xy_label = QtWidgets.QRadioButton(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pllp_p1xy_label.sizePolicy().hasHeightForWidth())
self.pllp_p1xy_label.setSizePolicy(sizePolicy)
self.pllp_p1xy_label.setText("")
self.pllp_p1xy_label.setObjectName("pllp_p1xy_label")
self.horizontalLayout_3.addWidget(self.pllp_p1xy_label)
self.pllp_p1x_label = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pllp_p1x_label.sizePolicy().hasHeightForWidth())
self.pllp_p1x_label.setSizePolicy(sizePolicy)
self.pllp_p1x_label.setObjectName("pllp_p1x_label")
self.horizontalLayout_3.addWidget(self.pllp_p1x_label)
self.pllp_p1x_box = QtWidgets.QDoubleSpinBox(self.widget)
self.pllp_p1x_box.setDecimals(4)
self.pllp_p1x_box.setMinimum(-9999.99)
self.pllp_p1x_box.setMaximum(9999.99)
self.pllp_p1x_box.setObjectName("pllp_p1x_box")
self.horizontalLayout_3.addWidget(self.pllp_p1x_box)
self.pllp_p1y_label = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pllp_p1y_label.sizePolicy().hasHeightForWidth())
self.pllp_p1y_label.setSizePolicy(sizePolicy)
self.pllp_p1y_label.setObjectName("pllp_p1y_label")
self.horizontalLayout_3.addWidget(self.pllp_p1y_label)
self.pllp_p1y_box = QtWidgets.QDoubleSpinBox(self.widget)
self.pllp_p1y_box.setDecimals(4)
self.pllp_p1y_box.setMinimum(-9999.99)
self.pllp_p1y_box.setMaximum(9999.99)
self.pllp_p1y_box.setObjectName("pllp_p1y_box")
self.horizontalLayout_3.addWidget(self.pllp_p1y_box)
self.verticalLayout_5.addLayout(self.horizontalLayout_3)
self.verticalLayout_7.addWidget(self.widget)
self.widget1 = QtWidgets.QWidget(self.pllp_tab)
self.widget1.setObjectName("widget1")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.widget1)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.pllp_p2_label = QtWidgets.QRadioButton(self.widget1)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pllp_p2_label.sizePolicy().hasHeightForWidth())
self.pllp_p2_label.setSizePolicy(sizePolicy)
self.pllp_p2_label.setChecked(True)
self.pllp_p2_label.setObjectName("pllp_p2_label")
self.horizontalLayout_5.addWidget(self.pllp_p2_label)
self.pllp_p2_box = QtWidgets.QComboBox(self.widget1)
self.pllp_p2_box.setObjectName("pllp_p2_box")
self.horizontalLayout_5.addWidget(self.pllp_p2_box)
self.verticalLayout_6.addLayout(self.horizontalLayout_5)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.pllp_p2xy_label = QtWidgets.QRadioButton(self.widget1)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pllp_p2xy_label.sizePolicy().hasHeightForWidth())
self.pllp_p2xy_label.setSizePolicy(sizePolicy)
self.pllp_p2xy_label.setText("")
self.pllp_p2xy_label.setObjectName("pllp_p2xy_label")
self.horizontalLayout_6.addWidget(self.pllp_p2xy_label)
self.pllp_p2x_label = QtWidgets.QLabel(self.widget1)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pllp_p2x_label.sizePolicy().hasHeightForWidth())
self.pllp_p2x_label.setSizePolicy(sizePolicy)
self.pllp_p2x_label.setObjectName("pllp_p2x_label")
self.horizontalLayout_6.addWidget(self.pllp_p2x_label)
self.pllp_p2x_box = QtWidgets.QDoubleSpinBox(self.widget1)
self.pllp_p2x_box.setDecimals(4)
self.pllp_p2x_box.setMinimum(-9999.99)
self.pllp_p2x_box.setMaximum(9999.99)
self.pllp_p2x_box.setObjectName("pllp_p2x_box")
self.horizontalLayout_6.addWidget(self.pllp_p2x_box)
self.pllp_p2y_label = QtWidgets.QLabel(self.widget1)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pllp_p2y_label.sizePolicy().hasHeightForWidth())
self.pllp_p2y_label.setSizePolicy(sizePolicy)
self.pllp_p2y_label.setObjectName("pllp_p2y_label")
self.horizontalLayout_6.addWidget(self.pllp_p2y_label)
self.pllp_p2y_box = QtWidgets.QDoubleSpinBox(self.widget1)
self.pllp_p2y_box.setDecimals(4)
self.pllp_p2y_box.setMinimum(-9999.99)
self.pllp_p2y_box.setObjectName("pllp_p2y_box")
self.horizontalLayout_6.addWidget(self.pllp_p2y_box)
self.verticalLayout_6.addLayout(self.horizontalLayout_6)
self.verticalLayout_7.addWidget(self.widget1)
self.formLayout_2 = QtWidgets.QFormLayout()
self.formLayout_2.setObjectName("formLayout_2")
self.pllp_distance1_box = QtWidgets.QDoubleSpinBox(self.pllp_tab)
self.pllp_distance1_box.setDecimals(4)
self.pllp_distance1_box.setMaximum(9999.99)
self.pllp_distance1_box.setObjectName("pllp_distance1_box")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.pllp_distance1_box)
self.pllp_distance1_label = QtWidgets.QLabel(self.pllp_tab)
self.pllp_distance1_label.setObjectName("pllp_distance1_label")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.pllp_distance1_label)
self.pllp_distance2_box = QtWidgets.QDoubleSpinBox(self.pllp_tab)
self.pllp_distance2_box.setDecimals(4)
self.pllp_distance2_box.setMaximum(9999.99)
self.pllp_distance2_box.setObjectName("pllp_distance2_box")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.pllp_distance2_box)
self.pllp_distance2_label = QtWidgets.QLabel(self.pllp_tab)
self.pllp_distance2_label.setObjectName("pllp_distance2_label")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.pllp_distance2_label)
self.verticalLayout_7.addLayout(self.formLayout_2)
self.pllp_inversed_box = QtWidgets.QCheckBox(self.pllp_tab)
self.pllp_inversed_box.setObjectName("pllp_inversed_box")
self.verticalLayout_7.addWidget(self.pllp_inversed_box)
spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_7.addItem(spacerItem1)
self.tab_widget.addTab(self.pllp_tab, "")
self.verticalLayout.addWidget(self.tab_widget)
self.preview_label = QtWidgets.QLabel(Dialog)
self.preview_label.setObjectName("preview_label")
self.verticalLayout.addWidget(self.preview_label)
self.button_box = QtWidgets.QDialogButtonBox(Dialog)
self.button_box.setOrientation(QtCore.Qt.Horizontal)
self.button_box.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.button_box.setObjectName("button_box")
self.verticalLayout.addWidget(self.button_box)
self.retranslateUi(Dialog)
self.button_box.rejected.connect(Dialog.reject)
self.button_box.accepted.connect(Dialog.accept)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Relocate"))
self.plap_p1_label.setText(_translate("Dialog", "Point"))
self.plap_p1x_label.setText(_translate("Dialog", "X"))
self.plap_p1y_label.setText(_translate("Dialog", "Y"))
self.plap_angle_label.setText(_translate("Dialog", "Angle"))
self.plap_distance_label.setText(_translate("Dialog", "Distance"))
self.tab_widget.setTabText(self.tab_widget.indexOf(self.plap_tab), _translate("Dialog", "Polar"))
self.pllp_p1_label.setText(_translate("Dialog", "Point &1"))
self.pllp_p1x_label.setText(_translate("Dialog", "X"))
self.pllp_p1y_label.setText(_translate("Dialog", "Y"))
self.pllp_p2_label.setText(_translate("Dialog", "Point &2"))
self.pllp_p2x_label.setText(_translate("Dialog", "X"))
self.pllp_p2y_label.setText(_translate("Dialog", "Y"))
self.pllp_distance1_label.setText(_translate("Dialog", "Distance 1"))
self.pllp_distance2_label.setText(_translate("Dialog", "Distance 2"))
self.pllp_inversed_box.setText(_translate("Dialog", "Inverse the position to another side."))
self.tab_widget.setTabText(self.tab_widget.indexOf(self.pllp_tab), _translate("Dialog", "Two Points"))
| agpl-3.0 | 921,782,860,108,813,000 | 58.81338 | 115 | 0.714134 | false | 3.360435 | false | false | false |
zozo123/buildbot | master/buildbot/test/unit/test_buildslave_protocols_pb.py | 1 | 9511 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import mock
from buildbot.buildslave.protocols import pb
from buildbot.test.fake import fakemaster
from buildbot.test.util import protocols as util_protocols
from twisted.internet import defer
from twisted.spread import pb as twisted_pb
from twisted.trial import unittest
class TestListener(unittest.TestCase):
def setUp(self):
self.master = fakemaster.make_master()
def test_constructor(self):
listener = pb.Listener(self.master)
self.assertEqual(listener.master, self.master)
self.assertEqual(listener._registrations, {})
@defer.inlineCallbacks
def test_updateRegistration_simple(self):
listener = pb.Listener(self.master)
reg = yield listener.updateRegistration('example', 'pass', 'tcp:1234')
self.assertEqual(self.master.pbmanager._registrations,
[('tcp:1234', 'example', 'pass')])
self.assertEqual(listener._registrations['example'], ('pass', 'tcp:1234', reg))
@defer.inlineCallbacks
def test_updateRegistration_pass_changed(self):
listener = pb.Listener(self.master)
listener.updateRegistration('example', 'pass', 'tcp:1234')
reg1 = yield listener.updateRegistration('example', 'pass1', 'tcp:1234')
self.assertEqual(listener._registrations['example'], ('pass1', 'tcp:1234', reg1))
self.assertEqual(self.master.pbmanager._unregistrations, [('tcp:1234', 'example')])
@defer.inlineCallbacks
def test_updateRegistration_port_changed(self):
listener = pb.Listener(self.master)
listener.updateRegistration('example', 'pass', 'tcp:1234')
reg1 = yield listener.updateRegistration('example', 'pass', 'tcp:4321')
self.assertEqual(listener._registrations['example'], ('pass', 'tcp:4321', reg1))
self.assertEqual(self.master.pbmanager._unregistrations, [('tcp:1234', 'example')])
@defer.inlineCallbacks
def test_getPerspective(self):
listener = pb.Listener(self.master)
buildslave = mock.Mock()
buildslave.slavename = 'test'
mind = mock.Mock()
listener.updateRegistration('example', 'pass', 'tcp:1234')
self.master.buildslaves.register(buildslave)
conn = yield listener._getPerspective(mind, buildslave.slavename)
mind.broker.transport.setTcpKeepAlive.assert_called_with(1)
self.assertIsInstance(conn, pb.Connection)
class TestConnectionApi(util_protocols.ConnectionInterfaceTest,
unittest.TestCase):
def setUp(self):
self.master = fakemaster.make_master()
self.conn = pb.Connection(self.master, mock.Mock(), mock.Mock())
class TestConnection(unittest.TestCase):
def setUp(self):
self.master = fakemaster.make_master()
self.mind = mock.Mock()
self.buildslave = mock.Mock()
def test_constructor(self):
conn = pb.Connection(self.master, self.buildslave, self.mind)
self.assertEqual(conn.mind, self.mind)
self.assertEqual(conn.master, self.master)
self.assertEqual(conn.buildslave, self.buildslave)
@defer.inlineCallbacks
def test_attached(self):
conn = pb.Connection(self.master, self.buildslave, self.mind)
att = yield conn.attached(self.mind)
self.assertNotEqual(conn.keepalive_timer, None)
self.buildslave.attached.assert_called_with(conn)
self.assertEqual(att, conn)
conn.detached(self.mind)
def test_detached(self):
conn = pb.Connection(self.master, self.buildslave, self.mind)
conn.attached(self.mind)
conn.detached(self.mind)
self.assertEqual(conn.keepalive_timer, None)
self.assertEqual(conn.mind, None)
def test_loseConnection(self):
conn = pb.Connection(self.master, self.buildslave, self.mind)
conn.loseConnection()
self.assertEqual(conn.keepalive_timer, None)
conn.mind.broker.transport.loseConnection.assert_called_with()
def test_remotePrint(self):
conn = pb.Connection(self.master, self.buildslave, self.mind)
conn.remotePrint(message='test')
conn.mind.callRemote.assert_called_with('print', message='test')
@defer.inlineCallbacks
def test_remoteGetSlaveInfo(self):
def side_effect(*args, **kwargs):
if 'getSlaveInfo' in args:
return defer.succeed({'info': 'test'})
if 'getCommands' in args:
return defer.succeed({'x': 1, 'y': 2})
if 'getVersion' in args:
return defer.succeed('TheVersion')
self.mind.callRemote.side_effect = side_effect
conn = pb.Connection(self.master, self.buildslave, self.mind)
info = yield conn.remoteGetSlaveInfo()
r = {'info': 'test', 'slave_commands': {'y': 2, 'x': 1}, 'version': 'TheVersion'}
self.assertEqual(info, r)
calls = [mock.call('getSlaveInfo'), mock.call('getCommands'), mock.call('getVersion')]
self.mind.callRemote.assert_has_calls(calls)
@defer.inlineCallbacks
def test_remoteGetSlaveInfo_getSlaveInfo_fails(self):
def side_effect(*args, **kwargs):
if 'getSlaveInfo' in args:
return defer.fail(twisted_pb.NoSuchMethod())
if 'getCommands' in args:
return defer.succeed({'x': 1, 'y': 2})
if 'getVersion' in args:
return defer.succeed('TheVersion')
self.mind.callRemote.side_effect = side_effect
conn = pb.Connection(self.master, self.buildslave, self.mind)
info = yield conn.remoteGetSlaveInfo()
r = {'slave_commands': {'y': 2, 'x': 1}, 'version': 'TheVersion'}
self.assertEqual(info, r)
calls = [mock.call('getSlaveInfo'), mock.call('getCommands'), mock.call('getVersion')]
self.mind.callRemote.assert_has_calls(calls)
@defer.inlineCallbacks
def test_remoteSetBuilderList(self):
builders = ['builder1', 'builder2']
self.mind.callRemote.return_value = defer.succeed(builders)
conn = pb.Connection(self.master, self.buildslave, self.mind)
r = yield conn.remoteSetBuilderList(builders)
self.assertEqual(r, builders)
self.assertEqual(conn.builders, builders)
self.mind.callRemote.assert_called_with('setBuilderList', builders)
def test_remoteStartCommand(self):
builders = ['builder']
ret_val = {'builder': mock.Mock()}
self.mind.callRemote.return_value = defer.succeed(ret_val)
conn = pb.Connection(self.master, self.buildslave, self.mind)
conn.remoteSetBuilderList(builders)
RCInstance, builder_name, commandID = None, "builder", None
remote_command, args = "command", "args"
conn.remoteStartCommand(RCInstance, builder_name, commandID, remote_command, args)
ret_val['builder'].callRemote.assert_called_with('startCommand',
RCInstance, commandID, remote_command, args)
def test_doKeepalive(self):
conn = pb.Connection(self.master, self.buildslave, self.mind)
conn.doKeepalive()
self.mind.callRemote.assert_called_with('print', message="keepalive")
def test_remoteShutdown(self):
self.mind.callRemote.return_value = defer.succeed(None)
conn = pb.Connection(self.master, self.buildslave, self.mind)
# note that we do not test the "old way", as it is now *very* old.
conn.remoteShutdown()
self.mind.callRemote.assert_called_with('shutdown')
def test_remoteStartBuild(self):
conn = pb.Connection(self.master, self.buildslave, self.mind)
builders = {'builder': mock.Mock()}
self.mind.callRemote.return_value = defer.succeed(builders)
conn = pb.Connection(self.master, self.buildslave, self.mind)
conn.remoteSetBuilderList(builders)
conn.remoteStartBuild('builder')
builders['builder'].callRemote.assert_called_with('startBuild')
def test_startStopKeepaliveTimer(self):
conn = pb.Connection(self.master, self.buildslave, self.mind)
conn.startKeepaliveTimer()
self.assertNotEqual(conn.keepalive_timer, None)
conn.stopKeepaliveTimer()
self.assertEqual(conn.keepalive_timer, None)
def test_perspective_shutdown(self):
conn = pb.Connection(self.master, self.buildslave, self.mind)
conn.perspective_shutdown()
conn.buildslave.shutdownRequested.assert_called_with()
conn.buildslave.messageReceivedFromSlave.assert_called_with()
def test_perspective_keepalive(self):
conn = pb.Connection(self.master, self.buildslave, self.mind)
conn.perspective_keepalive()
conn.buildslave.messageReceivedFromSlave.assert_called_with()
| gpl-3.0 | -7,822,177,358,557,186,000 | 39.130802 | 101 | 0.667963 | false | 3.913992 | true | false | false |
buffer/thug | thug/ActiveX/modules/JavaDeploymentToolkit.py | 1 | 1913 |
import logging
log = logging.getLogger("Thug")
def launch(self, arg):
log.ThugLogging.add_behavior_warn("[Java Deployment Toolkit ActiveX] Launching: %s" % (arg, ))
tokens = arg.split(' ')
if tokens[0].lower() != 'http:':
return
for token in tokens[1:]:
if not token.lower().startswith('http'):
continue
log.ThugLogging.add_behavior_warn("[Java Deployment Toolkit ActiveX] Fetching from URL %s" % (token, ))
log.ThugLogging.log_exploit_event(self._window.url,
"Java Deployment Toolkit ActiveX",
"Fetching from URL",
data = {
"url": token
},
forward = False)
try:
self._window._navigator.fetch(token, redirect_type = "Java Deployment Toolkit Exploit")
except Exception:
log.ThugLogging.add_behavior_warn("[Java Deployment Toolkit ActiveX] Fetch Failed")
def launchApp(self, pJNLP, pEmbedded = None, pVmArgs = None):
cve_2013_2416 = False
if len(pJNLP) > 32:
cve_2013_2416 = True
log.ThugLogging.Shellcode.check_shellcode(pJNLP)
if pEmbedded:
cve_2013_2416 = True
log.ThugLogging.Shellcode.check_shellcode(pEmbedded)
if cve_2013_2416:
log.ThugLogging.log_exploit_event(self._window.url,
"Java Deployment Toolkit ActiveX",
"Java ActiveX component memory corruption (CVE-2013-2416)",
cve = "CVE-2013-2416",
forward = True)
log.ThugLogging.log_classifier("exploit", log.ThugLogging.url, "CVE-2013-2416")
| gpl-2.0 | 5,999,459,635,673,854,000 | 37.26 | 111 | 0.509671 | false | 4.185996 | false | false | false |
alvaroaleman/ansible | lib/ansible/inventory/__init__.py | 32 | 34639 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fnmatch
import os
import subprocess
import sys
import re
import itertools
from ansible.compat.six import string_types, iteritems
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.inventory.dir import InventoryDirectory, get_file_parser
from ansible.inventory.group import Group
from ansible.inventory.host import Host
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.utils.addresses import parse_address
from ansible.plugins import vars_loader
from ansible.utils.vars import combine_vars
from ansible.utils.path import unfrackpath
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
HOSTS_PATTERNS_CACHE = {}
class Inventory(object):
"""
Host inventory for ansible.
"""
def __init__(self, loader, variable_manager, host_list=C.DEFAULT_HOST_LIST):
# the host file file, or script path, or list of hosts
# if a list, inventory data will NOT be loaded
self.host_list = unfrackpath(host_list, follow=False)
self._loader = loader
self._variable_manager = variable_manager
self.localhost = None
# caching to avoid repeated calculations, particularly with
# external inventory scripts.
self._vars_per_host = {}
self._vars_per_group = {}
self._hosts_cache = {}
self._pattern_cache = {}
self._group_dict_cache = {}
self._vars_plugins = []
self._basedir = self.basedir()
# Contains set of filenames under group_vars directories
self._group_vars_files = self._find_group_vars_files(self._basedir)
self._host_vars_files = self._find_host_vars_files(self._basedir)
# to be set by calling set_playbook_basedir by playbook code
self._playbook_basedir = None
# the inventory object holds a list of groups
self.groups = {}
        # a list of host names to which current inquiries are restricted
self._restriction = None
self._subset = None
# clear the cache here, which is only useful if more than
# one Inventory objects are created when using the API directly
self.clear_pattern_cache()
self.clear_group_dict_cache()
self.parse_inventory(host_list)
def serialize(self):
data = dict()
return data
def deserialize(self, data):
pass
def parse_inventory(self, host_list):
if isinstance(host_list, string_types):
if "," in host_list:
host_list = host_list.split(",")
host_list = [ h for h in host_list if h and h.strip() ]
self.parser = None
# Always create the 'all' and 'ungrouped' groups, even if host_list is
        # empty: in this case we will subsequently add the implicit 'localhost' to it.
ungrouped = Group('ungrouped')
all = Group('all')
all.add_child_group(ungrouped)
self.groups = dict(all=all, ungrouped=ungrouped)
if host_list is None:
pass
elif isinstance(host_list, list):
for h in host_list:
try:
(host, port) = parse_address(h, allow_ranges=False)
except AnsibleError as e:
display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_text(e))
host = h
port = None
new_host = Host(host, port)
if h in C.LOCALHOST:
# set default localhost from inventory to avoid creating an implicit one. Last localhost defined 'wins'.
if self.localhost is not None:
display.warning("A duplicate localhost-like entry was found (%s). First found localhost was %s" % (h, self.localhost.name))
display.vvvv("Set default localhost to %s" % h)
self.localhost = new_host
all.add_host(new_host)
elif self._loader.path_exists(host_list):
            # TODO: switch this to a plugin loader and a 'condition' per plugin on which it should be tried, restoring 'inventory plugins'
if self.is_directory(host_list):
# Ensure basedir is inside the directory
host_list = os.path.join(self.host_list, "")
self.parser = InventoryDirectory(loader=self._loader, groups=self.groups, filename=host_list)
else:
self.parser = get_file_parser(host_list, self.groups, self._loader)
vars_loader.add_directory(self._basedir, with_subdir=True)
if not self.parser:
# should never happen, but JIC
raise AnsibleError("Unable to parse %s as an inventory source" % host_list)
else:
display.warning("Host file not found: %s" % to_text(host_list))
self._vars_plugins = [ x for x in vars_loader.all(self) ]
# set group vars from group_vars/ files and vars plugins
for g in self.groups:
group = self.groups[g]
group.vars = combine_vars(group.vars, self.get_group_variables(group.name))
self.get_group_vars(group)
# get host vars from host_vars/ files and vars plugins
for host in self.get_hosts(ignore_limits=True, ignore_restrictions=True):
host.vars = combine_vars(host.vars, self.get_host_variables(host.name))
self.get_host_vars(host)
def _match(self, str, pattern_str):
try:
if pattern_str.startswith('~'):
return re.search(pattern_str[1:], str)
else:
return fnmatch.fnmatch(str, pattern_str)
except Exception:
raise AnsibleError('invalid host pattern: %s' % pattern_str)
def _match_list(self, items, item_attr, pattern_str):
results = []
try:
if not pattern_str.startswith('~'):
pattern = re.compile(fnmatch.translate(pattern_str))
else:
pattern = re.compile(pattern_str[1:])
except Exception:
raise AnsibleError('invalid host pattern: %s' % pattern_str)
for item in items:
if pattern.match(getattr(item, item_attr)):
results.append(item)
return results
def get_hosts(self, pattern="all", ignore_limits=False, ignore_restrictions=False):
"""
Takes a pattern or list of patterns and returns a list of matching
inventory host names, taking into account any active restrictions
or applied subsets
"""
# Check if pattern already computed
if isinstance(pattern, list):
pattern_hash = u":".join(pattern)
else:
pattern_hash = pattern
if not ignore_limits and self._subset:
pattern_hash += u":%s" % to_text(self._subset)
if not ignore_restrictions and self._restriction:
pattern_hash += u":%s" % to_text(self._restriction)
if pattern_hash not in HOSTS_PATTERNS_CACHE:
patterns = Inventory.split_host_pattern(pattern)
hosts = self._evaluate_patterns(patterns)
# mainly useful for hostvars[host] access
if not ignore_limits and self._subset:
# exclude hosts not in a subset, if defined
subset = self._evaluate_patterns(self._subset)
hosts = [ h for h in hosts if h in subset ]
if not ignore_restrictions and self._restriction:
# exclude hosts mentioned in any restriction (ex: failed hosts)
hosts = [ h for h in hosts if h.name in self._restriction ]
seen = set()
HOSTS_PATTERNS_CACHE[pattern_hash] = [x for x in hosts if x not in seen and not seen.add(x)]
return HOSTS_PATTERNS_CACHE[pattern_hash][:]
@classmethod
def split_host_pattern(cls, pattern):
"""
Takes a string containing host patterns separated by commas (or a list
thereof) and returns a list of single patterns (which may not contain
commas). Whitespace is ignored.
Also accepts ':' as a separator for backwards compatibility, but it is
not recommended due to the conflict with IPv6 addresses and host ranges.
Example: 'a,b[1], c[2:3] , d' -> ['a', 'b[1]', 'c[2:3]', 'd']
"""
if isinstance(pattern, list):
return list(itertools.chain(*map(cls.split_host_pattern, pattern)))
# If it's got commas in it, we'll treat it as a straightforward
# comma-separated list of patterns.
elif ',' in pattern:
            patterns = re.split(r'\s*,\s*', pattern)
# If it doesn't, it could still be a single pattern. This accounts for
# non-separator uses of colons: IPv6 addresses and [x:y] host ranges.
else:
try:
(base, port) = parse_address(pattern, allow_ranges=True)
patterns = [pattern]
except:
# The only other case we accept is a ':'-separated list of patterns.
# This mishandles IPv6 addresses, and is retained only for backwards
# compatibility.
patterns = re.findall(
r'''(?: # We want to match something comprising:
[^\s:\[\]] # (anything other than whitespace or ':[]'
| # ...or...
\[[^\]]*\] # a single complete bracketed expression)
)+ # occurring once or more
''', pattern, re.X
)
return [p.strip() for p in patterns]
@classmethod
def order_patterns(cls, patterns):
# Host specifiers should be sorted to ensure consistent behavior
pattern_regular = []
pattern_intersection = []
pattern_exclude = []
for p in patterns:
if p.startswith("!"):
pattern_exclude.append(p)
elif p.startswith("&"):
pattern_intersection.append(p)
elif p:
pattern_regular.append(p)
        # if no regular pattern was given (i.e. only exclusions and/or
        # intersections were supplied), imply 'all' so those selectors still
        # have a base set to operate on
if pattern_regular == []:
pattern_regular = ['all']
# when applying the host selectors, run those without the "&" or "!"
# first, then the &s, then the !s.
return pattern_regular + pattern_intersection + pattern_exclude
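    # Illustrative sketch (added, not part of the original module): given a
    # mixed pattern list, order_patterns() moves intersections and exclusions
    # last, so evaluation starts from a regular base set.
    #
    #   Inventory.order_patterns(['!db', 'web*', '&staging'])
    #   # -> ['web*', '&staging', '!db']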
def _evaluate_patterns(self, patterns):
"""
Takes a list of patterns and returns a list of matching host names,
taking into account any negative and intersection patterns.
"""
patterns = Inventory.order_patterns(patterns)
hosts = []
for p in patterns:
# avoid resolving a pattern that is a plain host
if p in self._hosts_cache:
hosts.append(self.get_host(p))
else:
that = self._match_one_pattern(p)
if p.startswith("!"):
hosts = [ h for h in hosts if h not in that ]
elif p.startswith("&"):
hosts = [ h for h in hosts if h in that ]
else:
to_append = [ h for h in that if h.name not in [ y.name for y in hosts ] ]
hosts.extend(to_append)
return hosts
def _match_one_pattern(self, pattern):
"""
Takes a single pattern and returns a list of matching host names.
Ignores intersection (&) and exclusion (!) specifiers.
The pattern may be:
1. A regex starting with ~, e.g. '~[abc]*'
2. A shell glob pattern with ?/*/[chars]/[!chars], e.g. 'foo*'
3. An ordinary word that matches itself only, e.g. 'foo'
The pattern is matched using the following rules:
1. If it's 'all', it matches all hosts in all groups.
2. Otherwise, for each known group name:
(a) if it matches the group name, the results include all hosts
in the group or any of its children.
(b) otherwise, if it matches any hosts in the group, the results
include the matching hosts.
This means that 'foo*' may match one or more groups (thus including all
hosts therein) but also hosts in other groups.
The built-in groups 'all' and 'ungrouped' are special. No pattern can
match these group names (though 'all' behaves as though it matches, as
described above). The word 'ungrouped' can match a host of that name,
and patterns like 'ungr*' and 'al*' can match either hosts or groups
other than all and ungrouped.
If the pattern matches one or more group names according to these rules,
it may have an optional range suffix to select a subset of the results.
This is allowed only if the pattern is not a regex, i.e. '~foo[1]' does
not work (the [1] is interpreted as part of the regex), but 'foo*[1]'
would work if 'foo*' matched the name of one or more groups.
Duplicate matches are always eliminated from the results.
"""
if pattern.startswith("&") or pattern.startswith("!"):
pattern = pattern[1:]
if pattern not in self._pattern_cache:
(expr, slice) = self._split_subscript(pattern)
hosts = self._enumerate_matches(expr)
try:
hosts = self._apply_subscript(hosts, slice)
except IndexError:
raise AnsibleError("No hosts matched the subscripted pattern '%s'" % pattern)
self._pattern_cache[pattern] = hosts
return self._pattern_cache[pattern]
def _split_subscript(self, pattern):
"""
Takes a pattern, checks if it has a subscript, and returns the pattern
without the subscript and a (start,end) tuple representing the given
subscript (or None if there is no subscript).
Validates that the subscript is in the right syntax, but doesn't make
sure the actual indices make sense in context.
"""
# Do not parse regexes for enumeration info
if pattern.startswith('~'):
return (pattern, None)
# We want a pattern followed by an integer or range subscript.
# (We can't be more restrictive about the expression because the
# fnmatch semantics permit [\[:\]] to occur.)
pattern_with_subscript = re.compile(
r'''^
(.+) # A pattern expression ending with...
\[(?: # A [subscript] expression comprising:
(-?[0-9]+)| # A single positive or negative number
([0-9]+)([:-]) # Or an x:y or x: range.
([0-9]*)
)\]
$
''', re.X
)
subscript = None
m = pattern_with_subscript.match(pattern)
if m:
(pattern, idx, start, sep, end) = m.groups()
if idx:
subscript = (int(idx), None)
else:
if not end:
end = -1
subscript = (int(start), int(end))
if sep == '-':
display.warning("Use [x:y] inclusive subscripts instead of [x-y] which has been removed")
return (pattern, subscript)
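    # Illustrative sketch (added, not part of the original module): subscript
    # parsing splits the trailing [i] / [x:y] selector from the pattern, e.g.
    #
    #   inv._split_subscript('webservers[0]')    # -> ('webservers', (0, None))
    #   inv._split_subscript('webservers[1:3]')  # -> ('webservers', (1, 3))
    #   inv._split_subscript('~web.*')           # -> ('~web.*', None)  (regexes are skipped)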
def _apply_subscript(self, hosts, subscript):
"""
Takes a list of hosts and a (start,end) tuple and returns the subset of
hosts based on the subscript (which may be None to return all hosts).
"""
if not hosts or not subscript:
return hosts
(start, end) = subscript
if end:
if end == -1:
end = len(hosts)-1
return hosts[start:end+1]
else:
return [ hosts[start] ]
def _enumerate_matches(self, pattern):
"""
Returns a list of host names matching the given pattern according to the
rules explained above in _match_one_pattern.
"""
results = []
hostnames = set()
def __append_host_to_results(host):
if host.name not in hostnames:
hostnames.add(host.name)
results.append(host)
groups = self.get_groups()
for group in groups.values():
if pattern == 'all':
for host in group.get_hosts():
if host.implicit:
continue
__append_host_to_results(host)
else:
if self._match(group.name, pattern) and group.name not in ('all', 'ungrouped'):
for host in group.get_hosts():
if host.implicit:
continue
__append_host_to_results(host)
else:
matching_hosts = self._match_list(group.get_hosts(), 'name', pattern)
for host in matching_hosts:
__append_host_to_results(host)
if pattern in C.LOCALHOST and len(results) == 0:
new_host = self._create_implicit_localhost(pattern)
results.append(new_host)
return results
def _create_implicit_localhost(self, pattern):
if self.localhost:
new_host = self.localhost
else:
new_host = Host(pattern)
new_host.address = "127.0.0.1"
new_host.implicit = True
new_host.vars = self.get_host_vars(new_host)
new_host.set_variable("ansible_connection", "local")
if "ansible_python_interpreter" not in new_host.vars:
py_interp = sys.executable
if not py_interp:
# sys.executable is not set in some cornercases. #13585
display.warning('Unable to determine python interpreter from sys.executable. Using /usr/bin/python default.'
' You can correct this by setting ansible_python_interpreter for localhost')
py_interp = '/usr/bin/python'
new_host.set_variable("ansible_python_interpreter", py_interp)
self.get_group("ungrouped").add_host(new_host)
self.localhost = new_host
return new_host
def clear_pattern_cache(self):
''' called exclusively by the add_host plugin to allow patterns to be recalculated '''
global HOSTS_PATTERNS_CACHE
HOSTS_PATTERNS_CACHE = {}
self._pattern_cache = {}
def clear_group_dict_cache(self):
''' called exclusively by the add_host and group_by plugins '''
self._group_dict_cache = {}
def groups_for_host(self, host):
if host in self._hosts_cache:
return self._hosts_cache[host].get_groups()
else:
return []
def get_groups(self):
return self.groups
def get_host(self, hostname):
if hostname not in self._hosts_cache:
self._hosts_cache[hostname] = self._get_host(hostname)
return self._hosts_cache[hostname]
def _get_host(self, hostname):
matching_host = None
if hostname in C.LOCALHOST:
if self.localhost:
                matching_host = self.localhost
else:
for host in self.get_group('all').get_hosts():
if host.name in C.LOCALHOST:
matching_host = host
break
if not matching_host:
matching_host = self._create_implicit_localhost(hostname)
# update caches
self._hosts_cache[hostname] = matching_host
for host in C.LOCALHOST.difference((hostname,)):
self._hosts_cache[host] = self._hosts_cache[hostname]
else:
for group in self.groups.values():
for host in group.get_hosts():
if host not in self._hosts_cache:
self._hosts_cache[host.name] = host
if hostname == host.name:
matching_host = host
return matching_host
def get_group(self, groupname):
return self.groups.get(groupname)
def get_group_variables(self, groupname, update_cached=False, vault_password=None):
if groupname not in self._vars_per_group or update_cached:
self._vars_per_group[groupname] = self._get_group_variables(groupname, vault_password=vault_password)
return self._vars_per_group[groupname]
def _get_group_variables(self, groupname, vault_password=None):
group = self.get_group(groupname)
if group is None:
raise Exception("group not found: %s" % groupname)
vars = {}
# plugin.get_group_vars retrieves just vars for specific group
vars_results = [ plugin.get_group_vars(group, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_group_vars')]
for updated in vars_results:
if updated is not None:
vars = combine_vars(vars, updated)
# Read group_vars/ files
vars = combine_vars(vars, self.get_group_vars(group))
return vars
def get_group_dict(self):
"""
In get_vars() we merge a 'magic' dictionary 'groups' with group name
keys and hostname list values into every host variable set.
Cache the creation of this structure here
"""
if not self._group_dict_cache:
for (group_name, group) in iteritems(self.groups):
self._group_dict_cache[group_name] = [h.name for h in group.get_hosts()]
return self._group_dict_cache
def get_vars(self, hostname, update_cached=False, vault_password=None):
host = self.get_host(hostname)
if not host:
raise AnsibleError("no vars as host is not in inventory: %s" % hostname)
return host.get_vars()
def get_host_variables(self, hostname, update_cached=False, vault_password=None):
if hostname not in self._vars_per_host or update_cached:
self._vars_per_host[hostname] = self._get_host_variables(hostname, vault_password=vault_password)
return self._vars_per_host[hostname]
def _get_host_variables(self, hostname, vault_password=None):
host = self.get_host(hostname)
if host is None:
raise AnsibleError("no host vars as host is not in inventory: %s" % hostname)
vars = {}
# plugin.run retrieves all vars (also from groups) for host
vars_results = [ plugin.run(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'run')]
for updated in vars_results:
if updated is not None:
vars = combine_vars(vars, updated)
# plugin.get_host_vars retrieves just vars for specific host
vars_results = [ plugin.get_host_vars(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_host_vars')]
for updated in vars_results:
if updated is not None:
vars = combine_vars(vars, updated)
# still need to check InventoryParser per host vars
# which actually means InventoryScript per host,
# which is not performant
if self.parser is not None:
vars = combine_vars(vars, self.parser.get_host_variables(host))
return vars
def add_group(self, group):
if group.name not in self.groups:
self.groups[group.name] = group
else:
raise AnsibleError("group already in inventory: %s" % group.name)
def list_hosts(self, pattern="all"):
""" return a list of hostnames for a pattern """
result = [ h for h in self.get_hosts(pattern) ]
if len(result) == 0 and pattern in C.LOCALHOST:
result = [pattern]
return result
def list_groups(self):
return sorted(self.groups.keys(), key=lambda x: x)
def restrict_to_hosts(self, restriction):
"""
Restrict list operations to the hosts given in restriction. This is used
to batch serial operations in main playbook code, don't use this for other
reasons.
"""
if restriction is None:
return
elif not isinstance(restriction, list):
restriction = [ restriction ]
self._restriction = [ h.name for h in restriction ]
def subset(self, subset_pattern):
"""
Limits inventory results to a subset of inventory that matches a given
        pattern, such as to select a given geographic or numeric slice from
        a previous 'hosts' selection that only selects roles, or vice versa.
Corresponds to --limit parameter to ansible-playbook
"""
if subset_pattern is None:
self._subset = None
else:
subset_patterns = Inventory.split_host_pattern(subset_pattern)
results = []
# allow Unix style @filename data
for x in subset_patterns:
if x.startswith("@"):
                    with open(x[1:]) as fd:
                        results.extend(fd.read().split("\n"))
else:
results.append(x)
self._subset = results
def remove_restriction(self):
""" Do not restrict list operations """
self._restriction = None
def is_file(self):
"""
Did inventory come from a file? We don't use the equivalent loader
        methods in inventory, due to the fact that the loader does an implicit
DWIM on the path, which may be incorrect for inventory paths relative
to the playbook basedir.
"""
if not isinstance(self.host_list, string_types):
return False
return os.path.isfile(self.host_list) or self.host_list == os.devnull
def is_directory(self, path):
"""
Is the inventory host list a directory? Same caveat for here as with
the is_file() method above.
"""
if not isinstance(self.host_list, string_types):
return False
return os.path.isdir(path)
def basedir(self):
""" if inventory came from a file, what's the directory? """
dname = self.host_list
if self.is_directory(self.host_list):
dname = self.host_list
elif not self.is_file():
dname = None
else:
dname = os.path.dirname(self.host_list)
if dname is None or dname == '' or dname == '.':
dname = os.getcwd()
if dname:
dname = os.path.abspath(dname)
return dname
def src(self):
""" if inventory came from a file, what's the directory and file name? """
if not self.is_file():
return None
return self.host_list
def playbook_basedir(self):
""" returns the directory of the current playbook """
return self._playbook_basedir
def set_playbook_basedir(self, dir_name):
"""
sets the base directory of the playbook so inventory can use it as a
basedir for host_ and group_vars, and other things.
"""
# Only update things if dir is a different playbook basedir
if dir_name != self._playbook_basedir:
# we're changing the playbook basedir, so if we had set one previously
# clear the host/group vars entries from the VariableManager so they're
# not incorrectly used by playbooks from different directories
if self._playbook_basedir:
self._variable_manager.clear_playbook_hostgroup_vars_files(self._playbook_basedir)
self._playbook_basedir = dir_name
# get group vars from group_vars/ files
# TODO: excluding the new_pb_basedir directory may result in group_vars
# files loading more than they should, however with the file caching
# we do this shouldn't be too much of an issue. Still, this should
# be fixed at some point to allow a "first load" to touch all of the
# directories, then later runs only touch the new basedir specified
found_group_vars = self._find_group_vars_files(self._playbook_basedir)
if found_group_vars:
self._group_vars_files = self._group_vars_files.union(found_group_vars)
for group in self.groups.values():
self.get_group_vars(group)
found_host_vars = self._find_host_vars_files(self._playbook_basedir)
if found_host_vars:
self._host_vars_files = self._host_vars_files.union(found_host_vars)
# get host vars from host_vars/ files
for host in self.get_hosts():
self.get_host_vars(host)
# invalidate cache
self._vars_per_host = {}
self._vars_per_group = {}
def get_host_vars(self, host, new_pb_basedir=False, return_results=False):
""" Read host_vars/ files """
return self._get_hostgroup_vars(host=host, group=None, new_pb_basedir=new_pb_basedir, return_results=return_results)
def get_group_vars(self, group, new_pb_basedir=False, return_results=False):
""" Read group_vars/ files """
return self._get_hostgroup_vars(host=None, group=group, new_pb_basedir=new_pb_basedir, return_results=return_results)
def _find_group_vars_files(self, basedir):
""" Find group_vars/ files """
if basedir in ('', None):
basedir = './'
path = os.path.realpath(os.path.join(basedir, 'group_vars'))
found_vars = set()
if os.path.exists(path):
if os.path.isdir(path):
found_vars = set(os.listdir(to_text(path)))
else:
display.warning("Found group_vars that is not a directory, skipping: %s" % path)
return found_vars
def _find_host_vars_files(self, basedir):
""" Find host_vars/ files """
if basedir in ('', None):
basedir = './'
path = os.path.realpath(os.path.join(basedir, 'host_vars'))
found_vars = set()
if os.path.exists(path):
found_vars = set(os.listdir(to_text(path)))
return found_vars
def _get_hostgroup_vars(self, host=None, group=None, new_pb_basedir=False, return_results=False):
"""
Loads variables from group_vars/<groupname> and host_vars/<hostname> in directories parallel
to the inventory base directory or in the same directory as the playbook. Variables in the playbook
dir will win over the inventory dir if files are in both.
"""
results = {}
scan_pass = 0
_basedir = self._basedir
_playbook_basedir = self._playbook_basedir
# look in both the inventory base directory and the playbook base directory
# unless we do an update for a new playbook base dir
if not new_pb_basedir and _playbook_basedir:
basedirs = [_basedir, _playbook_basedir]
else:
basedirs = [_basedir]
for basedir in basedirs:
# this can happen from particular API usages, particularly if not run
# from /usr/bin/ansible-playbook
if basedir in ('', None):
basedir = './'
scan_pass = scan_pass + 1
            # it's not an error if the directory does not exist, keep moving
if not os.path.exists(basedir):
continue
# save work of second scan if the directories are the same
if _basedir == _playbook_basedir and scan_pass != 1:
continue
            # Before trying to load vars from file, check that the directory contains relevant file names
if host is None and any(map(lambda ext: group.name + ext in self._group_vars_files, C.YAML_FILENAME_EXTENSIONS)):
# load vars in dir/group_vars/name_of_group
base_path = to_text(os.path.abspath(os.path.join(to_bytes(basedir), b"group_vars/" + to_bytes(group.name))), errors='surrogate_or_strict')
host_results = self._variable_manager.add_group_vars_file(base_path, self._loader)
if return_results:
results = combine_vars(results, host_results)
elif group is None and any(map(lambda ext: host.name + ext in self._host_vars_files, C.YAML_FILENAME_EXTENSIONS)):
# same for hostvars in dir/host_vars/name_of_host
base_path = to_text(os.path.abspath(os.path.join(to_bytes(basedir), b"host_vars/" + to_bytes(host.name))), errors='surrogate_or_strict')
group_results = self._variable_manager.add_host_vars_file(base_path, self._loader)
if return_results:
results = combine_vars(results, group_results)
# all done, results is a dictionary of variables for this particular host.
return results
def refresh_inventory(self):
self.clear_pattern_cache()
self.clear_group_dict_cache()
self._hosts_cache = {}
self._vars_per_host = {}
self._vars_per_group = {}
self.groups = {}
self.parse_inventory(self.host_list)
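# Minimal usage sketch (added; the import paths and 'hosts.ini' are
# assumptions for the Ansible API of this era, not taken from this file):
#
#   from ansible.parsing.dataloader import DataLoader
#   from ansible.vars import VariableManager
#   inventory = Inventory(loader=DataLoader(),
#                         variable_manager=VariableManager(),
#                         host_list='hosts.ini')
#   print(inventory.list_hosts('all'))
#   print(inventory.get_hosts('web*:&staging:!db'))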
| gpl-3.0 | -3,553,771,108,302,727,700 | 39.184455 | 155 | 0.582984 | false | 4.335837 | false | false | false |
kate-v-stepanova/TACA | taca/illumina/MiSeq_Runs.py | 2 | 3883 | import os
import re
import csv
import glob
import shutil
import gzip
import operator
import subprocess
from datetime import datetime
from taca.utils.filesystem import chdir, control_fastq_filename
from taca.illumina.HiSeq_Runs import HiSeq_Run
from taca.utils import misc
from flowcell_parser.classes import RunParametersParser, SampleSheetParser, RunParser, LaneBarcodeParser, DemuxSummaryParser
import logging
logger = logging.getLogger(__name__)
class MiSeq_Run(HiSeq_Run):
def __init__(self, path_to_run, configuration):
        # Constructor. Builds the MiSeq run object; the run counts as an NGI
        # run only if the SampleSheet Description is 'Production' or 'Applications'.
super(MiSeq_Run, self).__init__( path_to_run, configuration)
self._set_sequencer_type()
self._set_run_type()
def _set_sequencer_type(self):
self.sequencer_type = "MiSeq"
def _set_run_type(self):
ssname = os.path.join(self.run_dir, 'Data', 'Intensities', 'BaseCalls','SampleSheet.csv')
if not os.path.exists(ssname):
#case in which no samplesheet is found, assume it is a non NGI run
self.run_type = "NON-NGI-RUN"
else:
            # if the SampleSheet exists, check whether this is an NGI run
ssparser = SampleSheetParser(ssname)
if ssparser.header['Description'] == "Production" or ssparser.header['Description'] == "Applications":
self.run_type = "NGI-RUN"
else:
#otherwise this is a non NGI run
self.run_type = "NON-NGI-RUN"
def _get_samplesheet(self):
"""
Locate and parse the samplesheet for a run.
In MiSeq case this is located in FC_DIR/Data/Intensities/BaseCalls/SampleSheet.csv
"""
ssname = os.path.join(self.run_dir, 'Data', 'Intensities', 'BaseCalls','SampleSheet.csv')
if os.path.exists(ssname):
#if exists parse the SampleSheet
return ssname
else:
            # some MiSeq runs do not have a SampleSheet at all; in this case assume they are non-NGI.
            # Not a really clean solution, but what else can be done if no samplesheet is provided?
return None
def _generate_clean_samplesheet(self, ssparser):
"""
Will generate a 'clean' samplesheet, for bcl2fastq2.17
"""
output=""
#Header
output+="[Header]{}".format(os.linesep)
for field in ssparser.header:
output+="{},{}".format(field.rstrip(), ssparser.header[field].rstrip())
output+=os.linesep
#now parse the data section
data = []
for line in ssparser.data:
entry = {}
for field, value in line.iteritems():
if ssparser.dfield_sid in field:
entry[field] ='Sample_{}'.format(value)
elif ssparser.dfield_proj in field:
entry[field] = value.replace(".", "__")
else:
entry[field] = value
if 'Lane' not in entry:
entry['Lane'] = '1'
data.append(entry)
fields_to_output = ['Lane', ssparser.dfield_sid, ssparser.dfield_snm, 'index', ssparser.dfield_proj]
#now create the new SampleSheet data section
output+="[Data]{}".format(os.linesep)
for field in ssparser.datafields:
if field not in fields_to_output:
fields_to_output.append(field)
output+=",".join(fields_to_output)
output+=os.linesep
#now process each data entry and output it
for entry in data:
line = []
for field in fields_to_output:
line.append(entry[field])
output+=",".join(line)
output+=os.linesep
return output
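# Minimal usage sketch (added; the run directory and empty configuration are
# hypothetical): a MiSeq_Run decides NGI vs non-NGI from the SampleSheet
# Description field at construction time.
#
#   run = MiSeq_Run('/data/150101_M00000_0001_000000000-AAAAA', configuration={})
#   if run.run_type == 'NGI-RUN':
#       ssname = run._get_samplesheet()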
| mit | 8,839,568,075,589,495,000 | 33.669643 | 124 | 0.594128 | false | 3.914315 | false | false | false |
ESOedX/edx-platform | common/djangoapps/util/request_rate_limiter.py | 2 | 1692 | """
A utility class which wraps the RateLimitMixin 3rd party class to do bad request counting
which can be used for rate limiting
"""
from __future__ import absolute_import
from django.conf import settings
from ratelimitbackend.backends import RateLimitMixin
class RequestRateLimiter(RateLimitMixin):
"""
Use the 3rd party RateLimitMixin to help do rate limiting.
"""
def is_rate_limit_exceeded(self, request):
"""
Returns if the client has been rated limited
"""
counts = self.get_counters(request)
return sum(counts.values()) >= self.requests
def tick_request_counter(self, request):
"""
        Ticks any counters used to compute when the rate limit has been reached
"""
self.cache_incr(self.get_cache_key(request))
class BadRequestRateLimiter(RequestRateLimiter):
"""
Default rate limit is 30 requests for every 5 minutes.
"""
pass
class PasswordResetEmailRateLimiter(RequestRateLimiter):
"""
Rate limiting requests to send password reset emails.
"""
email_rate_limit = getattr(settings, 'PASSWORD_RESET_EMAIL_RATE_LIMIT', {})
requests = email_rate_limit.get('no_of_emails', 1)
cache_timeout_seconds = email_rate_limit.get('per_seconds', 60)
reset_email_cache_prefix = 'resetemail'
def key(self, request, dt):
"""
Returns cache key.
"""
return '%s-%s-%s' % (
self.reset_email_cache_prefix,
self.get_ip(request),
dt.strftime('%Y%m%d%H%M'),
)
def expire_after(self):
"""
Returns timeout for cache keys.
"""
return self.cache_timeout_seconds
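# Minimal usage sketch (added; 'request' is a Django HttpRequest and
# send_reset_email is a hypothetical helper): a view can consult the limiter
# before sending a password reset email.
#
#   limiter = PasswordResetEmailRateLimiter()
#   if limiter.is_rate_limit_exceeded(request):
#       return HttpResponse(status=403)
#   limiter.tick_request_counter(request)
#   send_reset_email(...)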
| agpl-3.0 | 8,674,315,781,843,019,000 | 27.677966 | 89 | 0.638298 | false | 3.971831 | false | false | false |
SarahPythonista/acmpy | examples/cs106a/breakout.py | 1 | 3149 | from spgl.graphics.gwindow import *
from spgl.graphics.gobjects import *
from spgl.graphics.gevents import *
from spgl.gtimer import *
from spgl.grandom import *
import time
window = GWindow()
window.setWindowTitle("Breakout")
# waitForClick()
ball = GOval(20,20, window.getWidth()/2, window.getHeight()/2)
ball.setFilled(True)
window.add(ball)
vx = 2.7
vy = 3.0
paddle = GRect(125, 15, window.getWidth()/2, window.getHeight() - 50)
paddle.setFilled(True)
window.add(paddle)
spacer = 5
recW = (window.getWidth() - (9*spacer)) / 10.0
for i in range(10):
for j in range(10):
rec = GRect(recW, 15, j*(recW + spacer), 50 + i * (15 + spacer))
rec.setFilled(True)
if(i<2):
rec.setColor(color = "RED")
elif(i<4):
rec.setColor(color = "ORANGE")
elif(i<6):
rec.setColor(color = "YELLOW")
elif(i<8):
rec.setColor(color = "GREEN")
elif(i<10):
rec.setColor(color = "BLUE")
window.add(rec)
timer = GTimer(milliseconds=15)
import sys
timer.start()
# sys.exit(0)
while(True):
e = getNextEvent()
if(e.getEventType() == EventType.MOUSE_MOVED):
newX = e.getX()
if(newX - paddle.getWidth()/2 > 0 and \
newX + paddle.getWidth()/2 < window.getWidth()):
paddle.setLocation(x = newX - paddle.getWidth()/2, y = paddle.getY())
elif(newX - paddle.getWidth()/2 < 0):
paddle.setLocation(x = 0, y = paddle.getY())
elif(newX + paddle.getWidth()/2 > window.getWidth()):
paddle.setLocation(x = window.getWidth() - paddle.getWidth(), \
y = paddle.getY())
elif(e.getEventType() == EventType.TIMER_TICKED):
ball.move(vx, vy)
# check for wall collisions
if(ball.getX() + ball.getWidth() > window.getWidth() or \
ball.getX() < 0):
vx = -vx
if(ball.getY() + ball.getHeight() > window.getHeight() or \
ball.getY() < 0):
vy = -vy
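        # The four probes below sample the window just outside each corner of
        # the ball's bounding box (top-left, top-right, bottom-left,
        # bottom-right) to detect which object, if any, the ball has hit.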
obj1 = window.getObjectAt(ball.getX()-1, ball.getY()-1)
obj2 = window.getObjectAt(ball.getX() + ball.getWidth() + 1, ball.getY()-1)
obj3 = window.getObjectAt(ball.getX()-1, ball.getY() + ball.getHeight()+1)
obj4 = window.getObjectAt(ball.getX() + ball.getWidth() + 1, ball.getY() + ball.getHeight()+1)
# check for paddle collisions
if(window.getObjectAt(ball.getX(), ball.getY()) == paddle or \
window.getObjectAt(ball.getX() + ball.getWidth(), ball.getY()) == paddle or \
window.getObjectAt(ball.getX(), ball.getY() + ball.getHeight()) == paddle or \
window.getObjectAt(ball.getX() + ball.getWidth(), ball.getY() + ball.getHeight()) == paddle):
if(vy > 0):
vy = -vy
elif(obj1 != None and obj1 != paddle):
vy = -vy
window.remove(obj1)
elif(obj2 != None and obj2 != paddle):
vy = -vy
window.remove(obj2)
elif(obj3 != None and obj3 != paddle):
vy = -vy
window.remove(obj3)
elif(obj4 != None and obj4 != paddle):
vy = -vy
window.remove(obj4)
elif(e.getEventType() == EventType.KEY_TYPED):
initRandomSeed()
window.remove(ball)
ball = GOval(20,20, window.getWidth()/2, window.getHeight()/2)
ball.setFilled(True)
window.add(ball)
vx = randomReal(2,4)
if(randomChance(.5)): vx = -vx
vy = 3.0
| mit | 2,413,356,645,683,590,000 | 28.872549 | 97 | 0.62496 | false | 2.58751 | false | false | false |
SanPen/PracticalGridModeling | examples/topology_engine.py | 1 | 22254 | import numpy as np
import pandas as pd
from scipy.sparse import csc_matrix, lil_matrix, diags
from JacobianBased import IwamotoNR
np.set_printoptions(linewidth=10000, precision=3)
# pd.set_option('display.height', 1000)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
class Graph:
"""
    Program to count islands in a boolean 2D matrix
"""
def __init__(self, row, col, g):
"""
        :param row: number of rows
        :param col: number of columns
:param g: adjacency matrix
"""
self.ROW = row
self.COL = col
self.graph = g
def is_safe(self, i, j, visited):
"""
A function to check if a given cell (row, col) can be included in DFS
:param i: row index
:param j: column index
:param visited: 2D array of visited elements
:return: if it is safe or not
"""
# row number is in range, column number is in range and value is 1 and not yet visited
        return 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j]
def dfs(self, i, j, visited):
"""
A utility function to do DFS for a 2D boolean matrix.
It only considers the 8 neighbours as adjacent vertices
:param i: row index
:param j: column index
:param visited: 2D array of visited elements
"""
# TODO: Use a proper DFS with sparsity considerations
# These arrays are used to get row and column numbers of 8 neighbours of a given cell
rowNbr = [-1, -1, -1, 0, 0, 1, 1, 1]
colNbr = [-1, 0, 1, -1, 1, -1, 0, 1]
# Mark this cell as visited
visited[i][j] = True
# Recur for all connected neighbours
for k in range(8):
if self.is_safe(i + rowNbr[k], j + colNbr[k], visited):
self.dfs(i + rowNbr[k], j + colNbr[k], visited)
def count_islands(self):
"""
The main function that returns count of islands in a given boolean 2D matrix
:return: count of islands
"""
# Make a bool array to mark visited cells. Initially all cells are unvisited
# TODO: Replace with sparse matrix
visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
# Initialize count as 0 and traverse through the all cells of given matrix
count = 0
# TODO: replace with sparse version
for i in range(self.ROW):
for j in range(self.COL):
# If a cell with value 1 is not visited yet, then new island found
if not visited[i][j] and self.graph[i][j] == 1:
# Visit all cells in this island and increment island count
self.dfs(i, j, visited)
count += 1
return count
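# Illustrative sketch (added, not part of the original module): count the
# islands of 1s in a small boolean matrix, using 8-neighbour connectivity.
#
#   g = [[1, 1, 0],
#        [0, 0, 0],
#        [0, 0, 1]]
#   print(Graph(3, 3, g).count_islands())  # -> 2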
class Terminal:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class ConnectivityNode:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class ShuntDevice:
def __init__(self, name, terminal: Terminal):
self.name = name
self.terminal = terminal
def __str__(self):
return self.name
class Load(ShuntDevice):
def __init__(self, name, terminal: Terminal, P=0, Q=0):
ShuntDevice.__init__(self, name, terminal)
self.P = P
self.Q = Q
class Shunt(ShuntDevice):
def __init__(self, name, terminal: Terminal, G=0, B=0):
ShuntDevice.__init__(self, name, terminal)
self.G = G
self.B = B
class Generator(ShuntDevice):
def __init__(self, name, terminal: Terminal, P=0, Vset=0):
ShuntDevice.__init__(self, name, terminal)
self.P = P
self.Vset = Vset
class Branch:
def __init__(self, name, t1, t2):
self.name = name
self.t1 = t1
self.t2 = t2
def get_y(self):
return 100.0, 0.0, 0.0, 100.0
def __str__(self):
return self.name
class Jumper(Branch):
def __init__(self, name, t1, t2):
Branch.__init__(self, name, t1, t2)
class Switch(Branch):
def __init__(self, name, t1, t2, state=True):
Branch.__init__(self, name, t1, t2)
self.state = state
class Line(Branch):
def __init__(self, name, t1, t2, r=0, x=0, r0=0, x0=0, g=0, b=0, g0=0, b0=0, length=1, tap_module=1.0, tap_angle=0):
Branch.__init__(self, name, t1, t2)
self.r = r
self.x = x
self.r0 = r0
self.x0 = x0
self.g = g
self.b = b
self.g0 = g0
self.b0 = b0
self.length = length
self.tap_module = tap_module
self.tap_angle = tap_angle
def get_y(self):
tap = self.tap_module * np.exp(-1j * self.tap_angle)
Ysh = complex(self.g * self.length, self.b * self.length) / 2
        if self.r != 0 or self.x != 0:
Ys = 1 / complex(self.r * self.length, self.x * self.length)
else:
raise ValueError("The impedance at " + self.name + " is zero")
Ytt = Ys + Ysh
Yff = Ytt / (tap * np.conj(tap))
Yft = - Ys / np.conj(tap)
Ytf = - Ys / tap
return Yff, Yft, Ytf, Ytt
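        # Note (added): the four terms above are the entries of the standard
        # 2x2 pi-model branch admittance matrix, so that
        #   [If, It]^T = [[Yff, Yft], [Ytf, Ytt]] . [Vf, Vt]^T,
        # with the tap transformer referred to the "from" side.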
class Connectivity:
def __init__(self, n_terminals, n_nodes, n_br, n_sw, n_ld, n_gen, n_sh, Sbase):
"""
Constructor
:param n_terminals: number of terminals
:param n_nodes: number of nodes
:param n_br: number of branches
:param n_sw: number of switches
:param n_ld: number of loads
:param n_gen: number of generators
:param n_sh: number of shunts
"""
self.Sbase = Sbase
# connectivity nodes - terminals matrix
self.CN_T = lil_matrix((n_nodes, n_terminals), dtype=int)
# lines, transformers and jumpers to terminals matrix
self.BR_T_f = lil_matrix((n_br, n_terminals), dtype=int)
self.BR_T_t = lil_matrix((n_br, n_terminals), dtype=int)
# switches - terminals matrix
self.SW_T = lil_matrix((n_sw, n_terminals), dtype=int)
self.SW_states = np.zeros(n_sw, dtype=int)
# shunt elements (loads, shunts, generators)
self.LD_T = lil_matrix((n_ld, n_terminals), dtype=int)
self.GEN_T = lil_matrix((n_gen, n_terminals), dtype=int)
self.SH_T = lil_matrix((n_sh, n_terminals), dtype=int)
# admittance components vectors
self.BR_yff = np.zeros(n_br, dtype=complex)
self.BR_yft = np.zeros(n_br, dtype=complex)
self.BR_ytf = np.zeros(n_br, dtype=complex)
self.BR_ytt = np.zeros(n_br, dtype=complex)
# load generation and shunts
self.LD_Power = np.zeros(n_ld, dtype=complex)
self.Gen_Power = np.zeros(n_gen, dtype=float)
self.Gen_voltage = np.zeros(n_gen, dtype=float)
self.SH_Power = np.zeros(n_sh, dtype=complex)
# names
self.T_names = [None] * n_terminals
self.CN_names = [None] * n_nodes
self.BR_names = [None] * n_br
self.SW_names = [None] * n_sw
self.LD_names = [None] * n_ld
self.GEN_names = [None] * n_gen
self.SH_names = [None] * n_sh
# resulting matrices
self.BR_CN = None # nodes - branch
self.CN_CN = None # node - node
self.SW_T_state = None # switch - terminal with the switches state applied
self.BR_SW_f = None # branch - switch
self.BR_SW_t = None # branch - switch
self.CN_SW = None # node - switch
self.LD_CN = None # load - node
self.GEN_CN = None # generator - node
self.SH_CN = None # shunt - node
# resulting matrices
self.Cf = None
self.Ct = None
self.Yf = None
self.Yt = None
self.Ybus = None
self.Ysh = None
self.Sbus = None
self.Ibus = None
self.Vbus = None
self.types = None
self.pq = None
self.pv = None
self.ref = None
def compute(self):
"""
Compute the cross connectivity matrices to determine the circuit connectivity towards the calculation
Additionally, compute the calculation matrices
"""
# --------------------------------------------------------------------------------------------------------------
# Connectivity matrices
# --------------------------------------------------------------------------------------------------------------
# switches connectivity matrix with the switches state applied
self.SW_T_state = diags(self.SW_states) * self.SW_T
# Branch-Switch connectivity matrix
self.BR_SW_f = self.BR_T_f * self.SW_T_state.transpose()
self.BR_SW_t = self.BR_T_t * self.SW_T_state.transpose()
# Node-Switch connectivity matrix
self.CN_SW = self.CN_T * self.SW_T_state.transpose()
# load-Connectivity Node matrix
self.LD_CN = self.LD_T * self.CN_T.transpose()
# generator-Connectivity Node matrix
self.GEN_CN = self.GEN_T * self.CN_T.transpose()
# shunt-Connectivity Node matrix
self.SH_CN = self.SH_T * self.CN_T.transpose()
# branch-node connectivity matrix (Equals A^t)
# A branch and a node can be connected via a switch or directly
self.Cf = self.CN_SW * self.BR_SW_f.transpose() + self.CN_T * self.BR_T_f.transpose()
self.Ct = self.CN_SW * self.BR_SW_t.transpose() + self.CN_T * self.BR_T_t.transpose()
self.BR_CN = (self.Cf - self.Ct).transpose()
# node-node connectivity matrix
self.CN_CN = self.BR_CN.transpose() * self.BR_CN
self.CN_CN = self.CN_CN.astype(bool).astype(int)
# --------------------------------------------------------------------------------------------------------------
# Calculation matrices
# --------------------------------------------------------------------------------------------------------------
# form the power injections vector
PD = self.LD_CN.transpose() * self.LD_Power # demand (complex)
PG = self.GEN_CN.transpose() * self.Gen_Power # generation (real)
self.Sbus = (PG - PD) / self.Sbase
self.Ibus = np.zeros_like(self.Sbus)
# types logic:
# if the number is < 10 -> PQ
# if the number is >= 10 -> PV
# later, choose a PV gen as Slack
self.types = (self.LD_CN.sum(axis=0).A1 + self.GEN_CN.sum(axis=0).A1 * 10).reshape(-1)
# Voltage vector
# self.Vbus = self.GEN_CN.transpose() * self.Gen_voltage
self.Vbus = np.ones_like(self.Sbus)
# form the shunt vector
self.Ysh = self.SH_CN.transpose() * self.SH_Power
# form the admittance matrix
self.Yf = diags(self.BR_yff) * self.Cf.transpose() + diags(self.BR_yft) * self.Ct.transpose()
self.Yt = diags(self.BR_ytf) * self.Cf.transpose() + diags(self.BR_ytt) * self.Ct.transpose()
self.Ybus = self.Cf * self.Yf + self.Ct * self.Yt + diags(self.Ysh)
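        # The line above is the standard sparse bus-admittance assembly:
        #   Ybus = Cf * Yf + Ct * Yt + diag(Ysh),
        # where Cf/Ct map branches to their from/to nodes and Ysh collects
        # the shunt devices aggregated per node.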
self.pq = np.where(self.types < 10)[0]
self.pv = np.where(self.types >= 10)[0]
if self.ref is None:
self.ref = self.pv[0]
            self.pv = self.pv[1:]  # keep all but the first PV bus, which became the slack (ref)
def print(self):
"""
print the connectivity matrices
:return:
"""
print('\nCN_T\n', pd.DataFrame(self.CN_T.todense(), index=self.CN_names, columns=self.T_names).to_latex())
print('\nBR_T_f\n', pd.DataFrame(self.BR_T_f.todense(), index=self.BR_names, columns=self.T_names).to_latex())
print('\nBR_T_t\n', pd.DataFrame(self.BR_T_t.todense(), index=self.BR_names, columns=self.T_names).to_latex())
print('\nSW_T\n', pd.DataFrame(self.SW_T.todense(), index=self.SW_names, columns=self.T_names).to_latex())
print('\nSW_states\n', pd.DataFrame(self.SW_states, index=self.SW_names, columns=['States']).to_latex())
# resulting
print('\n\n' + '-' * 40 + ' RESULTS ' + '-' * 40 + '\n')
print('\nLD_CN\n', pd.DataFrame(self.LD_CN.todense(), index=self.LD_names, columns=self.CN_names).to_latex())
print('\nSH_CN\n', pd.DataFrame(self.SH_CN.todense(), index=self.SH_names, columns=self.CN_names).to_latex())
print('\nGEN_CN\n', pd.DataFrame(self.GEN_CN.todense(), index=self.GEN_names, columns=self.CN_names).to_latex())
print('\nBR_CN\n', pd.DataFrame(self.BR_CN.astype(int).todense(), index=self.BR_names, columns=self.CN_names).to_latex())
print('\nCN_CN\n', pd.DataFrame(self.CN_CN.todense(), index=self.CN_names, columns=self.CN_names).to_latex())
print('\ntypes\n', self.types)
print('\nSbus\n', self.Sbus)
print('\nVbus\n', self.Vbus)
print('\nYsh\n', self.Ysh)
print('\nYbus\n', self.Ybus.todense())
class Circuit:
def __init__(self, Sbase=100):
"""
Circuit constructor
"""
self.Sbase = Sbase
self.connectivity_nodes = list()
self.terminals = list()
self.switches = list()
self.branches = list()
self.jumpers = list()
self.loads = list()
self.shunts = list()
self.generators = list()
self.nodes_idx = dict()
self.terminals_idx = dict()
# relations between connectivity nodes and terminals
# node_terminal[some_node] = list of terminals
self.node_terminal = dict()
def add_node_terminal_relation(self, connectivity_node, terminal):
"""
Add the relation between a Connectivity Node and a Terminal
:param terminal:
:param connectivity_node:
:return:
"""
if connectivity_node in self.node_terminal.keys():
self.node_terminal[connectivity_node].append(terminal)
else:
self.node_terminal[connectivity_node] = [terminal]
def add_connectivity_node(self, node):
"""
add a Connectivity node
:param node:
:return:
"""
self.connectivity_nodes.append(node)
def add_terminal(self, terminal):
self.terminals.append(terminal)
def add_switch(self, switch):
"""
Add a switch
:param switch:
:return:
"""
self.switches.append(switch)
def add_branch(self, branch):
"""
Add a branch
:param branch:
:return:
"""
self.branches.append(branch)
def add_jumper(self, jumper):
"""
:param jumper:
"""
self.jumpers.append(jumper)
def add_load(self, load):
"""
:param load:
"""
self.loads.append(load)
def add_shunt(self, shunt):
"""
:param shunt:
"""
self.shunts.append(shunt)
def add_generator(self, generator):
"""
:param generator:
"""
self.generators.append(generator)
def load_file(self, fname):
"""
Load file
:param fname: file name
"""
xls = pd.ExcelFile(fname)
# Terminals
T_dict = dict()
df = pd.read_excel(xls, 'Terminals')
for i in range(df.shape[0]):
val = df.values[i, 0]
T = Terminal(val)
T_dict[val] = T
self.add_terminal(T)
# ConnectivityNodes
CN_dict = dict()
df = pd.read_excel(xls, 'ConnectivityNodes')
for i in range(df.shape[0]):
val = df.values[i, 0]
CN = ConnectivityNode(val)
CN_dict[val] = CN
self.add_connectivity_node(CN)
# Branches
df = pd.read_excel(xls, 'Branches')
for i in range(df.shape[0]):
T1 = T_dict[df.values[i, 1]]
T2 = T_dict[df.values[i, 2]]
r = df.values[i, 3]
x = df.values[i, 4]
r0 = df.values[i, 5]
x0 = df.values[i, 6]
g = df.values[i, 7]
b = df.values[i, 8]
g0 = df.values[i, 9]
b0 = df.values[i, 10]
l = df.values[i, 11]
self.add_branch(Line(df.values[i, 0], T1, T2, r, x, r0, x0, g, b, g0, b0, l))
df = pd.read_excel(xls, 'Jumpers')
for i in range(df.shape[0]):
T1 = T_dict[df.values[i, 1]]
T2 = T_dict[df.values[i, 2]]
self.add_branch(Jumper(df.values[i, 0], T1, T2))
# Switches
df = pd.read_excel(xls, 'Switches')
for i in range(df.shape[0]):
T1 = T_dict[df.values[i, 1]]
T2 = T_dict[df.values[i, 2]]
state = bool(df.values[i, 3])
self.add_switch(Switch(df.values[i, 0], T1, T2, state))
# Loads
df = pd.read_excel(xls, 'Loads')
for i in range(df.shape[0]):
T1 = T_dict[df.values[i, 1]]
p = df.values[i, 2]
q = df.values[i, 3]
self.add_load(Load(df.values[i, 0], T1, p, q))
# shunts
df = pd.read_excel(xls, 'Shunts')
for i in range(df.shape[0]):
T1 = T_dict[df.values[i, 1]]
g = df.values[i, 2]
b = df.values[i, 3]
self.add_shunt(Shunt(df.values[i, 0], T1, g, b))
# Generators
df = pd.read_excel(xls, 'Generators')
for i in range(df.shape[0]):
T1 = T_dict[df.values[i, 1]]
p = df.values[i, 2]
vset = df.values[i, 3]
self.add_generator(Generator(df.values[i, 0], T1, p, vset))
# CN_T
df = pd.read_excel(xls, 'CN_T')
for i in range(df.shape[0]):
CN = CN_dict[df.values[i, 0]]
T = T_dict[df.values[i, 1]]
self.add_node_terminal_relation(CN, T)
def compile(self):
"""
Compile the circuit
"""
n_nodes = len(self.connectivity_nodes)
n_terminals = len(self.terminals)
n_br = len(self.branches) + len(self.jumpers)
n_sw = len(self.switches)
n_ld = len(self.loads)
n_gen = len(self.generators)
n_sh = len(self.shunts)
self.nodes_idx = dict() # dictionary of node object -> node index
self.terminals_idx = dict() # dictionary of terminals -> terminal index
conn = Connectivity(n_terminals=n_terminals,
n_nodes=n_nodes,
n_br=n_br,
n_sw=n_sw,
n_ld=n_ld,
n_gen=n_gen,
n_sh=n_sh,
Sbase=self.Sbase)
# Terminals
for i, terminal in enumerate(self.terminals):
self.terminals_idx[terminal] = i
conn.T_names[i] = terminal.name
# Connectivity Nodes
for i, node in enumerate(self.connectivity_nodes):
self.nodes_idx[node] = i
conn.CN_names[i] = node.name
terminals = self.node_terminal[node]
for terminal in terminals:
j = self.terminals_idx[terminal]
conn.CN_T[i, j] = 1
# Switches
for i, switch in enumerate(self.switches):
j = self.terminals_idx[switch.t1]
conn.SW_T[i, j] = 1
j = self.terminals_idx[switch.t2]
conn.SW_T[i, j] = 1
conn.SW_states[i] = int(switch.state)
conn.SW_names[i] = switch.name
# Branches (lines, transformers and jumpers)
for i, branch in enumerate(self.branches):
# from
f = self.terminals_idx[branch.t1]
conn.BR_T_f[i, f] = 1
# to
t = self.terminals_idx[branch.t2]
conn.BR_T_t[i, t] = 1
# name
conn.BR_names[i] = branch.name
# branch admittances
yff, yft, ytf, ytt = branch.get_y()
conn.BR_yff[i] = yff
conn.BR_yft[i] = yft
conn.BR_ytf[i] = ytf
conn.BR_ytt[i] = ytt
# Loads
for i, load in enumerate(self.loads):
j = self.terminals_idx[load.terminal]
conn.LD_T[i, j] = 1
conn.LD_names[i] = load.name
conn.LD_Power[i] = complex(load.P, load.Q)
# Generators
for i, generator in enumerate(self.generators):
j = self.terminals_idx[generator.terminal]
conn.GEN_T[i, j] = 1
conn.GEN_names[i] = generator.name
conn.Gen_Power[i] = generator.P
conn.Gen_voltage[i] = generator.Vset
# Shunts
for i, shunt in enumerate(self.shunts):
j = self.terminals_idx[shunt.terminal]
conn.SH_T[i, j] = 1
conn.SH_names[i] = shunt.name
conn.SH_Power[i] = complex(shunt.G, shunt.B)
# compute topology
conn.compute()
return conn
class PowerFlow:
def __init__(self, circuit: Circuit):
self.circuit = circuit
def run(self):
"""
Run power flow
:return:
"""
# compile circuit
conn = self.circuit.compile()
# run power flow
V, converged, normF, Scalc, iter_, elapsed = IwamotoNR(Ybus=conn.Ybus,
Sbus=conn.Sbus,
V0=conn.Vbus,
Ibus=conn.Ibus,
pv=conn.pv,
pq=conn.pq,
                                                              tol=1e-6,  # convergence tolerance; the original passed conn.ref (the slack bus index) here, which looks like a bug
max_it=15,
robust=False)
return V
if __name__ == '__main__':
circuit = Circuit()
# circuit.load_file('substation_data.xlsx')
circuit.load_file('lynn5.xlsx')
conn_ = circuit.compile()
conn_.print()
pf = PowerFlow(circuit)
Vsol = pf.run()
print('\nVsol:', np.abs(Vsol))
| gpl-3.0 | -660,275,719,816,353,900 | 29.822715 | 129 | 0.517795 | false | 3.464197 | false | false | false |
jmuhlich/bayessb | examples/robertson/figs_msb_paper.py | 1 | 10106 | import bayessb
from pysb.examples.robertson import model
import pysb.integrate
import numpy
import matplotlib.pyplot as plt
import matplotlib.gridspec as mgridspec
import matplotlib.ticker as mticker
import functools
import sys
def likelihood(mcmc, position, data, scale_factor, sigma):
yout = mcmc.simulate(position)
yout_norm = yout / scale_factor
# fit to first two species
return numpy.sum((data[:,0:2] - yout_norm[:,0:2]) ** 2 / (2 * sigma ** 2))
def prior(mcmc, position):
est = [1e-2, 1e7, 1e4]
mean = numpy.log10(est)
var = 10
return numpy.sum((position - mean) ** 2 / ( 2 * var))
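# Note (added): both functions above return negative log-densities up to an
# additive constant -- a Gaussian likelihood with standard deviation 'sigma'
# on the first two normalized species, and an independent Gaussian prior in
# log10-parameter space centred on 'est' with variance 10.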
def step(mcmc):
if mcmc.iter % 20 == 0:
print 'iter=%-5d sigma=%-.3f T=%-.3f acc=%-.3f, lkl=%g prior=%g post=%g' % \
(mcmc.iter, mcmc.sig_value, mcmc.T, float(mcmc.acceptance)/(mcmc.iter+1), mcmc.accept_likelihood,
mcmc.accept_prior, mcmc.accept_posterior)
def scatter(mcmc, mask=True, example_pos_r=None, example_pos_g=None,
show_model=False):
"""
Display a grid of scatter plots for each 2-D projection of an MCMC walk.
Parameters
----------
mcmc : bayessb.MCMC
The MCMC object to display.
mask : bool/int, optional
If True (default) the annealing phase of the walk will be discarded
before plotting. If False, nothing will be discarded and all points will
be plotted. If an integer, specifies the number of steps to be discarded
from the beginning of the walk.
"""
# number of dimensions in position vector
ndims = mcmc.num_estimate
# vector of booleans indicating accepted MCMC moves
accepts = mcmc.accepts.copy()
# mask off the annealing (burn-in) phase, or up to a user-specified step
if mask is True:
mask = mcmc.options.anneal_length
if mask is False:
mask = 0
accepts[0:mask] = 0
# grab position vectors and posterior values from accepted moves
positions = mcmc.positions[accepts]
posteriors = mcmc.posteriors[accepts]
# calculate actual range of values on each dimension
maxes = positions.max(0)
mins = positions.min(0)
ranges = abs(maxes - mins)
# use 2% of the maximum range as a margin for all scatter plots
margin = max(ranges) * 0.02
# calculate upper and lower plot limits based on min/max plus the margin
lims_top = maxes + margin
lims_bottom = mins - margin
# calculate new ranges based on limits
lim_ranges = abs(lims_top - lims_bottom)
plt.figure()
# build a GridSpec which allocates space based on these ranges
gs = mgridspec.GridSpec(ndims, ndims, width_ratios=lim_ranges,
height_ratios=lim_ranges[-1::-1])
# build an axis locator for each dimension
locators = []
for i, r in enumerate(lim_ranges):
# place ticks on the integers, unless there is no integer within the
# given dimension's calculated range
nbins = numpy.ceil(r) * 5 + 1
locators.append(mticker.MaxNLocator(nbins=nbins, steps=[2, 10]))
fignum = 0
# reverse the param list along the y axis so we end up with the "origin"
# (i.e. the first param) at the bottom left instead of the top left. note
# that y==0 will be along the bottom now, but the figure numbers in the
# gridspec still begin counting at the top.
for y, py in reversed(list(enumerate(mcmc.options.estimate_params))):
for x, px in enumerate(mcmc.options.estimate_params):
ax = plt.subplot(gs[fignum])
ax.tick_params(left=False, right=True, top=True, bottom=False,
labelleft=False, labelright=False, labeltop=False,
labelbottom=False, direction='in')
ax.yaxis.set_major_locator(locators[y])
ax.xaxis.set_major_locator(locators[x])
if x == y:
# 1-D histograms along the diagonal
#
# distribute 200 total bins across all histograms,
# proportionally by their width, such that the bin density looks
# consistent across the different histograms
                bins = int(200 * lim_ranges[x] / numpy.sum(lim_ranges))
ax.hist(positions[:,x], bins=bins, histtype='stepfilled',
color='salmon', ec='tomato')
if example_pos_r is not None:
ax.vlines(example_pos_r[x], *ax.get_ylim(),
color='red', linewidth=2)
if example_pos_g is not None:
ax.vlines(example_pos_g[x], *ax.get_ylim(),
color='green', linewidth=2)
arrow_scale = ax.get_ylim()[1] / lim_ranges[x]
arrow_len = arrow_scale * 0.1
arrow_head_l = arrow_len * 0.4
arrow_head_w = min(lim_ranges) * .1
ax.arrow(numpy.log10(px.value), arrow_len, 0, -arrow_len,
head_length=arrow_head_l, head_width=arrow_head_w,
ec='k', fc='k', length_includes_head=True)
ax.set_xlim(lims_bottom[x], lims_top[x])
#ax.yaxis.set_major_locator(mticker.NullLocator())
ax.yaxis.set_major_locator(mticker.LinearLocator())
else:
# 2-D scatter plots off the diagonal
ax.plot(positions[:, x], positions[:, y], color='darkblue',
alpha=0.2)
ax.scatter(positions[:, x], positions[:, y], s=1, color='darkblue',
alpha=0.2)
ax.set_xlim(lims_bottom[x], lims_top[x])
ax.set_ylim(lims_bottom[y], lims_top[y])
# parameter name labels along left and bottom edge of the grid
if x == 0:
ax.set_ylabel(py.name, weight='black', size='large',
labelpad=10, rotation='horizontal',
horizontalalignment='right')
if y == 0:
ax.set_xlabel(px.name, weight='black', size='large',
labelpad=10,)
# tick labels along the right and top edge of the grid
if True:#x == ndims - 1: # XXX
ax.tick_params('y', labelright=True)
if y == ndims - 1:
ax.tick_params('x', labeltop=True)
# move to next figure in the gridspec
fignum += 1
# TODO: would axis('scaled') force the aspect ratio we want?
def prediction(mcmc, n, species_idx, scale_factor, data_std, plot_samples=False):
plt.figure()
positions = mcmc.positions[-n:]
accepts = mcmc.accepts[-n:]
accept_positions = positions[accepts]
tspan = mcmc.options.tspan
ysamples = numpy.empty((len(accept_positions), len(tspan)))
for i, pos in enumerate(accept_positions):
ysim = mcmc.simulate(pos)
ysamples[i] = ysim[:, species_idx] / scale_factor
ymean = numpy.mean(ysamples, 0)
ystd = numpy.std(ysamples, 0)
if plot_samples:
for y in ysamples:
plt.plot(tspan, y, c='gray', alpha=.01)
plt.plot(tspan, ymean, 'b:', linewidth=2)
std_interval = ystd[:, None] * [+1, -1]
plt.plot(tspan, ymean[:, None] + std_interval * 0.842, 'g-.', linewidth=2)
plt.plot(tspan, ymean[:, None] + std_interval * 1.645, 'k-.', linewidth=2)
plt.errorbar(tspan, ymean, yerr=data_std, fmt=None, ecolor='red')
plt.xlim(tspan[0] - 1, tspan[-1] + 1)
def data(mcmc, data_norm, scale_factor, data_species_idxs):
plt.figure()
colors = ('r', 'g', 'b')
labels = ('A', 'B', 'C')
tspan = mcmc.options.tspan
true_pos = numpy.log10([p.value for p in mcmc.options.estimate_params])
true_norm = mcmc.simulate(true_pos) / scale_factor
for i, (rl, dl, c, l) in enumerate(zip(true_norm.T, data_norm.T,
colors, labels)):
plt.plot(tspan, rl, color=c, label=l)
if i in data_species_idxs:
plt.plot(tspan, dl, linestyle=':', marker='o', color=c, ms=4, mew=0)
def main():
seed = 2
random = numpy.random.RandomState(seed)
    sigma = 0.1
    ntimes = 20
    tspan = numpy.linspace(0, 40, ntimes)
solver = pysb.integrate.Solver(model, tspan)
solver.run()
    ydata = solver.y * (random.randn(*solver.y.shape) * sigma + 1)
ysim_max = solver.y.max(0)
ydata_norm = ydata / ysim_max
opts = bayessb.MCMCOpts()
opts.model = model
opts.tspan = tspan
# estimate rates only (not initial conditions) from wild guesses
opts.estimate_params = [p for p in model.parameters if p.name.startswith('k') ]
opts.initial_values = [1e-4, 1e3, 1e6]
opts.nsteps = 10000
opts.likelihood_fn = functools.partial(likelihood, data=ydata_norm,
scale_factor=ysim_max, sigma=sigma)
opts.prior_fn = prior
opts.step_fn = step
opts.use_hessian = True
opts.hessian_period = opts.nsteps / 10
opts.seed = seed
mcmc = bayessb.MCMC(opts)
mcmc.run()
mixed_nsteps = opts.nsteps / 2
mixed_positions = mcmc.positions[-mixed_nsteps:]
mixed_accepts = mcmc.accepts[-mixed_nsteps:]
mixed_accept_positions = mixed_positions[mixed_accepts]
marginal_mean_pos = numpy.mean(mixed_accept_positions, 0)
# position is far from marginal mean, but posterior is good (determined by
# trial and error and some interactive plotting)
interesting_step = 8830
print "\nGenerating figures..."
# show scatter plot
scatter(mcmc, opts.nsteps / 2, mcmc.positions[interesting_step],
marginal_mean_pos)
# show prediction for C trajectory, which was not fit to
prediction(mcmc, opts.nsteps / 2, 2, ysim_max[2], sigma, plot_samples=True)
plt.title("Prediction for C")
# show "true" trajectories and noisy data
data(mcmc, ydata_norm, ysim_max, [0, 1])
plt.title("True trajectories and noisy data")
# show all plots at once
plt.show()
if __name__ == '__main__':
main()
| bsd-2-clause | -3,516,009,876,200,924,000 | 40.93361 | 109 | 0.596279 | false | 3.475241 | false | false | false |
fraser-lab/EMRinger | Phenix_Scripts/em_rscc.py | 1 | 4393 |
"""
Script to calculate per-residue RSCCs for a model versus an EM map with an
arbitrary origin.
"""
from __future__ import division
from mmtbx import real_space_correlation
import iotbx.phil
from cctbx import crystal
from cctbx import maptbx
from scitbx.array_family import flex
import sys
master_phil_str = """
model = None
.type = path
map = None
.type = path
d_min = 3.0
.type = float
.help = Optional cutoff resolution for computing F(calc). This will not \
affect the dimensions of the ultimate FC map.
atom_radius = 1.5
.type = float
"""
def run (args, out=sys.stdout) :
cmdline = iotbx.phil.process_command_line_with_files(
args=args,
master_phil_string=master_phil_str,
pdb_file_def="model",
map_file_def="map",
usage_string="""\
em_rscc.py model.pdb map.ccp4
%s""" % __doc__)
params = cmdline.work.extract()
assert (not None in [params.model, params.map])
pdb_in = cmdline.get_file(params.model).file_object
m = cmdline.get_file(params.map).file_object
print >> out, "Input electron density map:"
print >> out, "m.all() :", m.data.all()
print >> out, "m.focus() :", m.data.focus()
print >> out, "m.origin():", m.data.origin()
print >> out, "m.nd() :", m.data.nd()
print >> out, "m.size() :", m.data.size()
print >> out, "m.focus_size_1d():", m.data.focus_size_1d()
print >> out, "m.is_0_based() :", m.data.is_0_based()
print >> out, "map: min/max/mean:", flex.min(m.data), flex.max(m.data), flex.mean(m.data)
print >> out, "unit cell:", m.unit_cell_parameters
symm = crystal.symmetry(
space_group_symbol="P1",
unit_cell=m.unit_cell_parameters)
xrs = pdb_in.input.xray_structure_simple(crystal_symmetry=symm)
print >> out, "Setting up electron scattering table (d_min=%g)" % params.d_min
xrs.scattering_type_registry(
d_min=params.d_min,
table="electron")
fc = xrs.structure_factors(d_min=params.d_min).f_calc()
cg = maptbx.crystal_gridding(
unit_cell=symm.unit_cell(),
space_group_info=symm.space_group_info(),
pre_determined_n_real=m.data.all())
fc_map = fc.fft_map(
crystal_gridding=cg).apply_sigma_scaling().real_map_unpadded()
assert (fc_map.all() == fc_map.focus() == m.data.all())
em_data = m.data.as_double()
unit_cell_for_interpolation = m.grid_unit_cell()
frac_matrix = unit_cell_for_interpolation.fractionalization_matrix()
sites_cart = xrs.sites_cart()
sites_frac = xrs.sites_frac()
print >> out, "PER-RESIDUE CORRELATION:"
for chain in pdb_in.hierarchy.only_model().chains() :
for residue_group in chain.residue_groups() :
i_seqs = residue_group.atoms().extract_i_seq()
values_em = flex.double()
values_fc = flex.double()
for i_seq in i_seqs :
rho_em = maptbx.non_crystallographic_eight_point_interpolation(
map=em_data,
gridding_matrix=frac_matrix,
site_cart=sites_cart[i_seq])
rho_fc = fc_map.eight_point_interpolation(sites_frac[i_seq])
values_em.append(rho_em)
values_fc.append(rho_fc)
cc = flex.linear_correlation(x=values_em, y=values_fc).coefficient()
print >> out, residue_group.id_str(), cc
def exercise () :
import mmtbx.regression
from iotbx import file_reader
from cStringIO import StringIO
pdb_file = "tmp_em_rscc.pdb"
map_file = "tmp_em_rscc.map"
f = open(pdb_file, "w")
for line in mmtbx.regression.model_1yjp.splitlines() :
if line.startswith("ATOM") :
f.write(line + "\n")
f.close()
pdb_in = file_reader.any_file(pdb_file).file_object
symm = crystal.symmetry(
space_group_symbol="P1",
unit_cell=(30, 30, 30, 90, 90, 90))
xrs = pdb_in.input.xray_structure_simple(crystal_symmetry=symm)
xrs.scattering_type_registry(
d_min=3.0,
table="electron")
fc = xrs.structure_factors(d_min=3.0).f_calc()
fft_map = fc.fft_map(resolution_factor=1/3).apply_sigma_scaling()
assert (fft_map.n_real() == (32,32,32))
fft_map.as_ccp4_map(
file_name=map_file,
gridding_first=(-16,-16,-16),
gridding_last=(15,15,15))
out = StringIO()
run(args=[pdb_file, map_file], out=out)
assert ("""\
PER-RESIDUE CORRELATION:
A 1 1.0
A 2 1.0
A 3 1.0
A 4 1.0
A 5 1.0
A 6 1.0
A 7 1.0
""" in out.getvalue()), out.getvalue()
if (__name__ == "__main__") :
if ("--test" in sys.argv) :
exercise()
print "OK"
else :
run(sys.argv[1:])
| bsd-3-clause | 266,659,448,091,182,000 | 31.301471 | 91 | 0.642841 | false | 2.768116 | false | false | false |
jzbontar/orange-tree | Orange/widgets/evaluate/owliftcurve.py | 1 | 8101 | """
Lift Curve Widget
-----------------
"""
from collections import namedtuple
import numpy
import sklearn.metrics as skl_metrics
from PyQt4 import QtGui
from PyQt4.QtGui import QColor, QPen
from PyQt4.QtCore import Qt
import pyqtgraph as pg
import Orange.data
import Orange.evaluation.testing
from Orange.widgets import widget, gui, settings
from Orange.widgets.utils import colorpalette, colorbrewer
from .owrocanalysis import convex_hull
CurvePoints = namedtuple(
"CurvePoints",
["cases", "tpr", "thresholds"]
)
CurvePoints.is_valid = property(lambda self: self.cases.size > 0)
LiftCurve = namedtuple(
"LiftCurve",
["points", "hull"]
)
LiftCurve.is_valid = property(lambda self: self.points.is_valid)
def LiftCurve_from_results(results, clf_index, target):
x, y, thresholds = lift_curve_from_results(results, target, clf_index)
points = CurvePoints(x, y, thresholds)
hull = CurvePoints(*convex_hull([(x, y, thresholds)]))
return LiftCurve(points, hull)
PlotCurve = namedtuple(
"PlotCurve",
["curve",
"curve_item",
"hull_item"]
)
class OWLiftCurve(widget.OWWidget):
name = "Lift Curve"
description = ""
icon = "icons/LiftCurve.svg"
priority = 1020
inputs = [
{"name": "Evaluation Results",
"type": Orange.evaluation.testing.Results,
"handler": "set_results"}
]
target_index = settings.Setting(0)
selected_classifiers = settings.Setting([])
display_convex_hull = settings.Setting(False)
display_cost_func = settings.Setting(True)
fp_cost = settings.Setting(500)
fn_cost = settings.Setting(500)
target_prior = settings.Setting(50.0)
def __init__(self, parent=None):
super().__init__(parent)
self.results = None
self.classifier_names = []
self.colors = []
self._curve_data = {}
box = gui.widgetBox(self.controlArea, "Plot")
tbox = gui.widgetBox(box, "Target Class")
tbox.setFlat(True)
self.target_cb = gui.comboBox(
tbox, self, "target_index", callback=self._on_target_changed)
cbox = gui.widgetBox(box, "Classifiers")
cbox.setFlat(True)
self.classifiers_list_box = gui.listBox(
cbox, self, "selected_classifiers", "classifier_names",
selectionMode=QtGui.QListView.MultiSelection,
callback=self._on_classifiers_changed)
gui.checkBox(box, self, "display_convex_hull",
"Show lift convex hull", callback=self._replot)
self.plotview = pg.GraphicsView(background="w")
self.plotview.setFrameStyle(QtGui.QFrame.StyledPanel)
self.plot = pg.PlotItem()
self.plot.getViewBox().setMenuEnabled(False)
pen = QPen(self.palette().color(QtGui.QPalette.Text))
tickfont = QtGui.QFont(self.font())
tickfont.setPixelSize(max(int(tickfont.pixelSize() * 2 // 3), 11))
axis = self.plot.getAxis("bottom")
axis.setTickFont(tickfont)
axis.setPen(pen)
axis.setLabel("P Rate")
axis = self.plot.getAxis("left")
axis.setTickFont(tickfont)
axis.setPen(pen)
axis.setLabel("TP Rate")
self.plot.showGrid(True, True, alpha=0.1)
self.plot.setRange(xRange=(0.0, 1.0), yRange=(0.0, 1.0))
self.plotview.setCentralItem(self.plot)
self.mainArea.layout().addWidget(self.plotview)
def set_results(self, results):
"""Set the input evaluation results."""
self.clear()
self.error(0)
if results is not None:
if results.data is None:
self.error(0, "Give me data!!")
results = None
elif not isinstance(results.data.domain.class_var,
Orange.data.DiscreteVariable):
self.error(0, "Need discrete class variable")
results = None
self.results = results
if results is not None:
self._initialize(results)
self._setup_plot()
def clear(self):
"""Clear the widget state."""
self.plot.clear()
self.results = None
self.target_cb.clear()
self.target_index = 0
self.classifier_names = []
self.colors = []
self._curve_data = {}
def _initialize(self, results):
N = len(results.predicted)
names = getattr(results, "learner_names", None)
if names is None:
names = ["#{}".format(i + 1) for i in range(N)]
self.colors = colorpalette.ColorPaletteGenerator(
N, colorbrewer.colorSchemes["qualitative"]["Dark2"])
self.classifier_names = names
self.selected_classifiers = list(range(N))
for i in range(N):
item = self.classifiers_list_box.item(i)
item.setIcon(colorpalette.ColorPixmap(self.colors[i]))
self.target_cb.addItems(results.data.domain.class_var.values)
def plot_curves(self, target, clf_idx):
if (target, clf_idx) not in self._curve_data:
curve = LiftCurve_from_results(self.results, clf_idx, target)
color = self.colors[clf_idx]
pen = QPen(color, 1)
pen.setCosmetic(True)
shadow_pen = QPen(pen.color().lighter(160), 2.5)
shadow_pen.setCosmetic(True)
item = pg.PlotDataItem(
curve.points[0], curve.points[1],
pen=pen, shadowPen=shadow_pen,
symbol="+", symbolSize=3, symbolPen=shadow_pen,
antialias=True
)
hull_item = pg.PlotDataItem(
curve.hull[0], curve.hull[1],
pen=pen, antialias=True
)
self._curve_data[target, clf_idx] = \
PlotCurve(curve, item, hull_item)
return self._curve_data[target, clf_idx]
def _setup_plot(self):
target = self.target_index
selected = self.selected_classifiers
curves = [self.plot_curves(target, clf_idx) for clf_idx in selected]
for curve in curves:
self.plot.addItem(curve.curve_item)
if self.display_convex_hull:
hull = convex_hull([c.curve.hull for c in curves])
self.plot.plot(hull[0], hull[1], pen="y", antialias=True)
pen = QPen(QColor(100, 100, 100, 100), 1, Qt.DashLine)
pen.setCosmetic(True)
self.plot.plot([0, 1], [0, 1], pen=pen, antialias=True)
def _replot(self):
self.plot.clear()
if self.results is not None:
self._setup_plot()
def _on_target_changed(self):
self._replot()
def _on_classifiers_changed(self):
self._replot()
def lift_curve_from_results(results, target, clf_idx, subset=slice(0, -1)):
actual = results.actual[subset]
scores = results.probabilities[clf_idx][subset][:, target]
yrate, tpr, thresholds = lift_curve(actual, scores, target)
return yrate, tpr, thresholds
def lift_curve(ytrue, ypred, target=1):
P = numpy.sum(ytrue == target)
N = ytrue.size - P
fpr, tpr, thresholds = skl_metrics.roc_curve(ytrue, ypred, target)
rpp = fpr * (N / (P + N)) + tpr * (P / (P + N))
return rpp, tpr, thresholds
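# Worked example (numbers are hypothetical, for illustration only): with
# P = N = 50 and a threshold where fpr = 0.2 and tpr = 0.8,
# rpp = 0.2 * 0.5 + 0.8 * 0.5 = 0.5, i.e. flagging half the population
# recovers 80% of the positives.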
def main():
import sip
from PyQt4.QtGui import QApplication
from Orange.classification import logistic_regression, svm
from Orange.evaluation import testing
app = QApplication([])
w = OWLiftCurve()
w.show()
w.raise_()
data = Orange.data.Table("ionosphere")
results = testing.CrossValidation(
data,
[logistic_regression.LogisticRegressionLearner(penalty="l2"),
logistic_regression.LogisticRegressionLearner(penalty="l1"),
svm.SVMLearner(probability=True),
svm.NuSVMLearner(probability=True)
],
store_data=True
)
results.learner_names = ["LR l2", "LR l1", "SVM", "Nu SVM"]
w.set_results(results)
rval = app.exec_()
sip.delete(w)
del w
app.processEvents()
del app
return rval
if __name__ == "__main__":
main()
| gpl-3.0 | -6,325,582,373,010,567,000 | 28.458182 | 76 | 0.602642 | false | 3.540647 | false | false | false |
ingadhoc/odoo-nautical | nautical_x/wizard/contract_wizard.py | 2 | 1680 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp.osv import fields, osv
from openerp import _
class create_contract(osv.osv_memory):
_name = 'create_contract'
_description = 'Wizard to create contract'
_rec_name = 'start_date'
_columns = {
'start_date': fields.date(string='Contract Date', required=True),
'start_code': fields.char(string='Contract Number', required=True),
'expiration_date': fields.date(string='Expiration Date'),
}
_defaults = {
}
def create_contract(self, cr, uid, ids, context=None):
wizard = self.browse(cr, uid, ids)[0]
active_id = context.get('active_id', False)
contract_obj = self.pool.get('nautical.contract')
craft_obj = self.pool.get('nautical.craft')
craft = craft_obj.browse(cr, uid, [active_id])[0]
if active_id:
start_date = wizard.start_date
start_code = wizard.start_code
expiration_date = wizard.expiration_date
new_contract_vals = {'start_date': start_date, 'start_code': start_code, 'expiration_date':
expiration_date, 'owner_id': craft.owner_id.id, 'craft_id': craft.id, 'state': 'contracted'}
contract_obj.create(
cr, uid, new_contract_vals, context=context)
craft_obj.write(
cr, uid, craft.id, {'state': 'contracted'}, context=context)
return True
| agpl-3.0 | 3,110,272,112,382,063,000 | 38.069767 | 125 | 0.544048 | false | 3.971631 | false | false | false |
lzhjie/benchmark | db_bench/DbBench.py | 1 | 15971 | # coding: utf-8
# Copyright (C) zhongjie luo <l.zhjie@qq.com>
import datetime, random, os, sys, copy, json
if sys.version_info.major >= 3:
from .tools.StopWatch import StopWatch
from .tools.ProgressBar import ProgressBar, MultiBar
from .tools.ColorPrint import ColorPrint
from .tools.MultiProcess import MultiProcess
from .tools.Options import Options as toolsOptions, Option, string2bool
else:
from tools.StopWatch import StopWatch
from tools.ProgressBar import ProgressBar, MultiBar
from tools.ColorPrint import ColorPrint
from tools.MultiProcess import MultiProcess
from tools.Options import Options as toolsOptions, Option, string2bool
from multiprocessing import Lock, Queue, Semaphore
class Options(toolsOptions):
options = (
Option("host", "h", "127.0.0.1"),
Option("port", "p", 0),
Option("processor_num", "n", 1),
Option("record_num", "r", 1000),
Option("processor_num_max", "n_max", 50),
Option("record_num_max", "r_max", 10000000),
Option("out_dir", "d", "result"),
Option("tag", "t", "tag",
help=u"添加到输出文件名中,可用于区分同类型测试\r\n" \
u"例如用时间来命名每次测试结果的输出文件\r\n"),
Option("table", "T", "__benchmark"),
Option("key_start", "k", 10000),
Option("w", "w", True, help="warm up, use --w enable"),
Option("quiet", "q", False, string2bool))
def __init__(self, options=None, args=None):
if options is None:
options = Options.options
super(Options, self).__init__(options, args)
def parse_option(self, raise_when_fail=False):
if super(Options, self).parse_option(raise_when_fail) is False:
print(self.usage() + self.help())
return False
return True
class DbConnection(object):
"""type(record)->((k, v), index, last_index)"""
def __init__(self, options):
self.name = options.get("_name")
self.host = options.get("host")
self.port = options.get("port")
self.table = options.get("table")
self.id = options.get("_id")
self.quiet = options.get("quiet")
self.record_num = options.get("_count_per_processor")
self.options = options
self._benchmark_funcs = {}
default_funcs = ("insert", "search", "update", "delete")
for func_name in default_funcs:
func = getattr(self, func_name, None)
func_self = getattr(DbConnection, func_name, None)
if getattr(func, "__code__") != getattr(func_self, "__code__"):
self._benchmark_funcs[func.__name__] = func
for func in self.__class__.__dict__.values():
if getattr(func, "benchmark", None) is True:
self._benchmark_funcs[func.__name__] = getattr(self, func.__name__)
def connect(self):
"""must override"""
raise NotImplemented
def disconnect(self):
"""must override"""
raise NotImplemented
def insert(self, record):
raise NotImplemented
def search(self, record):
raise NotImplemented
def update(self, record):
raise NotImplemented
def delete(self, record):
raise NotImplemented
def set_up(self):
"""invoke before benchmark"""
raise NotImplemented
def tear_down(self):
"""invoke after benchmark"""
raise NotImplemented
@staticmethod
def benchmark(label=None):
""":param label, for echarts label"""
def _benchmark(func):
func.benchmark = True
func.label = label if label else func.__name__
return func
return _benchmark
def benchmark_funcs(self):
"""benchmark_funcs()->{func_name: func}"""
return self._benchmark_funcs
def _warm_up(self, record):
(k, v), index, last_index = record
return True
def __str__(self):
return "%d %s[%s] %s:%s" % \
(self.id, self.name, self.table, self.host, self.port)
class Data(object):
def __init__(self, size, range_l=10000, options=None):
self.__size = int(size)
self.size = int(size)
self.range_l = int(range_l)
self.options = options
self.__cursor = int(0)
self.reset()
def hook_reset(self):
pass
def hook_get_key_and_value(self, index):
return (None, None)
def reset(self):
self.__cursor = 0
self.hook_reset()
def next(self):
if self.__cursor >= self.__size:
raise StopIteration()
item = self.hook_get_key_and_value(self.__cursor)
self.__cursor += 1
return item
def __next__(self):
return self.next()
def __len__(self):
return self.__size
def __iter__(self):
return self
class DataRecord(Data):
def __init__(self, size, range_l=10000, options=None):
super(DataRecord, self).__init__(size, range_l, options)
def hook_get_key_and_value(self, index):
key = str(index + self.range_l)
return (key, key)
class DataRandom(DataRecord):
def __init__(self, size, range_l=10000, options=None):
self.__seed = range_l + size
self.__range_l = range_l
self.__range_r = range_l + size * 10
self.__value = str(datetime.datetime.now()) + " "
super(DataRandom, self).__init__(size, range_l, options)
def hook_get_key_and_value(self, index):
return (str(random.randint(self.__range_l, self.__range_r)),
self.__value + str(index))
def hook_reset(self):
random.seed(self.__seed)
class DataFile(DataRecord):
def __init__(self, size, range_l=10000, options=None):
super(DataFile, self).__init__(size, range_l, options)
file_name = options.get("file", None)
if file_name is None:
raise Exception("require option file")
with open(file_name, "r") as fp:
self.lines = fp.readlines()
self.size = len(self.lines)
self.key = str(datetime.datetime.now()) + " " + str(range_l) + " "
def hook_get_key_and_value(self, index):
return (self.key + str(index), self.lines[index % self.size])
def benchmark(theme, data, watch, func, func_hook, context):
failed_counter = 0
data.reset()
size = len(data)
last_index = size - 1
step = size / 10
next_level = 0
__func_get_kv = data.hook_get_key_and_value
__func_hook = func_hook
__context = context
watch.reset()
if __func_hook is not None:
for index in range(size):
kv = __func_get_kv(index)
record = (kv, index, last_index)
if not func(record):
failed_counter += 1
if index >= next_level:
__func_hook(theme, record, __context)
next_level += step
if next_level > last_index:
next_level = last_index
else:
for index in range(size):
kv = __func_get_kv(index)
if not func((kv, index, last_index)):
failed_counter += 1
watch.stop()
return failed_counter
class DbBench:
def __init__(self, connection, data, hook_func=None, context=None):
if not issubclass(type(connection), DbConnection):
raise TypeError("param 1 must be a instance of DbConnection's subclass ")
if not issubclass(type(data), Data):
raise TypeError("param 2 must be a instance of Data's subclass ")
self.__connected = False
self.conn = connection
self.conn.connect()
self.__connected = True
self.data = data
self.__hook_func = hook_func
self.__result = {}
self.__context = context
self.__warm_up = False
if connection.options.get("w", False) is False:
self.__warm_up = True
def __del__(self):
if self.__connected:
self.conn.disconnect()
def get_result(self):
return self.__result
def __test_func(self, func, theme):
watch = StopWatch()
__benchmark = benchmark
m = sys.modules.get('db_bench.DbBench', None)
if m and m.__file__.endswith(".so") and DataRecord == self.data.__class__:
import importlib
temp = importlib.import_module("db_bench.DbBenchCython")
__benchmark = temp.benchmark_cython
# warm up
if self.__warm_up is False:
self.__warm_up = True
__benchmark("warmup", self.data, watch, self.conn._warm_up, self.__hook_func, self.__context)
failed_counter = __benchmark(theme, self.data, watch, func, self.__hook_func, self.__context)
cost = max(float("%.3f" % watch.seconds_float()), 0.001)
self.__result[theme] = {}
stat = self.__result[theme]
size = len(self.data)
stat["label"] = getattr(func, "label", theme)
stat["sum"] = size
stat["cost"] = cost
stat["qps"] = float("%.3f" % (size / cost))
stat["fail"] = failed_counter
def benchmark(self):
funcs = DbConnection.benchmark_funcs(self.conn)
for name, func in funcs.items():
self.__test_func(func, name)
def process_func(msg, context):
id = int(msg)
multi_bar = context["bar"]
options = context["options"]
options.set("_id", id)
def progress_bar(theme, record, context):
bar, bar_index = context
cur_index, last_index = record[1:]
if bar.check(bar_index, cur_index + 1):
bar.print_bar(bar_index, cur_index + 1, "%d %s" % (bar_index + 1, theme))
if cur_index == last_index:
bar.reset(bar_index)
data_count = context["data_count"]
key_start = options.get("key_start")
data = context["data_class"](data_count, key_start + id * data_count, options)
bar_index = id - 1
semaphore = context["semaphore"]
queue_startline = context["queue_startline"]
conn_c = context["connection_class"]
connection = conn_c(options)
    db_bench = None
    try:
if options.get("quiet") is True:
db_bench = DbBench(connection, data)
else:
db_bench = DbBench(connection, data,
hook_func=progress_bar, context=(multi_bar, bar_index))
multi_bar.reset(id)
queue_startline.put(id)
semaphore.acquire()
db_bench.benchmark()
context["queue"].put(db_bench.get_result(), True)
finally:
if db_bench:
del db_bench
del data
del connection
def multi_process_bench(options, connection_class, data_class=DataRecord):
if not isinstance(options, Options):
raise TypeError("param options must be a instance of Options")
if not issubclass(connection_class, DbConnection):
raise TypeError("param connection_class must be DbConnection's subclass ")
if not issubclass(data_class, Data):
raise TypeError("param data_class must be Data's subclass ")
processor_num = options.get("processor_num")
processor_num_max = options.get("processor_num_max")
record_num = options.get("record_num")
record_num_max = options.get("record_num_max")
    if processor_num > processor_num_max:
        processor_num = processor_num_max
        print("change processor_num to %d" % processor_num)
    if record_num > record_num_max:
        record_num = record_num_max
        print("change record_num to %d" % record_num)
count_per_processor = int(record_num / processor_num)
if count_per_processor <= 0:
print("count_per_processor is 0")
return
options.set("_id", 0)
def clear(func):
hook = connection_class.__dict__.get(func, None)
if hook is not None:
print("%s..." % func)
conn = connection_class(options)
conn.connect()
hook(conn)
conn.disconnect()
clear("set_up")
quiet = options.get("quiet")
if quiet:
bar = None
else:
bar = MultiBar(color=ColorPrint(36))
for i in range(processor_num):
bar.append_bar(ProgressBar(count_per_processor, "processor " + str(i)))
queue = Queue()
semaphore = Semaphore(processor_num)
options.set("_name", connection_class.__dict__.get("name", connection_class.__name__))
options.set("_count_per_processor", count_per_processor)
queue_startline = Queue()
context = {
"data_class": data_class,
"connection_class": connection_class,
"data_count": count_per_processor,
"bar": bar,
"lock": Lock(),
"queue": queue,
"queue_startline": queue_startline,
"semaphore": semaphore,
"options": copy.deepcopy(options)
}
pool = MultiProcess(processor_num, process_func, context, True)
# barrier lock
for i in range(processor_num):
semaphore.acquire()
for i in range(processor_num):
pool.process_msg(i + 1)
for i in range(processor_num):
queue_startline.get()
for i in range(processor_num):
semaphore.release()
pool.join()
clear("tear_down")
result = {
"stat": {},
"detail": [],
"dbinfo": {"type": options.get("_name"),
"host": options.get("host"),
"port": options.get("port"),
"table": options.get("table")}}
stat = result["stat"]
detail = result["detail"]
try:
for i in range(processor_num):
msg = queue.get(True, 1)
detail.append(copy.deepcopy(msg))
if len(stat) == 0:
result["stat"] = msg
stat = result["stat"]
continue
for k, v in msg.items():
target = stat[k]
target["fail"] += v["fail"]
target["sum"] += v["sum"]
target["cost"] = max(target["cost"], v["cost"])
    except Exception:
        raise RuntimeError("benchmark lost, name: " + options.get("_name"))
    if stat:
for k, v in stat.items():
v["qps"] = int(v["sum"] / v["cost"])
print("%s %s" % (str(k), str(v)))
out_dir = options.get("out_dir")
if os.path.exists(out_dir) is False:
os.mkdir(out_dir)
with open("%s/benchmark_%s_%d_%d_%s.json" % (out_dir,
options.get("_name").replace("_", " "),
record_num,
processor_num,
options.get("tag", "tag")), "w") as fp:
fp.write(json.dumps(result, indent=2))
return result
class ConnectionExample(DbConnection):
def __init__(self, options):
super(ConnectionExample, self).__init__(options)
self.__client = None
def connect(self):
self.__client = {}
def disconnect(self):
self.__client = None
@DbConnection.benchmark(u"测试")
def null(self, record):
return True
def insert(self, record):
k, v = record[0]
self.__client[k] = v
return True
def search(self, record):
k, v = record[0]
self.__client[k] = v
return self.__client.get(k) == v
def update(self, record):
return self.search(record)
def delete(self, record):
k, v = record[0]
return self.__client.pop(k, None) is not None
def clear(self):
self.__client = {}
def example():
option = Options()
option.set("record_num", 100000)
option.set("processor_num", 2)
if option.parse_option() is False:
return
# option.set("quiet", True)
print(option)
result = multi_process_bench(option, ConnectionExample)
print(result)
if __name__ == "__main__":
example()
| mit | -6,949,132,136,422,048,000 | 31.426531 | 105 | 0.561143 | false | 3.804837 | false | false | false |
larsks/cloud-init | cloudinit/config/cc_mcollective.py | 1 | 5204 | # Copyright (C) 2009-2011 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
#
# Author: Marc Cluet <marc.cluet@canonical.com>
# Based on code by Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
"""
Mcollective
-----------
**Summary:** install, configure and start mcollective
This module installs, configures and starts mcollective. If the ``mcollective``
key is present in config, then mcollective will be installed and started.
Configuration for ``mcollective`` can be specified in the ``conf`` key under
``mcollective``. Each config value consists of a key value pair and will be
written to ``/etc/mcollective/server.cfg``. The ``public-cert`` and
``private-cert`` keys, if present in conf may be used to specify the public and
private certificates for mcollective. Their values will be written to
``/etc/mcollective/ssl/server-public.pem`` and
``/etc/mcollective/ssl/server-private.pem``.
.. note::
The ec2 metadata service is readable by non-root users.
If security is a concern, use include-once and ssl urls.
**Internal name:** ``cc_mcollective``
**Module frequency:** per instance
**Supported distros:** all
**Config keys**::
mcollective:
conf:
<key>: <value>
public-cert: |
-------BEGIN CERTIFICATE--------
<cert data>
-------END CERTIFICATE--------
private-cert: |
-------BEGIN CERTIFICATE--------
<cert data>
-------END CERTIFICATE--------
"""
import errno
import six
from six import BytesIO
# Used since this can maintain comments
# and doesn't need a top level section
from configobj import ConfigObj
from cloudinit import log as logging
from cloudinit import util
PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem"
PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem"
SERVER_CFG = '/etc/mcollective/server.cfg'
LOG = logging.getLogger(__name__)
def configure(config, server_cfg=SERVER_CFG,
pubcert_file=PUBCERT_FILE, pricert_file=PRICERT_FILE):
# Read server.cfg (if it exists) values from the
# original file in order to be able to mix the rest up.
try:
old_contents = util.load_file(server_cfg, quiet=False, decode=False)
mcollective_config = ConfigObj(BytesIO(old_contents))
except IOError as e:
if e.errno != errno.ENOENT:
raise
else:
LOG.debug("Did not find file %s (starting with an empty"
" config)", server_cfg)
mcollective_config = ConfigObj()
for (cfg_name, cfg) in config.items():
if cfg_name == 'public-cert':
util.write_file(pubcert_file, cfg, mode=0o644)
mcollective_config[
'plugin.ssl_server_public'] = pubcert_file
mcollective_config['securityprovider'] = 'ssl'
elif cfg_name == 'private-cert':
util.write_file(pricert_file, cfg, mode=0o600)
mcollective_config[
'plugin.ssl_server_private'] = pricert_file
mcollective_config['securityprovider'] = 'ssl'
else:
if isinstance(cfg, six.string_types):
# Just set it in the 'main' section
mcollective_config[cfg_name] = cfg
elif isinstance(cfg, (dict)):
# Iterate through the config items, create a section if
# it is needed and then add/or create items as needed
if cfg_name not in mcollective_config.sections:
mcollective_config[cfg_name] = {}
for (o, v) in cfg.items():
mcollective_config[cfg_name][o] = v
else:
# Otherwise just try to convert it to a string
mcollective_config[cfg_name] = str(cfg)
try:
# We got all our config as wanted we'll copy
# the previous server.cfg and overwrite the old with our new one
util.copy(server_cfg, "%s.old" % (server_cfg))
except IOError as e:
if e.errno == errno.ENOENT:
# Doesn't exist to copy...
pass
else:
raise
# Now we got the whole (new) file, write to disk...
contents = BytesIO()
mcollective_config.write(contents)
util.write_file(server_cfg, contents.getvalue(), mode=0o644)
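# For illustration only -- a hypothetical call mirroring the module
# docstring; both values are plain strings, so they are merged into the
# 'main' section of server.cfg:
#
#     configure({'loglevel': 'debug', 'plugin.stomp.host': 'dbhost'})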
def handle(name, cfg, cloud, log, _args):
# If there isn't a mcollective key in the configuration don't do anything
if 'mcollective' not in cfg:
log.debug(("Skipping module named %s, "
"no 'mcollective' key in configuration"), name)
return
mcollective_cfg = cfg['mcollective']
# Start by installing the mcollective package ...
cloud.distro.install_packages(("mcollective",))
# ... and then update the mcollective configuration
if 'conf' in mcollective_cfg:
configure(config=mcollective_cfg['conf'])
# restart mcollective to handle updated config
util.subp(['service', 'mcollective', 'restart'], capture=False)
# vi: ts=4 expandtab
| gpl-3.0 | -6,567,645,452,130,444,000 | 34.401361 | 79 | 0.623174 | false | 3.826471 | true | false | false |
VirusTotal/msticpy | tests/test_timeseries.py | 1 | 2573 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import unittest
from pathlib import Path
import nbformat
import pandas as pd
import pytest
from nbconvert.preprocessors import CellExecutionError, ExecutePreprocessor
from msticpy.analysis.timeseries import timeseries_anomalies_stl
_NB_FOLDER = "docs/notebooks"
_NB_NAME = "TimeSeriesAnomaliesVisualization.ipynb"
_test_data_folders = [
d for d, _, _ in os.walk(os.getcwd()) if d.endswith("/docs/notebooks/data")
]
if len(_test_data_folders) == 1:
_TEST_DATA = _test_data_folders[0]
else:
_TEST_DATA = "./docs/notebooks/data"
class TestTimeSeries(unittest.TestCase):
"""Unit test class."""
def setUp(self):
input_file = os.path.join(_TEST_DATA, "TimeSeriesDemo.csv")
self.input_df = pd.read_csv(
input_file,
index_col=["TimeGenerated"],
usecols=["TimeGenerated", "TotalBytesSent"],
)
def test_timeseries_anomalies_stl(self):
out_df = timeseries_anomalies_stl(data=self.input_df)
self.assertIn("residual", out_df.columns)
self.assertIn("trend", out_df.columns)
self.assertIn("seasonal", out_df.columns)
self.assertIn("weights", out_df.columns)
self.assertIn("baseline", out_df.columns)
self.assertIn("score", out_df.columns)
self.assertIn("anomalies", out_df.columns)
self.assertGreater(len(out_df[out_df["anomalies"] == 1]), 0)
@pytest.mark.skipif(
not os.environ.get("MSTICPY_TEST_NOSKIP"), reason="Skipped for local tests."
)
def test_timeseries_controls(self):
nb_path = Path(_NB_FOLDER).joinpath(_NB_NAME)
abs_path = Path(_NB_FOLDER).absolute()
with open(nb_path) as f:
nb = nbformat.read(f, as_version=4)
ep = ExecutePreprocessor(timeout=600, kernel_name="python3")
try:
ep.preprocess(nb, {"metadata": {"path": abs_path}})
except CellExecutionError:
nb_err = str(nb_path).replace(".ipynb", "-err.ipynb")
msg = f"Error executing the notebook '{nb_path}'.\n"
msg += f"See notebook '{nb_err}' for the traceback."
print(msg)
with open(nb_err, mode="w", encoding="utf-8") as f:
nbformat.write(nb, f)
raise
| mit | -4,295,610,028,682,117,600 | 35.239437 | 84 | 0.597357 | false | 3.675714 | true | false | false |
KaranToor/MA450 | google-cloud-sdk/lib/surface/compute/target_pools/remove_health_checks.py | 6 | 2895 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for removing health checks from target pools."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute.http_health_checks import (
flags as http_health_check_flags)
from googlecloudsdk.command_lib.compute.target_pools import flags
class RemoveHealthChecks(base_classes.NoOutputAsyncMutator):
"""Remove an HTTP health check from a target pool.
*{command}* is used to remove an HTTP health check
from a target pool. Health checks are used to determine
the health status of instances in the target pool. For more
information on health checks and load balancing, see
[](https://cloud.google.com/compute/docs/load-balancing-and-autoscaling/)
"""
HEALTH_CHECK_ARG = None
TARGET_POOL_ARG = None
@classmethod
def Args(cls, parser):
cls.HEALTH_CHECK_ARG = (
http_health_check_flags.HttpHealthCheckArgumentForTargetPool(
'remove from'))
cls.HEALTH_CHECK_ARG.AddArgument(parser)
cls.TARGET_POOL_ARG = flags.TargetPoolArgument(
help_suffix=' from which to remove the health check.')
cls.TARGET_POOL_ARG.AddArgument(
parser, operation_type='remove health checks from')
@property
def service(self):
return self.compute.targetPools
@property
def method(self):
return 'RemoveHealthCheck'
@property
def resource_type(self):
return 'targetPools'
def CreateRequests(self, args):
http_health_check_ref = self.HEALTH_CHECK_ARG.ResolveAsResource(
args, self.resources)
target_pool_ref = self.TARGET_POOL_ARG.ResolveAsResource(
args,
self.resources,
scope_lister=compute_flags.GetDefaultScopeLister(self.compute_client,
self.project))
request = self.messages.ComputeTargetPoolsRemoveHealthCheckRequest(
region=target_pool_ref.region,
project=self.project,
targetPool=target_pool_ref.Name(),
targetPoolsRemoveHealthCheckRequest=(
self.messages.TargetPoolsRemoveHealthCheckRequest(
healthChecks=[self.messages.HealthCheckReference(
healthCheck=http_health_check_ref.SelfLink())])))
return [request]
| apache-2.0 | 5,334,987,423,451,908,000 | 35.64557 | 77 | 0.717098 | false | 4.07173 | false | false | false |
jamesrhaley/boilJS | GitBoil.py | 1 | 2946 | from subprocess import call
import shutil
import simplejson
from collections import OrderedDict
import os
import sys
# this is almost done but there is something wrong with the
# updating of the package.json file
## Helpers
# performs git clone into a new directory
def _clone_mkdir(git_url, new_name):
    whole_path = git_url + ' ' + new_name
    call('git clone ' + whole_path, shell=True)
def _remove_git(new_name):
git_path = new_name + '/.git'
shutil.rmtree(git_path)
def _prep_json(path):
    # load and clean up the package.json string before handing it to
    # simplejson; the context manager ensures the file is closed before
    # the caller later removes and rewrites it
    with open(path, 'r') as json_file:
        f = json_file.read()
    g = [item.strip() for item in f.split('\n')]
    together = ''.join(g)
    # load json as an OrderedDict to retain the original key order
    return simplejson.loads(together, object_pairs_hook=OrderedDict)
# object to collect appropriate data and to then use it
class Boil(object):
    @staticmethod
    def _keywords(pack_keys):
        if ',' in pack_keys:
            return pack_keys.split(',')
        else:
            return pack_keys.split()
@classmethod
def git_clone(cls, git_url, new_name):
_clone_mkdir(git_url, new_name)
_remove_git(new_name)
@classmethod
def cleanup_packagejson(cls, new_name, author, description, version,
license, pack_keys):
# package.json path
pack_path = new_name + '/package.json'
data = _prep_json(pack_path)
# update feilds. Need to update keywords
data["name"] = new_name
data["author"] = author
data["description"] = description
data["version"] = version
data["license"] = license
data["keywords"] = self._keywords(pack_keys)
# convert OrderedDict into a json string
outfile = simplejson.dumps(data, indent=4)
# remove old package.json and create/write a new one
os.remove(pack_path)
new_pack = open(pack_path, 'w')
new_pack.write(outfile)
new_pack.close()
@classmethod
def remove_licence(cls, new_name):
license_path = new_name + '/LICENCE'
try:
os.remove(license_path)
except:
print('Something went wrong when removing the license! Can\'t tell what?')
sys.exit(0) # quit Python
@classmethod
def clean_readme(cls, new_name):
readme_path = new_name + '/README.md'
# readme_path = 'new-JS' + '/README.md'
try:
os.remove(readme_path)
readme = open(readme_path,'w')
readme.close()
except:
print('Something went wrong when updating the readme! Can\'t tell what?')
sys.exit(0) # quit Python
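# Example usage -- a minimal sketch; the URL and package metadata below are
# hypothetical, not part of the original module:
if __name__ == '__main__':
    Boil.git_clone('https://github.com/example/boilerplate.git', 'my-app')
    Boil.cleanup_packagejson('my-app', author='Jane Doe',
                             description='demo app', version='0.1.0',
                             license='MIT', pack_keys='demo, example')
    Boil.remove_licence('my-app')
    Boil.clean_readme('my-app')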
| mit | -1,293,920,882,973,797,400 | 28.46 | 86 | 0.590631 | false | 3.901987 | false | false | false |
JoeJasinski/WindyTransit | mobiletrans/mtdistmap/route_planner.py | 1 | 2026 | from __future__ import division
from django.contrib.gis.geos import Point, fromstr
from mobiletrans.mtlocation import models
from mobiletrans.mtdistmap.cta_conn import load_transitnetwork
from mobiletrans.mtdistmap.transit_network import Path
def distance_to_time(distance, unit="m", units_per_min=60):
    return_value = getattr(distance, unit) * (1 / units_per_min)
return return_value
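# Worked example (hypothetical values): a GEOS Distance of 1500 m at the
# default 60 m/min gives 1500 * (1 / 60) = 25.0 minutes. A minimal sketch,
# assuming any object that exposes the requested unit as an attribute:
#
#     class _Dist(object):
#         m = 1500
#     assert distance_to_time(_Dist(), "m", 60) == 25.0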
class RoutePlanner(object):
def __init__(self, tn, unit="m", units_per_min=60, max_distance=1500, num_routes=2):
self.tn = tn
self.unit = unit
self.units_per_min = units_per_min
self.max_distance = max_distance
self.num_routes = num_routes
def get_distance_dict(self):
return {self.unit:self.max_distance}
def fastest_route_from_point(self, point, station_id):
distance_dict=self.get_distance_dict()
        stations = models.TransitStop.objects.filter(location_type=1).get_closest_x(point, distance_dict, number=self.num_routes)
paths = []
for station in stations:
path = self.tn.shortest_path(str(station.stop_id), station_id)
if path:
walking_distance = station.distance
                walking_time = distance_to_time(walking_distance, self.unit,
                                                self.units_per_min)
                new_path = Path(self.tn,
                                ["walk_%s" % walking_time] + path.stops,
                                path.total_time + walking_time)
paths.append(new_path)
        paths.sort(key=lambda x: x.total_time)
return paths
"""
from mobiletrans.mtdistmap.cta_conn import load_transitnetwork
from mobiletrans.mtdistmap.route_planner import RoutePlanner
from django.contrib.gis.geos import Point, fromstr
from_point = fromstr('POINT(%s %s)' % ("-87.66638826", "41.96182144"))
#from_point = GPlace.objects.get(name__icontains='Precious Blood Church')
tn = load_transitnetwork()
t = RoutePlanner(tn)
p = t.fastest_route_from_point(from_point, '41320')
"""
| mit | -1,532,410,679,510,258,700 | 44.022222 | 131 | 0.653011 | false | 3.399329 | false | false | false |
sissaschool/elementpath | elementpath/regex/unicode_subsets.py | 1 | 18850 | #
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
"""
This module defines Unicode character categories and blocks.
"""
from sys import maxunicode
from collections.abc import Iterable, MutableSet
from .unicode_categories import RAW_UNICODE_CATEGORIES
from .codepoints import code_point_order, code_point_repr, iter_code_points, get_code_point_range
class RegexError(Exception):
"""
Error in a regular expression or in a character class specification.
This exception is derived from `Exception` base class and is raised
only by the regex subpackage.
"""
def iterparse_character_subset(s, expand_ranges=False):
"""
    Parses a regex character subset, generating a sequence of code points
    and code point ranges. An unescaped hyphen (-) that is not at the
    start or at the end is interpreted as a range specifier.
:param s: a string representing the character subset.
:param expand_ranges: if set to `True` then expands character ranges.
:return: yields integers or couples of integers.
"""
escaped = False
on_range = False
char = None
length = len(s)
subset_index_iterator = iter(range(len(s)))
for k in subset_index_iterator:
if k == 0:
char = s[0]
if char == '\\':
escaped = True
elif char in r'[]' and length > 1:
raise RegexError("bad character %r at position 0" % char)
elif expand_ranges:
yield ord(char)
elif length <= 2 or s[1] != '-':
yield ord(char)
elif s[k] == '-':
if escaped or (k == length - 1):
char = s[k]
yield ord(char)
escaped = False
elif on_range:
char = s[k]
yield ord(char)
on_range = False
else:
# Parse character range
on_range = True
k = next(subset_index_iterator)
end_char = s[k]
if end_char == '\\' and (k < length - 1):
if s[k + 1] in r'-|.^?*+{}()[]':
k = next(subset_index_iterator)
end_char = s[k]
elif s[k + 1] in r'sSdDiIcCwWpP':
msg = "bad character range '%s-\\%s' at position %d: %r"
raise RegexError(msg % (char, s[k + 1], k - 2, s))
if ord(char) > ord(end_char):
msg = "bad character range '%s-%s' at position %d: %r"
raise RegexError(msg % (char, end_char, k - 2, s))
elif expand_ranges:
yield from range(ord(char) + 1, ord(end_char) + 1)
else:
yield ord(char), ord(end_char) + 1
elif s[k] in r'|.^?*+{}()':
if escaped:
escaped = False
on_range = False
char = s[k]
yield ord(char)
elif s[k] in r'[]':
if not escaped and length > 1:
raise RegexError("bad character %r at position %d" % (s[k], k))
escaped = on_range = False
char = s[k]
if k >= length - 2 or s[k + 1] != '-':
yield ord(char)
elif s[k] == '\\':
if escaped:
escaped = on_range = False
char = '\\'
yield ord(char)
else:
escaped = True
else:
if escaped:
escaped = False
yield ord('\\')
on_range = False
char = s[k]
if k >= length - 2 or s[k + 1] != '-':
yield ord(char)
if escaped:
yield ord('\\')
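# A short illustration of the generator's output (derived from the parsing
# rules above; kept as a comment so the module stays import-clean):
#
#     list(iterparse_character_subset('a-d0'))
#     -> [(97, 101), 48]
#     list(iterparse_character_subset('a-d', expand_ranges=True))
#     -> [97, 98, 99, 100]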
class UnicodeSubset(MutableSet):
"""
Represents a subset of Unicode code points, implemented with an ordered list of
integer values and ranges. Codepoints can be added or discarded using sequences
of integer values and ranges or with strings equivalent to regex character set.
    :param codepoints: a sequence of integer values and ranges, another UnicodeSubset \
    instance or a string equivalent to a regex character set.
"""
__slots__ = '_codepoints',
def __init__(self, codepoints=None):
if not codepoints:
self._codepoints = list()
elif isinstance(codepoints, list):
self._codepoints = sorted(codepoints, key=code_point_order)
elif isinstance(codepoints, UnicodeSubset):
self._codepoints = codepoints.codepoints.copy()
else:
self._codepoints = list()
self.update(codepoints)
@property
def codepoints(self):
return self._codepoints
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, str(self))
def __str__(self):
return ''.join(code_point_repr(cp) for cp in self._codepoints)
def copy(self):
return self.__copy__()
def __copy__(self):
return UnicodeSubset(self._codepoints)
def __reversed__(self):
for item in reversed(self._codepoints):
if isinstance(item, int):
yield item
else:
yield from reversed(range(item[0], item[1]))
def complement(self):
last_cp = 0
for cp in self._codepoints:
if isinstance(cp, int):
cp = cp, cp + 1
diff = cp[0] - last_cp
if diff > 2:
yield last_cp, cp[0]
elif diff == 2:
yield last_cp
yield last_cp + 1
elif diff == 1:
yield last_cp
elif diff:
raise ValueError("unordered code points found in {!r}".format(self))
last_cp = cp[1]
if last_cp < maxunicode:
yield last_cp, maxunicode + 1
elif last_cp == maxunicode:
yield maxunicode
def iter_characters(self):
return map(chr, self.__iter__())
#
# MutableSet's abstract methods implementation
def __contains__(self, value):
if not isinstance(value, int):
try:
value = ord(value)
except TypeError:
return False
for cp in self._codepoints:
if not isinstance(cp, int):
if cp[0] > value:
return False
elif cp[1] <= value:
continue
else:
return True
elif cp > value:
return False
elif cp == value:
return True
return False
def __iter__(self):
for cp in self._codepoints:
if isinstance(cp, int):
yield cp
else:
yield from range(*cp)
def __len__(self):
k = 0
for _ in self:
k += 1
return k
def update(self, *others):
for value in others:
if isinstance(value, str):
for cp in iter_code_points(iterparse_character_subset(value), reverse=True):
self.add(cp)
else:
for cp in iter_code_points(value, reverse=True):
self.add(cp)
def add(self, value):
try:
start_value, end_value = get_code_point_range(value)
except TypeError:
raise ValueError("{!r} is not a Unicode code point value/range".format(value))
code_points = self._codepoints
last_index = len(code_points) - 1
for k, cp in enumerate(code_points):
if isinstance(cp, int):
cp = cp, cp + 1
if end_value < cp[0]:
code_points.insert(k, value if isinstance(value, int) else tuple(value))
elif start_value > cp[1]:
continue
elif end_value > cp[1]:
if k == last_index:
code_points[k] = min(cp[0], start_value), end_value
else:
next_cp = code_points[k + 1]
higher_bound = next_cp if isinstance(next_cp, int) else next_cp[0]
if end_value <= higher_bound:
code_points[k] = min(cp[0], start_value), end_value
else:
code_points[k] = min(cp[0], start_value), higher_bound
start_value = higher_bound
continue
elif start_value < cp[0]:
code_points[k] = start_value, cp[1]
break
else:
self._codepoints.append(tuple(value) if isinstance(value, list) else value)
def difference_update(self, *others):
for value in others:
if isinstance(value, str):
for cp in iter_code_points(iterparse_character_subset(value), reverse=True):
self.discard(cp)
else:
for cp in iter_code_points(value, reverse=True):
self.discard(cp)
def discard(self, value):
try:
start_cp, end_cp = get_code_point_range(value)
except TypeError:
raise ValueError("{!r} is not a Unicode code point value/range".format(value))
code_points = self._codepoints
for k in reversed(range(len(code_points))):
cp = code_points[k]
if isinstance(cp, int):
cp = cp, cp + 1
if start_cp >= cp[1]:
break
elif end_cp >= cp[1]:
if start_cp <= cp[0]:
del code_points[k]
elif start_cp - cp[0] > 1:
code_points[k] = cp[0], start_cp
else:
code_points[k] = cp[0]
elif end_cp > cp[0]:
if start_cp <= cp[0]:
if cp[1] - end_cp > 1:
code_points[k] = end_cp, cp[1]
else:
code_points[k] = cp[1] - 1
else:
if cp[1] - end_cp > 1:
code_points.insert(k + 1, (end_cp, cp[1]))
else:
code_points.insert(k + 1, cp[1] - 1)
if start_cp - cp[0] > 1:
code_points[k] = cp[0], start_cp
else:
code_points[k] = cp[0]
#
# MutableSet's mixin methods override
def clear(self):
del self._codepoints[:]
def __eq__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
elif isinstance(other, UnicodeSubset):
return self._codepoints == other._codepoints
else:
return self._codepoints == other
def __ior__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
elif isinstance(other, UnicodeSubset):
other = reversed(other._codepoints)
elif isinstance(other, str):
other = reversed(UnicodeSubset(other)._codepoints)
else:
other = iter_code_points(other, reverse=True)
for cp in other:
self.add(cp)
return self
def __isub__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
elif isinstance(other, UnicodeSubset):
other = reversed(other._codepoints)
elif isinstance(other, str):
other = reversed(UnicodeSubset(other)._codepoints)
else:
other = iter_code_points(other, reverse=True)
for cp in other:
self.discard(cp)
return self
def __sub__(self, other):
obj = self.copy()
return obj.__isub__(other)
__rsub__ = __sub__
def __iand__(self, other):
for value in (self - other):
self.discard(value)
return self
def __ixor__(self, other):
if other is self:
self.clear()
return self
elif not isinstance(other, Iterable):
return NotImplemented
elif not isinstance(other, UnicodeSubset):
other = UnicodeSubset(other)
for value in other:
if value in self:
self.discard(value)
else:
self.add(value)
return self
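# A brief usage sketch (values chosen for illustration):
#
#     >>> subset = UnicodeSubset('a-zA-Z')
#     >>> 'Q' in subset
#     True
#     >>> str(subset)
#     'A-Za-z'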
UNICODE_CATEGORIES = {k: UnicodeSubset(v) for k, v in RAW_UNICODE_CATEGORIES.items()}
# See http://www.unicode.org/Public/UNIDATA/Blocks.txt
UNICODE_BLOCKS = {
'IsBasicLatin': UnicodeSubset('\u0000-\u007F'),
'IsLatin-1Supplement': UnicodeSubset('\u0080-\u00FF'),
'IsLatinExtended-A': UnicodeSubset('\u0100-\u017F'),
'IsLatinExtended-B': UnicodeSubset('\u0180-\u024F'),
'IsIPAExtensions': UnicodeSubset('\u0250-\u02AF'),
'IsSpacingModifierLetters': UnicodeSubset('\u02B0-\u02FF'),
'IsCombiningDiacriticalMarks': UnicodeSubset('\u0300-\u036F'),
'IsGreek': UnicodeSubset('\u0370-\u03FF'),
'IsCyrillic': UnicodeSubset('\u0400-\u04FF'),
'IsArmenian': UnicodeSubset('\u0530-\u058F'),
'IsHebrew': UnicodeSubset('\u0590-\u05FF'),
'IsArabic': UnicodeSubset('\u0600-\u06FF'),
'IsSyriac': UnicodeSubset('\u0700-\u074F'),
'IsThaana': UnicodeSubset('\u0780-\u07BF'),
'IsDevanagari': UnicodeSubset('\u0900-\u097F'),
'IsBengali': UnicodeSubset('\u0980-\u09FF'),
'IsGurmukhi': UnicodeSubset('\u0A00-\u0A7F'),
'IsGujarati': UnicodeSubset('\u0A80-\u0AFF'),
'IsOriya': UnicodeSubset('\u0B00-\u0B7F'),
'IsTamil': UnicodeSubset('\u0B80-\u0BFF'),
'IsTelugu': UnicodeSubset('\u0C00-\u0C7F'),
'IsKannada': UnicodeSubset('\u0C80-\u0CFF'),
'IsMalayalam': UnicodeSubset('\u0D00-\u0D7F'),
'IsSinhala': UnicodeSubset('\u0D80-\u0DFF'),
'IsThai': UnicodeSubset('\u0E00-\u0E7F'),
'IsLao': UnicodeSubset('\u0E80-\u0EFF'),
'IsTibetan': UnicodeSubset('\u0F00-\u0FFF'),
'IsMyanmar': UnicodeSubset('\u1000-\u109F'),
'IsGeorgian': UnicodeSubset('\u10A0-\u10FF'),
'IsHangulJamo': UnicodeSubset('\u1100-\u11FF'),
'IsEthiopic': UnicodeSubset('\u1200-\u137F'),
'IsCherokee': UnicodeSubset('\u13A0-\u13FF'),
'IsUnifiedCanadianAboriginalSyllabics': UnicodeSubset('\u1400-\u167F'),
'IsOgham': UnicodeSubset('\u1680-\u169F'),
'IsRunic': UnicodeSubset('\u16A0-\u16FF'),
'IsKhmer': UnicodeSubset('\u1780-\u17FF'),
'IsMongolian': UnicodeSubset('\u1800-\u18AF'),
'IsLatinExtendedAdditional': UnicodeSubset('\u1E00-\u1EFF'),
'IsGreekExtended': UnicodeSubset('\u1F00-\u1FFF'),
'IsGeneralPunctuation': UnicodeSubset('\u2000-\u206F'),
'IsSuperscriptsandSubscripts': UnicodeSubset('\u2070-\u209F'),
'IsCurrencySymbols': UnicodeSubset('\u20A0-\u20CF'),
'IsCombiningMarksforSymbols': UnicodeSubset('\u20D0-\u20FF'),
'IsLetterlikeSymbols': UnicodeSubset('\u2100-\u214F'),
'IsNumberForms': UnicodeSubset('\u2150-\u218F'),
'IsArrows': UnicodeSubset('\u2190-\u21FF'),
'IsMathematicalOperators': UnicodeSubset('\u2200-\u22FF'),
'IsMiscellaneousTechnical': UnicodeSubset('\u2300-\u23FF'),
'IsControlPictures': UnicodeSubset('\u2400-\u243F'),
'IsOpticalCharacterRecognition': UnicodeSubset('\u2440-\u245F'),
'IsEnclosedAlphanumerics': UnicodeSubset('\u2460-\u24FF'),
'IsBoxDrawing': UnicodeSubset('\u2500-\u257F'),
'IsBlockElements': UnicodeSubset('\u2580-\u259F'),
'IsGeometricShapes': UnicodeSubset('\u25A0-\u25FF'),
'IsMiscellaneousSymbols': UnicodeSubset('\u2600-\u26FF'),
'IsDingbats': UnicodeSubset('\u2700-\u27BF'),
'IsBraillePatterns': UnicodeSubset('\u2800-\u28FF'),
'IsCJKRadicalsSupplement': UnicodeSubset('\u2E80-\u2EFF'),
'IsKangxiRadicals': UnicodeSubset('\u2F00-\u2FDF'),
'IsIdeographicDescriptionCharacters': UnicodeSubset('\u2FF0-\u2FFF'),
'IsCJKSymbolsandPunctuation': UnicodeSubset('\u3000-\u303F'),
'IsHiragana': UnicodeSubset('\u3040-\u309F'),
'IsKatakana': UnicodeSubset('\u30A0-\u30FF'),
'IsBopomofo': UnicodeSubset('\u3100-\u312F'),
'IsHangulCompatibilityJamo': UnicodeSubset('\u3130-\u318F'),
'IsKanbun': UnicodeSubset('\u3190-\u319F'),
'IsBopomofoExtended': UnicodeSubset('\u31A0-\u31BF'),
'IsEnclosedCJKLettersandMonths': UnicodeSubset('\u3200-\u32FF'),
'IsCJKCompatibility': UnicodeSubset('\u3300-\u33FF'),
'IsCJKUnifiedIdeographsExtensionA': UnicodeSubset('\u3400-\u4DB5'),
'IsCJKUnifiedIdeographs': UnicodeSubset('\u4E00-\u9FFF'),
'IsYiSyllables': UnicodeSubset('\uA000-\uA48F'),
'IsYiRadicals': UnicodeSubset('\uA490-\uA4CF'),
'IsHangulSyllables': UnicodeSubset('\uAC00-\uD7A3'),
'IsHighSurrogates': UnicodeSubset('\uD800-\uDB7F'),
'IsHighPrivateUseSurrogates': UnicodeSubset('\uDB80-\uDBFF'),
'IsLowSurrogates': UnicodeSubset('\uDC00-\uDFFF'),
'IsPrivateUse': UnicodeSubset('\uE000-\uF8FF\U000F0000-\U000FFFFF\U00100000-\U0010FFFF'),
'IsCJKCompatibilityIdeographs': UnicodeSubset('\uF900-\uFAFF'),
'IsAlphabeticPresentationForms': UnicodeSubset('\uFB00-\uFB4F'),
'IsArabicPresentationForms-A': UnicodeSubset('\uFB50-\uFDFF'),
'IsCombiningHalfMarks': UnicodeSubset('\uFE20-\uFE2F'),
'IsCJKCompatibilityForms': UnicodeSubset('\uFE30-\uFE4F'),
'IsSmallFormVariants': UnicodeSubset('\uFE50-\uFE6F'),
'IsArabicPresentationForms-B': UnicodeSubset('\uFE70-\uFEFE'),
'IsSpecials': UnicodeSubset('\uFEFF\uFFF0-\uFFFD'),
'IsHalfwidthandFullwidthForms': UnicodeSubset('\uFF00-\uFFEF'),
'IsOldItalic': UnicodeSubset('\U00010300-\U0001032F'),
'IsGothic': UnicodeSubset('\U00010330-\U0001034F'),
'IsDeseret': UnicodeSubset('\U00010400-\U0001044F'),
'IsByzantineMusicalSymbols': UnicodeSubset('\U0001D000-\U0001D0FF'),
'IsMusicalSymbols': UnicodeSubset('\U0001D100-\U0001D1FF'),
'IsMathematicalAlphanumericSymbols': UnicodeSubset('\U0001D400-\U0001D7FF'),
'IsCJKUnifiedIdeographsExtensionB': UnicodeSubset('\U00020000-\U0002A6D6'),
'IsCJKCompatibilityIdeographsSupplement': UnicodeSubset('\U0002F800-\U0002FA1F'),
'IsTags': UnicodeSubset('\U000E0000-\U000E007F'),
}
UNICODE_BLOCKS['IsPrivateUse'].update('\U000F0000-\U0010FFFD')
def unicode_subset(name):
if name.startswith('Is'):
try:
return UNICODE_BLOCKS[name]
except KeyError:
raise RegexError("%r doesn't match to any Unicode block." % name)
else:
try:
return UNICODE_CATEGORIES[name]
except KeyError:
raise RegexError("%r doesn't match to any Unicode category." % name)
| mit | 2,546,672,420,595,934,000 | 36.927565 | 97 | 0.564403 | false | 3.761724 | false | false | false |
Aeronautics/aero | aero/adapters/npm.py | 1 | 1826 | # -*- coding: utf-8 -*-
__author__ = 'nickl-'
__all__ = ('Npm', )
from string import strip
from re import match, sub
from aero.__version__ import __version__,enc
from .base import BaseAdapter
class Npm(BaseAdapter):
"""
Node package manager adapter.
"""
def search(self, query):
response = self.command('search -q', query)[0].decode(*enc)
lst = list(
self.__parse_search(line) for line in response.splitlines()
if 'npm http' not in line and not bool(match(
'^NAME\s+DESCRIPTION\s+AUTHOR\s+DATE\s+KEYWORDS', line
))
)
if lst:
return dict([(k, v) for k, v in lst if k != 0])
return {}
def __parse_search(self, result):
r = match(
'^([A-Za-z0-9\-]*)\s+(\w.*)=(.+)\s+(\d\d\d\d[\d\-: ]*)\s*?(\w?.*?)$',
result
)
if r and len(r.groups()) == 5:
r = map(strip, list(r.groups()))
pkg = self.package_name(r.pop(0))
return pkg, r[2] + '\n' + r[0]
return 0, 0
def install(self, query):
return self.shell('install', query, ['--global'])
def info(self, query):
response = self.command('view', query)[0].decode(*enc)
try:
import json
            r = json.loads(sub("'", '"', sub(r'\s(\w+):', r' "\1":', response.strip())))
response = []
for k in sorted(r):
if isinstance(r[k], dict):
r[k] = '\n'.join([': '.join(list(l)) for l in r[k].items()])
elif isinstance(r[k], list):
r[k] = ', '.join(r[k])
if r[k]:
response.append((k, str(r[k])))
return response
except ValueError:
return ['Aborted: No info available']
| bsd-3-clause | -2,070,850,359,834,000,600 | 31.035088 | 87 | 0.468237 | false | 3.566406 | false | false | false |
Morisset/PyNeb_devel | pyneb/sample_scripts/Choroni_School/ex2_1.py | 1 | 2618 | """
This file contains the definitions of the functions used to answer the
questions of exercise 2.1 of PLASMA DIAGNOSTICS.
To execute these functions, one first needs to import a few libraries,
as listed at the beginning of the file run_ex.py, as well as this file.
The functions can then be called as explained in the file run_ex.py.
The functions defined here can be modified. In that case, it is necessary,
before using the modified version, to do "reload(ex2_1)" from the terminal.
Sometimes this does not work well; it is then recommended to quit python
(ctrl D) and enter again (ipython --pylab).
"""
import numpy as np
import matplotlib.pyplot as plt
import pyneb as pn
from pyneb.utils.misc import parseAtom
#pn.atomicData.setDataFile('cl_iii_atom_M83-KS86.fits')
def p1(ion):
# split ion into elem and spec, e.g 'O3' into 'O' and 3
elem, spec = parseAtom(ion)
# instanciate the corresponding Atom object
atom = pn.Atom(elem, spec)
# print information including transition probabilities
#atom.printIonic(printA = True)
# prepare a new figure
plt.figure()
# plot energy levels
atom.plotGrotrian()
def p2(diag):
# get the ion, and diagnostic description from the dictionary:
ion, diag_eval, err = pn.diags_dict[diag]
# split ion into elem and spec, e.g 'O3' into 'O' and 3
elem, spec = parseAtom(ion)
# prepare a new figure
plt.figure()
# create a grid of emissivities
#NB: one can use a pypic file containing all the emissivities, if already made
# in that case set restore_file to the name of the pypic file.
grid = pn.EmisGrid(elem, spec, restore_file=None, OmegaInterp='Linear')
# plot the contours
grid.plotContours(to_eval=diag_eval, low_level=None, high_level=None, n_levels=20,
linestyles='-', clabels=True, log_levels=True,
title='{0} {1}'.format(ion, diag_eval))
# save the plot into pdf files
plt.savefig('{0}_{1}.pdf'.format(ion, diag_eval.replace('/', '_')))
# the following is to plot all the possible diagnostic ratios available in pyneb
def plot_all(save=False):
pn.log_.level=1
AA = pn.getAtomDict(OmegaInterp='Linear')
for diag in pn.diags_dict:
atom, diag_eval, err = pn.diags_dict[diag]
if atom in AA:
plt.figure()
grid = pn.EmisGrid(atomObj=AA[atom])
grid.plotContours(to_eval=diag_eval)
if save:
plt.savefig('{0}_{1}.pdf'.format(atom, diag_eval.replace('/', '_')))
| gpl-3.0 | 7,602,913,074,641,236,000 | 37.5 | 87 | 0.65317 | false | 3.486019 | false | false | false |
tanzaho/python-goose | goose/images/extractors.py | 3 | 2648 | # -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from goose.images.image import Image
import re
class ImageExtractor(object):
def __init__(self, config, article):
self.article = article
self.config = config
self.parser = self.config.get_parser()
def get_images(self, top_node):
return self.get_opengraph_tags() + self.get_content_images(top_node)
def get_opengraph_tags(self):
node = self.article.raw_doc
meta = self.parser.getElementsByTag(node, tag='meta', attr='property', value='og:image')
images = []
for item in meta:
if self.parser.getAttribute(item, attr='property') == 'og:image':
src = self.parser.getAttribute(item, attr='content')
if src:
images.append(self.from_image_node_to_image(item, src))
return images
def get_content_images(self, top_node):
images = []
image_nodes = self.parser.getElementsByTag(top_node, tag='img')
for image_node in image_nodes:
image = self.from_image_node_to_image(image_node)
images.append(image)
return images
def from_image_node_to_image(self, image_node, src=None):
image = Image()
if src:
image.src = src
else:
image.src = self.parser.getAttribute(image_node, 'src')
image.width = self.size_to_int(image_node, 'width')
image.height = self.size_to_int(image_node, 'height')
return image
def size_to_int(self, image_node, attribute_name):
size = self.parser.getAttribute(image_node, attribute_name)
if size is None:
return None
        digits_only = re.sub(r"\D", "", size)
        if len(digits_only) == 0:
return None
return int(digits_only)
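# A hedged usage sketch (assuming a Goose configuration object and a parsed
# article whose top_node holds the extracted content; names are illustrative):
#   extractor = ImageExtractor(config, article)
#   images = extractor.get_images(article.top_node)
#   for image in images:
#       print(image.src, image.width, image.height)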
| apache-2.0 | 3,659,734,277,561,665,000 | 34.306667 | 96 | 0.655589 | false | 3.882698 | false | false | false |
Emory-LCS/Alma-Public | NewEbooks/new_ebooks_api.py | 1 | 5458 | #!/opt/rh/python27/root/usr/bin/python
# -*- coding: utf-8 -*-
r"""
 Program name: new_ebooks_api.py
 Author: Bernardo Gomez/Alex Cooper
 Date: June, 2016
Purpose:
"""
import os
import sys
import re
import requests
import xml.etree.ElementTree as elementTree
def get_item_info(result_node,id_list):
outcome=1
try:
rows=result_node.findall("Row")
except:
sys.stderr.write("couldn't find Rows."+"\n")
return id_list,outcome
mms_id=""
item_creation=""
item_modification=""
item_status=""
timestamp=""
process_type=""
receiving_date=""
barcode=""
holding_id=""
item_id=""
for this_row in rows:
item_row=""
try:
this_node=this_row.find("Column1")
mms_id=str(this_node.text)
except:
sys.stderr.write("couldn't find Column1."+"\n")
return id_list,outcome
try:
this_node=this_row.find("Column2")
active_date=str(this_node.text)
except:
sys.stderr.write("couldn't find Column2."+"\n")
return id_list,outcome
item_row=str(mms_id)
id_list.append(item_row)
return id_list,0
def get_record_ids(result_node,id_list):
outcome=1
try:
rows=result_node.findall("Row")
except:
sys.stderr.write("couldn't find Rows."+"\n")
return id_list,outcome
for this_row in rows:
try:
id_node=this_row.find("Column3")
id_list.append(str(id_node.text))
except:
sys.stderr.write("couldn't find Column3."+"\n")
return id_list,outcome
return id_list,0
def main():
if len(sys.argv) < 2:
sys.stderr.write("system failure. configuration file is missing."+"\n")
return 1
try:
configuration=open(sys.argv[1], 'Ur')
except:
sys.stderr.write("couldn't open configuration file "+sys.argv[1]+"\n")
return 1
pat=re.compile("(.*?)=(.*)")
for line in configuration:
line=line.rstrip("\n")
m=pat.match(line)
if m:
if m.group(1) == "url":
url=m.group(2)
if m.group(1) == "path":
path=m.group(2)
if m.group(1) == "apikey":
apikey=m.group(2)
if m.group(1) == "limit":
limit=m.group(2)
configuration.close()
in_string=""
outcome=1
payload={'apikey':apikey,'path':path,'limit':limit}
try:
r=requests.get(url,params=payload)
except:
sys.stderr.write("api request failed."+"\n")
      return outcome
return_code=r.status_code
if return_code == 200:
response=r.content
else:
sys.stderr.write("FAILED(1)\n")
response=r.content
sys.stderr.write(str(response)+"\n")
return 1
in_string=response
in_string=in_string.replace("\n","")
in_string=in_string.replace(" xmlns=\"urn:schemas-microsoft-com:xml-analysis:rowset\"","")
try:
tree=elementTree.fromstring(in_string)
except:
sys.stderr.write("parse failed(1)."+"\n")
return outcome
try:
finished=tree.find("QueryResult/IsFinished")
except:
sys.stderr.write("parse failed(2)."+"\n")
return outcome
id_list=[]
if finished.text == "false":
try:
token=tree.find("QueryResult/ResumptionToken")
except:
sys.stderr.write("parse failed(3)."+"\n")
return outcome
this_token=str(token.text)
id_list=[]
sys.stderr.write(str(url)+" "+str(apikey)+" "+this_token+" "+str(id_list)+" "+limit+"\n")
try:
result_node=tree.find("QueryResult/ResultXml/rowset")
except:
sys.stderr.write("couldn't find rowset."+"\n")
return outcome
id_list,outcome=get_item_info(result_node,id_list)
work_to_do=True
outcome=1
while work_to_do:
payload={'apikey':apikey,'token':this_token,'limit':limit}
try:
r=requests.get(url,params=payload)
except:
sys.stderr.write("api request failed."+"\n")
return outcome
return_code=r.status_code
if return_code == 200:
response=r.content
else:
sys.stderr.write("FAILED(2)\n")
response=r.content
sys.stderr.write(str(response)+"\n")
return outcome
in_string=response
in_string=in_string.replace("\n","")
in_string=in_string.replace(" xmlns=\"urn:schemas-microsoft-com:xml-analysis:rowset\"","")
try:
tree=elementTree.fromstring(in_string)
except:
sys.stderr.write("parse failed(1)."+"\n")
return outcome
try:
finished=tree.find("QueryResult/IsFinished")
except:
sys.stderr.write("parse failed(2)."+"\n")
return outcome
if finished.text == "true":
work_to_do=False
try:
result_node=tree.find("QueryResult/ResultXml/rowset")
# print result_node
except:
sys.stderr.write("couldn't find rowset."+"\n")
return outcome
id_list,outcome=get_item_info(result_node,id_list)
else:
try:
result_node=tree.find("QueryResult/ResultXml/rowset")
except:
sys.stderr.write("couldn't find rowset."+"\n")
return outcome
id_list,outcome=get_item_info(result_node,id_list)
for id in id_list:
print str(id)
return 0
if __name__=="__main__":
sys.exit(main())
| gpl-3.0 | -8,139,539,969,137,573,000 | 27.134021 | 99 | 0.572188 | false | 3.463198 | false | false | false |
Caoimhinmg/PmagPy | programs/customize_criteria.py | 1 | 2371 | #!/usr/bin/env python
from __future__ import print_function
from builtins import input
import sys
import pmagpy.pmag as pmag
def main():
"""
NAME
customize_criteria.py
DESCRIPTION
Allows user to specify acceptance criteria, saves them in pmag_criteria.txt
SYNTAX
customize_criteria.py [-h][command line options]
OPTIONS
-h prints help message and quits
-f IFILE, reads in existing criteria
-F OFILE, writes to pmag_criteria format file
DEFAULTS
IFILE: pmag_criteria.txt
OFILE: pmag_criteria.txt
OUTPUT
creates a pmag_criteria.txt formatted output file
"""
infile,critout="","pmag_criteria.txt"
# parse command line options
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
infile=sys.argv[ind+1]
crit_data,file_type=pmag.magic_read(infile)
if file_type!='pmag_criteria':
print('bad input file')
print(main.__doc__)
sys.exit()
print("Acceptance criteria read in from ", infile)
if '-F' in sys.argv:
ind=sys.argv.index('-F')
critout=sys.argv[ind+1]
Dcrit,Icrit,nocrit=0,0,0
custom='1'
crit=input(" [0] Use no acceptance criteria?\n [1] Use default criteria\n [2] customize criteria \n ")
if crit=='0':
print('Very very loose criteria saved in ',critout)
crit_data=pmag.default_criteria(1)
pmag.magic_write(critout,crit_data,'pmag_criteria')
sys.exit()
crit_data=pmag.default_criteria(0)
if crit=='1':
print('Default criteria saved in ',critout)
pmag.magic_write(critout,crit_data,'pmag_criteria')
sys.exit()
CritRec=crit_data[0]
crit_keys=list(CritRec.keys())
crit_keys.sort()
print("Enter new threshold value.\n Return to keep default.\n Leave blank to not use as a criterion\n ")
for key in crit_keys:
if key!='pmag_criteria_code' and key!='er_citation_names' and key!='criteria_definition' and CritRec[key]!="":
print(key, CritRec[key])
new=input('new value: ')
if new != "": CritRec[key]=(new)
pmag.magic_write(critout,[CritRec],'pmag_criteria')
print("Criteria saved in pmag_criteria.txt")
if __name__ == "__main__":
main()
| bsd-3-clause | 5,930,729,533,313,144,000 | 31.479452 | 118 | 0.611556 | false | 3.406609 | false | false | false |
neuroidss/nupic.studio | nupic_studio/htm/node_sensor.py | 1 | 13088 | import os
import collections
import numpy
import operator
import math
import dateutil.parser
from PyQt4 import QtGui, QtCore
from nupic_studio import getInstantiatedClass
from nupic_studio.ui import Global
from nupic_studio.htm import maxPreviousSteps, maxFutureSteps, maxPreviousStepsWithInference
from nupic_studio.htm.node import Node, NodeType
from nupic_studio.htm.bit import Bit
from nupic_studio.htm.encoding import FieldDataType
from nupic.encoders import MultiEncoder
from nupic.data.file_record_stream import FileRecordStream
class DataSourceType:
"""
Types of data sources which a sensor gets inputs.
"""
file = 1
database = 2
class PredictionsMethod:
"""
Methods used to get predicted values and their probabilities
"""
reconstruction = "Reconstruction"
classification = "Classification"
class Sensor(Node):
"""
A super class only to group properties related to sensors.
"""
#region Constructor
def __init__(self, name):
"""
Initializes a new instance of this class.
"""
Node.__init__(self, name, NodeType.sensor)
#region Instance fields
self.bits = []
"""An array of the bit objects that compose the current output of this node."""
self.dataSource = None
"""Data source which provides records to fed into a region."""
self.dataSourceType = DataSourceType.file
"""Type of the data source (File or Database)"""
self.fileName = ''
"""The input file name to be handled. Returns the input file name only if it is in the project directory, full path otherwise."""
self.databaseConnectionString = ""
"""Connection string of the database."""
self.databaseTable = ''
"""Target table of the database."""
self.encoder = None
"""Multi-encoder which concatenate sub-encodings to convert raw data to htm input and vice-versa."""
self.encodings = []
"""List of sub-encodings that handles the input from database"""
self.predictionsMethod = PredictionsMethod.reconstruction
"""Method used to get predicted values and their probabilities."""
self.enableClassificationLearning = True
"""Switch for classification learning"""
self.enableClassificationInference = True
"""Switch for classification inference"""
#endregion
#region Statistics properties
self.statsPrecisionRate = 0.
#endregion
#endregion
#region Methods
def getBit(self, x, y):
"""
Return the bit located at given position
"""
bit = self.bits[(y * self.width) + x]
return bit
def initialize(self):
"""
Initialize this node.
"""
Node.initialize(self)
# Initialize input bits
self.bits = []
for x in range(self.width):
for y in range(self.height):
bit = Bit()
bit.x = x
bit.y = y
self.bits.append(bit)
if self.dataSourceType == DataSourceType.file:
"""
Initialize this node opening the file and place cursor on the first record.
"""
# If file name provided is a relative path, use project file path
if self.fileName != '' and os.path.dirname(self.fileName) == '':
fullFileName = os.path.dirname(Global.project.fileName) + '/' + self.fileName
else:
fullFileName = self.fileName
# Check if file really exists
if not os.path.isfile(fullFileName):
QtGui.QMessageBox.warning(None, "Warning", "Input stream file '" + fullFileName + "' was not found or specified.", QtGui.QMessageBox.Ok)
return
# Create a data source for read the file
self.dataSource = FileRecordStream(fullFileName)
elif self.dataSourceType == DataSourceType.database:
pass
self.encoder = MultiEncoder()
for encoding in self.encodings:
encoding.initialize()
# Create an instance class for an encoder given its module, class and constructor params
encoding.encoder = getInstantiatedClass(encoding.encoderModule, encoding.encoderClass, encoding.encoderParams)
# Take the first part of encoder field name as encoder name
# Ex: timestamp_weekend.weekend => timestamp_weekend
encoding.encoder.name = encoding.encoderFieldName.split('.')[0]
# Add sub-encoder to multi-encoder list
self.encoder.addEncoder(encoding.dataSourceFieldName, encoding.encoder)
# If encoder size is not the same to sensor size then throws exception
encoderSize = self.encoder.getWidth()
sensorSize = self.width * self.height
if encoderSize > sensorSize:
QtGui.QMessageBox.warning(None, "Warning", "'" + self.name + "': Encoder size (" + str(encoderSize) + ") is different from sensor size (" + str(self.width) + " x " + str(self.height) + " = " + str(sensorSize) + ").", QtGui.QMessageBox.Ok)
return
return True
def nextStep(self):
"""
Performs actions related to time step progression.
"""
    # Update state machines by removing the first element and adding a new element at the end
for encoding in self.encodings:
encoding.currentValue.rotate()
if encoding.enableInference:
encoding.predictedValues.rotate()
encoding.bestPredictedValue.rotate()
Node.nextStep(self)
for bit in self.bits:
bit.nextStep()
# Get record value from data source
# If the last record was reached just rewind it
data = self.dataSource.getNextRecordDict()
if not data:
self.dataSource.rewind()
data = self.dataSource.getNextRecordDict()
# Pass raw values to encoder and get a concatenated array
outputArray = numpy.zeros(self.encoder.getWidth())
self.encoder.encodeIntoArray(data, outputArray)
# Get values obtained from the data source.
outputValues = self.encoder.getScalars(data)
# Get raw values and respective encoded bit array for each field
prevOffset = 0
for i in range(len(self.encodings)):
encoding = self.encodings[i]
# Convert the value to its respective data type
currValue = outputValues[i]
if encoding.encoderFieldDataType == FieldDataType.boolean:
currValue = bool(currValue)
elif encoding.encoderFieldDataType == FieldDataType.integer:
currValue = int(currValue)
elif encoding.encoderFieldDataType == FieldDataType.decimal:
currValue = float(currValue)
elif encoding.encoderFieldDataType == FieldDataType.dateTime:
currValue = dateutil.parser.parse(str(currValue))
elif encoding.encoderFieldDataType == FieldDataType.string:
currValue = str(currValue)
encoding.currentValue.setForCurrStep(currValue)
# Update sensor bits
for i in range(len(outputArray)):
if outputArray[i] > 0.:
self.bits[i].isActive.setForCurrStep(True)
else:
self.bits[i].isActive.setForCurrStep(False)
# Mark falsely predicted bits
for bit in self.bits:
if bit.isPredicted.atPreviousStep() and not bit.isActive.atCurrStep():
bit.isFalselyPredicted.setForCurrStep(True)
self._output = outputArray
def getPredictions(self):
"""
Get the predictions after an iteration.
"""
if self.predictionsMethod == PredictionsMethod.reconstruction:
# Prepare list with predictions to be classified
# This list contains the indexes of all bits that are predicted
output = []
for i in range(len(self.bits)):
if self.bits[i].isPredicted.atCurrStep():
output.append(1)
else:
output.append(0)
output = numpy.array(output)
# Decode output and create predictions list
fieldsDict, fieldsOrder = self.encoder.decode(output)
for encoding in self.encodings:
if encoding.enableInference:
predictions = []
encoding.predictedValues.setForCurrStep(dict())
# If encoder field name was returned by decode(), assign the the predictions to it
if encoding.encoderFieldName in fieldsOrder:
predictedLabels = fieldsDict[encoding.encoderFieldName][1].split(', ')
predictedValues = fieldsDict[encoding.encoderFieldName][0]
for i in range(len(predictedLabels)):
predictions.append([predictedValues[i], predictedLabels[i]])
encoding.predictedValues.atCurrStep()[1] = predictions
# Get the predicted value with the biggest probability to happen
if len(predictions) > 0:
bestPredictionRange = predictions[0][0]
min = bestPredictionRange[0]
max = bestPredictionRange[1]
bestPredictedValue = (min + max) / 2.0
encoding.bestPredictedValue.setForCurrStep(bestPredictedValue)
elif self.predictionsMethod == PredictionsMethod.classification:
      # A classification involves estimating which values are likely to occur in the next time steps.
offset = 0
for encoding in self.encodings:
encoderWidth = encoding.encoder.getWidth()
if encoding.enableInference:
# Prepare list with predictions to be classified
# This list contains the indexes of all bits that are predicted
patternNZ = []
          for i in range(offset, offset + encoderWidth):
if self.bits[i].isActive.atCurrStep():
patternNZ.append(i)
# Get the bucket index of the current value at the encoder
actualValue = encoding.currentValue.atCurrStep()
bucketIdx = encoding.encoder.getBucketIndices(actualValue)[0]
# Perform classification
clasResults = encoding.classifier.compute(recordNum=Global.currStep, patternNZ=patternNZ, classification={'bucketIdx': bucketIdx, 'actValue': actualValue}, learn=self.enableClassificationLearning, infer=self.enableClassificationInference)
encoding.predictedValues.setForCurrStep(dict())
for step in encoding.steps:
# Calculate probability for each predicted value
predictions = dict()
for (actValue, prob) in zip(clasResults['actualValues'], clasResults[step]):
if actValue in predictions:
predictions[actValue] += prob
else:
predictions[actValue] = prob
# Remove predictions with low probabilities
maxVal = (None, None)
for (actValue, prob) in predictions.items():
if len(predictions) <= 1:
break
if maxVal[0] is None or prob >= maxVal[1]:
if maxVal[0] is not None and maxVal[1] < encoding.minProbabilityThreshold:
del predictions[maxVal[0]]
maxVal = (actValue, prob)
elif prob < encoding.minProbabilityThreshold:
del predictions[actValue]
# Sort the list of values from more probable to less probable values
# an decrease the list length to max predictions per step limit
predictions = sorted(predictions.iteritems(), key=operator.itemgetter(1), reverse=True)
predictions = predictions[:maxFutureSteps]
encoding.predictedValues.atCurrStep()[step] = predictions
# Get the predicted value with the biggest probability to happen
bestPredictedValue = encoding.predictedValues.atCurrStep()[1][0][0]
encoding.bestPredictedValue.setForCurrStep(bestPredictedValue)
        offset += encoderWidth
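      # A sketch of the structure built above (inferred from the assignments;
      # each step key maps to value/probability pairs, most probable first):
      #   encoding.predictedValues.atCurrStep() == {
      #     1: [[value, probability], ...],   # next step
      #     2: [[value, probability], ...],   # two steps ahead, and so on
      #   }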
def calculateStatistics(self):
"""
Calculate statistics after an iteration.
"""
if Global.currStep > 0:
precision = 0.
# Calculate the prediction precision comparing if the current value is in the range of any prediction.
for encoding in self.encodings:
if encoding.enableInference:
predictions = encoding.predictedValues.atPreviousStep()[1]
for predictedValue in predictions:
min = None
max = None
value = predictedValue[0]
if self.predictionsMethod == PredictionsMethod.reconstruction:
min = value[0]
max = value[1]
elif self.predictionsMethod == PredictionsMethod.classification:
min = value
max = value
if isinstance(min, (int, long, float, complex)) and isinstance(max, (int, long, float, complex)):
min = math.floor(min)
max = math.ceil(max)
if min <= encoding.currentValue.atCurrStep() <= max:
precision = 100.
break
# The precision rate is the average of the precision calculated in every step
self.statsPrecisionRate = (self.statsPrecisionRate + precision) / 2
else:
self.statsPrecisionRate = 0.
for bit in self.bits:
bit.calculateStatistics()
#endregion
| gpl-2.0 | 3,352,760,374,799,920,000 | 34.055096 | 248 | 0.648227 | false | 4.524024 | false | false | false |
trailofbits/mcsema | tools/mcsema_disass/binja/util.py | 1 | 9080 | # Copyright (c) 2017 Trail of Bits, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import binaryninja as binja
from binaryninja.enums import (
Endianness, LowLevelILOperation, SectionSemantics
)
import inspect
import logging
import magic
import re
import struct
from collections import defaultdict
LOGNAME = 'binja.cfg'
log = logging.getLogger(LOGNAME)
class StackFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
logging.Formatter.__init__(self, fmt, datefmt)
self.stack_base = len(inspect.stack()) + 7
def format(self, record):
record.indent = ' ' * (len(inspect.stack()) - self.stack_base)
res = logging.Formatter.format(self, record)
del record.indent
return res
def init_logger(log_file):
formatter = StackFormatter('[%(levelname)s] %(indent)s%(message)s')
handler = logging.FileHandler(log_file)
handler.setFormatter(formatter)
log.addHandler(handler)
log.setLevel(logging.DEBUG)
ENDIAN_TO_STRUCT = {
Endianness.LittleEndian: '<',
Endianness.BigEndian: '>'
}
def read_dword(bv, addr):
# type: (binja.BinaryView, int) -> int
# Pad the data if fewer than 4 bytes are read
endianness = ENDIAN_TO_STRUCT[bv.endianness]
data = bv.read(addr, 4)
padded_data = '{{:\x00{}4s}}'.format(endianness).format(data)
fmt = '{}L'.format(endianness)
return struct.unpack(fmt, padded_data)[0]
def read_qword(bv, addr):
# type: (binja.BinaryView, int) -> int
# Pad the data if fewer than 8 bytes are read
endianness = ENDIAN_TO_STRUCT[bv.endianness]
data = bv.read(addr, 8)
padded_data = '{{:\x00{}8s}}'.format(endianness).format(data)
fmt = '{}Q'.format(endianness)
return struct.unpack(fmt, padded_data)[0]
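# A small sketch of the padding trick used above (assuming little-endian data):
#   '{:\x00<4s}'.format('\x34\x12')          -> '\x34\x12\x00\x00'
#   struct.unpack('<L', '\x34\x12\x00\x00')  -> (0x1234,)
# The struct endianness character ('<' or '>') doubles as the alignment
# character in the format spec, so short reads are NUL-padded on the right
# for little-endian data and on the left for big-endian.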
def load_binary(path):
magic_type = magic.from_file(path)
if 'ELF' in magic_type:
bv_type = binja.BinaryViewType['ELF']
elif 'PE32' in magic_type:
bv_type = binja.BinaryViewType['PE']
elif 'Mach-O' in magic_type:
bv_type = binja.BinaryViewType['Mach-O']
else:
bv_type = binja.BinaryViewType['Raw']
# Can't do anything with Raw type
log.fatal('Unknown binary type: "{}", exiting'.format(magic_type))
exit(1)
log.debug('Loading binary in binja...')
bv = bv_type.open(path)
bv.update_analysis_and_wait()
# NOTE: at the moment binja will not load a binary
# that doesn't have an entry point
if len(bv) == 0:
log.error('Binary could not be loaded in binja, is it linked?')
exit(1)
return bv
def find_symbol_name(bv, addr):
"""Attempt to find a symbol for a given address
Args:
bv (binja.BinaryView)
addr (int): Address the symbol should point to
Returns:
(str): Symbol name if found, empty string otherwise
"""
sym = bv.get_symbol_at(addr)
if sym is not None:
return sym.name
return ''
def get_func_containing(bv, addr):
""" Finds the function, if any, containing the given address
Args:
bv (binja.BinaryView)
addr (int)
Returns:
binja.Function
"""
funcs = bv.get_functions_containing(addr)
  return funcs[0] if funcs else None
def get_section_at(bv, addr):
"""Returns the section in the binary that contains the given address"""
if not is_valid_addr(bv, addr):
return None
for sec in bv.sections.values():
if sec.start <= addr < sec.end:
return sec
return None
def is_external_ref(bv, addr):
sym = bv.get_symbol_at(addr)
return sym is not None and 'Import' in sym.type.name
def is_valid_addr(bv, addr):
return bv.get_segment_at(addr) is not None
def is_code(bv, addr):
"""Returns `True` if the given address lies in a code section"""
# This is a bit more specific than checking if a segment is executable,
# Binja will classify a section as ReadOnlyCode or ReadOnlyData, though
# both sections are still in an executable segment
sec = get_section_at(bv, addr)
return sec is not None and sec.semantics == SectionSemantics.ReadOnlyCodeSectionSemantics
def is_executable(bv, addr):
"""Returns `True` if the given address lies in an executable segment"""
seg = bv.get_segment_at(addr)
return seg is not None and seg.executable
def is_readable(bv, addr):
"""Returns `True` if the given address lies in a readable segment"""
seg = bv.get_segment_at(addr)
  return seg is not None and seg.readable
def is_writeable(bv, addr):
"""Returns `True` if the given address lies in a writable segment"""
seg = bv.get_segment_at(addr)
  return seg is not None and seg.writable
def is_ELF(bv):
return bv.view_type == 'ELF'
def is_PE(bv):
return bv.view_type == 'PE'
def clamp(val, vmin, vmax):
return min(vmax, max(vmin, val))
# Caching results of is_section_external
_EXT_SECTIONS = set()
_INT_SECTIONS = set()
def is_section_external(bv, sect):
"""Returns `True` if the given section contains only external references
Args:
bv (binja.BinaryView)
sect (binja.binaryview.Section)
"""
if sect.start in _EXT_SECTIONS:
return True
if sect.start in _INT_SECTIONS:
return False
if is_ELF(bv):
if re.search(r'\.(got|plt)', sect.name):
_EXT_SECTIONS.add(sect.start)
return True
if is_PE(bv):
if '.idata' in sect.name:
_EXT_SECTIONS.add(sect.start)
return True
_INT_SECTIONS.add(sect.start)
return False
def is_tls_section(bv, addr):
sect_names = (sect.name for sect in bv.get_sections_at(addr))
return any(sect in ['.tbss', '.tdata', '.tls'] for sect in sect_names)
def _search_phrase_op(il, target_op):
""" Helper for finding parts of a phrase[+displacement] il """
op = il.operation
# Handle starting points
if op == LowLevelILOperation.LLIL_SET_REG:
return _search_phrase_op(il.src, target_op)
if op == LowLevelILOperation.LLIL_STORE:
return _search_phrase_op(il.dest, target_op)
# The phrase il may be inside a LLIL_LOAD
if op == LowLevelILOperation.LLIL_LOAD:
return _search_phrase_op(il.src, target_op)
# Continue left/right at an ADD
if op == LowLevelILOperation.LLIL_ADD:
return (_search_phrase_op(il.left, target_op) or
_search_phrase_op(il.right, target_op))
  # Continue left/right at a SUB
if op == LowLevelILOperation.LLIL_SUB:
return (_search_phrase_op(il.left, target_op) or
_search_phrase_op(il.right, target_op))
  # Continue left/right at a compare (CMP_E)
if op == LowLevelILOperation.LLIL_CMP_E:
return (_search_phrase_op(il.left, target_op) or
_search_phrase_op(il.right, target_op))
# Terminate when constant is found
if op == target_op:
return il
def search_phrase_reg(il):
""" Searches for the register used in a phrase
ex: dword [ebp + 0x8] -> ebp
Args:
il (binja.LowLevelILInstruction): Instruction to parse
Returns:
str: register name
"""
res = _search_phrase_op(il, LowLevelILOperation.LLIL_REG)
if res is not None:
return res.src.name
def search_displ_base(il):
""" Searches for the base address used in a phrase[+displacement]
ex: dword [eax * 4 + 0x08040000] -> 0x08040000
dword [ebp + 0x8] -> 0x8
Args:
il (binja.LowLevelILInstruction): Instruction to parse
Returns:
int: base address
"""
res = _search_phrase_op(il, LowLevelILOperation.LLIL_CONST)
if res is not None:
# Interpret the string representation to avoid sign issues
return int(res.tokens[0].text, 16)
def is_jump_tail_call(bv, il):
""" Returns `True` if the given il is a jump to another function """
return il.operation == LowLevelILOperation.LLIL_JUMP and \
il.dest.operation == LowLevelILOperation.LLIL_CONST_PTR and \
get_jump_tail_call_target(bv, il) is not None
def get_jump_tail_call_target(bv, il):
""" Get the target function of a tail-call.
Returns:
binja.Function
"""
try:
return bv.get_function_at(il.dest.constant)
except:
return None
def collect_il_groups(il_func):
""" Gather all il instructions grouped by address
Some instructions (cmov, set, etc.) get expanded into multiple il
instructions when lifted, but `Function.get_lifted_il_at` will only return the first
of all the il instructions at an address. This will group all the il instructions
into a map of address to expanded instructions as follows:
{
addr1 => [single il instruction],
addr2 => [expanded il 1, expanded il 2, ...],
...
}
Args:
il_func: IL function to gather all il groups from
Returns:
dict: Map from address to all IL instructions at that address
"""
il_map = defaultdict(list)
for blk in il_func:
for il in blk:
il_map[il.address].append(il)
return il_map
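# A hedged usage sketch (assuming `func` is a binja.Function from a loaded view):
#   il_map = collect_il_groups(func.lifted_il)
#   for addr in sorted(il_map):
#     log.debug('0x%x lifts to %d IL instruction(s)', addr, len(il_map[addr]))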
| apache-2.0 | -7,528,632,476,422,962,000 | 25.627566 | 91 | 0.686454 | false | 3.29822 | false | false | false |